author    CoprDistGit <infra@openeuler.org>  2025-04-22 00:55:42 +0000
committer CoprDistGit <infra@openeuler.org>  2025-04-22 00:55:42 +0000
commit    e40525e6eefe9c7d1f448fd9f9f5a2f6e9c0e632 (patch)
tree      06e4ab7aad41362baa9a143eabda192466714634 /1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch
parent    b56834d5c2724cbea1898c54e64a337ac1703bd5 (diff)
automatic import of golang
Diffstat (limited to '1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch')
-rw-r--r--  1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch  589
1 file changed, 589 insertions, 0 deletions
diff --git a/1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch b/1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch
new file mode 100644
index 0000000..010f3ea
--- /dev/null
+++ b/1002-cmd-compile-don-t-merge-symbols-on-riscv64-when-dyna.patch
@@ -0,0 +1,589 @@
+From f11737fca5ac7ec43bbd25c07bae6faa6319cddc Mon Sep 17 00:00:00 2001
+From: Meng Zhuo <mengzhuo1203@gmail.com>
+Date: Thu, 12 Sep 2024 19:46:20 +0800
+Subject: [PATCH] cmd/compile: don't merge symbols on riscv64 when dynamic
+ linking
+
+Each plugin is compiled as a separate shared object, with its
+own symbol table. When dynamic linking, plugin symbols are
+resolved within the plugin's scope rather than merged globally,
+in order to avoid conflicts.
+
+Change-Id: I9e6986085855c17fbd6c39b937cb6129d216f5e9
+Reviewed-on: https://go-review.googlesource.com/c/go/+/435015
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: Joel Sing <joel@sing.id.au>
+Reviewed-by: Michael Pratt <mpratt@google.com>
+Reviewed-by: Cherry Mui <cherryyz@google.com>
+---
+ .../compile/internal/ssa/_gen/RISCV64.rules | 82 +++-------
+ .../compile/internal/ssa/rewriteRISCV64.go | 154 +++++++++++-------
+ 2 files changed, 115 insertions(+), 121 deletions(-)
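For readers skimming the patch: the functional change in both hunks below is a single extra guard on the symbol-merging rules, so that merging is skipped when the address base is the SB pseudo-register and the compiler is in dynamic-linking mode. A minimal standalone sketch of that predicate follows; the helper name canFoldSymbol and its boolean parameters are illustrative only, while in the compiler the check appears inline as base.Op != OpSB || !config.ctxt.Flag_dynlink.

package main

import "fmt"

// canFoldSymbol mirrors the guard added by this patch: folding a
// MOVaddr's symbol and offset into a load/store (mergeSym) stays
// allowed unless the base is SB-relative *and* we are compiling for
// dynamic linking, where each plugin resolves symbols in its own
// scope rather than in a globally merged table.
func canFoldSymbol(baseIsSB, dynlink bool) bool {
	return !baseIsSB || !dynlink
}

func main() {
	fmt.Println(canFoldSymbol(true, true))  // false: SB base + dynlink, keep the symbol access separate
	fmt.Println(canFoldSymbol(true, false)) // true: static linking, merging is fine
}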
+
+diff --git a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+index c2df433315..00d0d91b46 100644
+--- a/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
++++ b/src/cmd/compile/internal/ssa/_gen/RISCV64.rules
+@@ -271,65 +271,29 @@
+
+ // We need to fold MOVaddr into the LD/MOVDstore ops so that the live variable analysis
+ // knows what variables are being read/written by the ops.
+-(MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-(MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+-
+-(MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+-(MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+-(MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+-(MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) && is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) =>
+- (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+-(MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+- (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+-(MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+- (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+-(MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+- (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+-(MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem) && canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) =>
+- (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
+-
+-(MOVBUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVBUload [off1+int32(off2)] {sym} base mem)
+-(MOVBload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVBload [off1+int32(off2)] {sym} base mem)
+-(MOVHUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVHUload [off1+int32(off2)] {sym} base mem)
+-(MOVHload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVHload [off1+int32(off2)] {sym} base mem)
+-(MOVWUload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVWUload [off1+int32(off2)] {sym} base mem)
+-(MOVWload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVWload [off1+int32(off2)] {sym} base mem)
+-(MOVDload [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
+- (MOVDload [off1+int32(off2)] {sym} base mem)
+-
+-(MOVBstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+- (MOVBstore [off1+int32(off2)] {sym} base val mem)
+-(MOVHstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+- (MOVHstore [off1+int32(off2)] {sym} base val mem)
+-(MOVWstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+- (MOVWstore [off1+int32(off2)] {sym} base val mem)
+-(MOVDstore [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
+- (MOVDstore [off1+int32(off2)] {sym} base val mem)
+-(MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
+-(MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
+-(MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
+-(MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem) && is32Bit(int64(off1)+off2) => (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
++(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
++ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
++ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
++ (MOV(B|BU|H|HU|W|WU|D)load [off1+off2] {mergeSym(sym1,sym2)} base mem)
++
++(MOV(B|H|W|D)store [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem) &&
++ is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) &&
++ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
++ (MOV(B|H|W|D)store [off1+off2] {mergeSym(sym1,sym2)} base val mem)
++
++(MOV(B|H|W|D)storezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem) &&
++ canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) &&
++ (base.Op != OpSB || !config.ctxt.Flag_dynlink) =>
++ (MOV(B|H|W|D)storezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
++
++(MOV(B|BU|H|HU|W|WU|D)load [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
++ (MOV(B|BU|H|HU|W|WU|D)load [off1+int32(off2)] {sym} base mem)
++
++(MOV(B|H|W|D)store [off1] {sym} (ADDI [off2] base) val mem) && is32Bit(int64(off1)+off2) =>
++ (MOV(B|H|W|D)store [off1+int32(off2)] {sym} base val mem)
++
++(MOV(B|H|W|D)storezero [off1] {sym} (ADDI [off2] base) mem) && is32Bit(int64(off1)+off2) =>
++ (MOV(B|H|W|D)storezero [off1+int32(off2)] {sym} base mem)
+
+ // Similarly, fold ADDI into MOVaddr to avoid confusing live variable analysis
+ // with OffPtr -> ADDI.
+diff --git a/src/cmd/compile/internal/ssa/rewriteRISCV64.go b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+index f033b25bdd..9243702508 100644
+--- a/src/cmd/compile/internal/ssa/rewriteRISCV64.go
++++ b/src/cmd/compile/internal/ssa/rewriteRISCV64.go
+@@ -3870,8 +3870,10 @@ func rewriteValueRISCV64_OpRISCV64FSUBS(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVBUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -3883,7 +3885,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBUload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBUload)
+@@ -4177,8 +4179,10 @@ func rewriteValueRISCV64_OpRISCV64MOVBUreg(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVBload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4190,7 +4194,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBload)
+@@ -4303,8 +4307,10 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVBstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVBstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4317,7 +4323,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstore)
+@@ -4471,9 +4477,11 @@ func rewriteValueRISCV64_OpRISCV64MOVBstore(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+- // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+- // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
++ b := v.Block
++ config := b.Func.Config
++ // match: (MOVBstorezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
++ // result: (MOVBstorezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+@@ -4482,20 +4490,20 @@ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+- // match: (MOVBstorezero [off1] {sym} (ADDI [off2] ptr) mem)
++ // match: (MOVBstorezero [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+- // result: (MOVBstorezero [off1+int32(off2)] {sym} ptr mem)
++ // result: (MOVBstorezero [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+@@ -4503,7 +4511,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+@@ -4511,7 +4519,7 @@ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ v.reset(OpRISCV64MOVBstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+@@ -4519,8 +4527,10 @@ func rewriteValueRISCV64_OpRISCV64MOVBstorezero(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVDload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4532,7 +4542,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDload)
+@@ -4599,8 +4609,10 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVDstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVDstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4613,7 +4625,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstore)
+@@ -4665,9 +4677,11 @@ func rewriteValueRISCV64_OpRISCV64MOVDstore(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+- // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+- // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
++ b := v.Block
++ config := b.Func.Config
++ // match: (MOVDstorezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
++ // result: (MOVDstorezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+@@ -4676,20 +4690,20 @@ func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+- // match: (MOVDstorezero [off1] {sym} (ADDI [off2] ptr) mem)
++ // match: (MOVDstorezero [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+- // result: (MOVDstorezero [off1+int32(off2)] {sym} ptr mem)
++ // result: (MOVDstorezero [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+@@ -4697,7 +4711,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+@@ -4705,7 +4719,7 @@ func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ v.reset(OpRISCV64MOVDstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+@@ -4713,8 +4727,10 @@ func rewriteValueRISCV64_OpRISCV64MOVDstorezero(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVHUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4726,7 +4742,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHUload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHUload)
+@@ -4877,8 +4893,10 @@ func rewriteValueRISCV64_OpRISCV64MOVHUreg(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVHload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -4890,7 +4908,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHload)
+@@ -5047,8 +5065,10 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVHstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVHstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -5061,7 +5081,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstore)
+@@ -5181,9 +5201,11 @@ func rewriteValueRISCV64_OpRISCV64MOVHstore(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+- // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+- // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
++ b := v.Block
++ config := b.Func.Config
++ // match: (MOVHstorezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
++ // result: (MOVHstorezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+@@ -5192,20 +5214,20 @@ func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+- // match: (MOVHstorezero [off1] {sym} (ADDI [off2] ptr) mem)
++ // match: (MOVHstorezero [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+- // result: (MOVHstorezero [off1+int32(off2)] {sym} ptr mem)
++ // result: (MOVHstorezero [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+@@ -5213,7 +5235,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+@@ -5221,7 +5243,7 @@ func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ v.reset(OpRISCV64MOVHstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+@@ -5229,8 +5251,10 @@ func rewriteValueRISCV64_OpRISCV64MOVHstorezero(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVWUload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWUload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -5242,7 +5266,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWUload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWUload)
+@@ -5417,8 +5441,10 @@ func rewriteValueRISCV64_OpRISCV64MOVWUreg(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVWload [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWload [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -5430,7 +5456,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWload(v *Value) bool {
+ sym2 := auxToSym(v_0.Aux)
+ base := v_0.Args[0]
+ mem := v_1
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWload)
+@@ -5741,8 +5767,10 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ v_2 := v.Args[2]
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
++ b := v.Block
++ config := b.Func.Config
+ // match: (MOVWstore [off1] {sym1} (MOVaddr [off2] {sym2} base) val mem)
+- // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)
++ // cond: is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
+ // result: (MOVWstore [off1+off2] {mergeSym(sym1,sym2)} base val mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+@@ -5755,7 +5783,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ base := v_0.Args[0]
+ val := v_1
+ mem := v_2
+- if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2)) {
++ if !(is32Bit(int64(off1)+int64(off2)) && canMergeSym(sym1, sym2) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstore)
+@@ -5841,9 +5869,11 @@ func rewriteValueRISCV64_OpRISCV64MOVWstore(v *Value) bool {
+ func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v_1 := v.Args[1]
+ v_0 := v.Args[0]
+- // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} ptr) mem)
+- // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2))
+- // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} ptr mem)
++ b := v.Block
++ config := b.Func.Config
++ // match: (MOVWstorezero [off1] {sym1} (MOVaddr [off2] {sym2} base) mem)
++ // cond: canMergeSym(sym1,sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)
++ // result: (MOVWstorezero [off1+off2] {mergeSym(sym1,sym2)} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym1 := auxToSym(v.Aux)
+@@ -5852,20 +5882,20 @@ func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ }
+ off2 := auxIntToInt32(v_0.AuxInt)
+ sym2 := auxToSym(v_0.Aux)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+- if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2))) {
++ if !(canMergeSym(sym1, sym2) && is32Bit(int64(off1)+int64(off2)) && (base.Op != OpSB || !config.ctxt.Flag_dynlink)) {
+ break
+ }
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + off2)
+ v.Aux = symToAux(mergeSym(sym1, sym2))
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+- // match: (MOVWstorezero [off1] {sym} (ADDI [off2] ptr) mem)
++ // match: (MOVWstorezero [off1] {sym} (ADDI [off2] base) mem)
+ // cond: is32Bit(int64(off1)+off2)
+- // result: (MOVWstorezero [off1+int32(off2)] {sym} ptr mem)
++ // result: (MOVWstorezero [off1+int32(off2)] {sym} base mem)
+ for {
+ off1 := auxIntToInt32(v.AuxInt)
+ sym := auxToSym(v.Aux)
+@@ -5873,7 +5903,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ break
+ }
+ off2 := auxIntToInt64(v_0.AuxInt)
+- ptr := v_0.Args[0]
++ base := v_0.Args[0]
+ mem := v_1
+ if !(is32Bit(int64(off1) + off2)) {
+ break
+@@ -5881,7 +5911,7 @@ func rewriteValueRISCV64_OpRISCV64MOVWstorezero(v *Value) bool {
+ v.reset(OpRISCV64MOVWstorezero)
+ v.AuxInt = int32ToAuxInt(off1 + int32(off2))
+ v.Aux = symToAux(sym)
+- v.AddArg2(ptr, mem)
++ v.AddArg2(base, mem)
+ return true
+ }
+ return false
+--
+2.48.1.windows.1
+
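For context, the scenario this change affects is a host program loading a Go plugin on linux/riscv64; building with -buildmode=plugin puts the compiler into dynamic-linking mode, which is what Flag_dynlink reflects in the guard above. A minimal sketch of the host side using the standard plugin package is shown below; the shared-object name example_plugin.so and the exported symbol Greeting are placeholders.

package main

import (
	"fmt"
	"plugin"
)

func main() {
	// Open a plugin previously built with `go build -buildmode=plugin`.
	p, err := plugin.Open("example_plugin.so")
	if err != nil {
		panic(err)
	}
	// Look up an exported variable; plugin symbols are resolved within
	// the plugin's own scope, not in a globally merged symbol table.
	sym, err := p.Lookup("Greeting")
	if err != nil {
		panic(err)
	}
	fmt.Println(*sym.(*string))
}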