| field | value | date |
|---|---|---|
| author | CoprDistGit <infra@openeuler.org> | 2024-09-12 04:23:51 +0000 |
| committer | CoprDistGit <infra@openeuler.org> | 2024-09-12 04:23:51 +0000 |
| commit | 86d143317839566c602c276fafb1a30ad469941e (patch) | |
| tree | 4e895b6b563710cbc2cce86ead21f0a8d58cdcd3 /backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch | |
| parent | 2784bd2c52574b27d271d643816d481d9e4dfc8c (diff) | |
automatic import of golang
Diffstat (limited to 'backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch')
-rw-r--r-- | backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch | 143 |
1 file changed, 143 insertions, 0 deletions
```diff
diff --git a/backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch b/backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch
new file mode 100644
index 0000000..80c9bbc
--- /dev/null
+++ b/backport-0014-cmd-compile-handle-constant-pointer-offsets-in-dead-.patch
@@ -0,0 +1,143 @@
+From 131f773664fa2d0dbc870e11c388a3131a3a2029 Mon Sep 17 00:00:00 2001
+From: Keith Randall <khr@golang.org>
+Date: Sun, 29 Oct 2023 21:00:29 -0700
+Subject: cmd/compile: handle constant pointer offsets in dead store elimination
+
+Conflict:NA
+Reference:https://go-review.googlesource.com/c/go/+/538595
+
+Update #63657
+Update #45573
+
+Change-Id: I163c6038c13d974dc0ca9f02144472bc05331826
+Reviewed-on: https://go-review.googlesource.com/c/go/+/538595
+LUCI-TryBot-Result: Go LUCI <golang-scoped@luci-project-accounts.iam.gserviceaccount.com>
+Reviewed-by: David Chase <drchase@google.com>
+Reviewed-by: Keith Randall <khr@google.com>
+---
+ src/cmd/compile/internal/ssa/deadstore.go | 64 ++++++++++++++++++++---
+ src/cmd/compile/internal/ssa/rewrite.go   |  6 +++
+ 2 files changed, 62 insertions(+), 8 deletions(-)
+
+diff --git a/src/cmd/compile/internal/ssa/deadstore.go b/src/cmd/compile/internal/ssa/deadstore.go
+index 648b68af78b1..7656e45cb9d5 100644
+--- a/src/cmd/compile/internal/ssa/deadstore.go
++++ b/src/cmd/compile/internal/ssa/deadstore.go
+@@ -73,9 +73,9 @@ func dse(f *Func) {
+ 	}
+ 
+ 	// Walk backwards looking for dead stores. Keep track of shadowed addresses.
+-	// A "shadowed address" is a pointer and a size describing a memory region that
+-	// is known to be written. We keep track of shadowed addresses in the shadowed
+-	// map, mapping the ID of the address to the size of the shadowed region.
++	// A "shadowed address" is a pointer, offset, and size describing a memory region that
++	// is known to be written. We keep track of shadowed addresses in the shadowed map,
++	// mapping the ID of the address to a shadowRange where future writes will happen.
+ 	// Since we're walking backwards, writes to a shadowed region are useless,
+ 	// as they will be immediately overwritten.
+ 	shadowed.clear()
+@@ -88,13 +88,20 @@ func dse(f *Func) {
+ 			shadowed.clear()
+ 		}
+ 		if v.Op == OpStore || v.Op == OpZero {
++			ptr := v.Args[0]
++			var off int64
++			for ptr.Op == OpOffPtr { // Walk to base pointer
++				off += ptr.AuxInt
++				ptr = ptr.Args[0]
++			}
+ 			var sz int64
+ 			if v.Op == OpStore {
+ 				sz = v.Aux.(*types.Type).Size()
+ 			} else { // OpZero
+ 				sz = v.AuxInt
+ 			}
+-			if shadowedSize := int64(shadowed.get(v.Args[0].ID)); shadowedSize != -1 && shadowedSize >= sz {
++			sr := shadowRange(shadowed.get(ptr.ID))
++			if sr.contains(off, off+sz) {
+ 				// Modify the store/zero into a copy of the memory state,
+ 				// effectively eliding the store operation.
+ 				if v.Op == OpStore {
+@@ -108,10 +115,8 @@ func dse(f *Func) {
+ 				v.AuxInt = 0
+ 				v.Op = OpCopy
+ 			} else {
+-				if sz > 0x7fffffff { // work around sparseMap's int32 value type
+-					sz = 0x7fffffff
+-				}
+-				shadowed.set(v.Args[0].ID, int32(sz))
++				// Extend shadowed region.
++				shadowed.set(ptr.ID, int32(sr.merge(off, off+sz)))
+ 			}
+ 		}
+ 		// walk to previous store
+@@ -131,6 +136,49 @@ func dse(f *Func) {
+ 	}
+ }
+ 
++// A shadowRange encodes a set of byte offsets [lo():hi()] from
++// a given pointer that will be written to later in the block.
++// A zero shadowRange encodes an empty shadowed range (and so
++// does a -1 shadowRange, which is what sparsemap.get returns
++// on a failed lookup).
++type shadowRange int32
++
++func (sr shadowRange) lo() int64 {
++	return int64(sr & 0xffff)
++}
++func (sr shadowRange) hi() int64 {
++	return int64((sr >> 16) & 0xffff)
++}
++
++// contains reports whether [lo:hi] is completely within sr.
++func (sr shadowRange) contains(lo, hi int64) bool {
++	return lo >= sr.lo() && hi <= sr.hi()
++}
++
++// merge returns the union of sr and [lo:hi].
++// merge is allowed to return something smaller than the union.
++func (sr shadowRange) merge(lo, hi int64) shadowRange {
++	if lo < 0 || hi > 0xffff {
++		// Ignore offsets that are too large or small.
++		return sr
++	}
++	if sr.lo() == sr.hi() {
++		// Old range is empty - use new one.
++		return shadowRange(lo + hi<<16)
++	}
++	if hi < sr.lo() || lo > sr.hi() {
++		// The two regions don't overlap or abut, so we would
++		// have to keep track of multiple disjoint ranges.
++		// Because we can only keep one, keep the larger one.
++		if sr.hi()-sr.lo() >= hi-lo {
++			return sr
++		}
++		return shadowRange(lo + hi<<16)
++	}
++	// Regions overlap or abut - compute the union.
++	return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
++}
++
+ // elimDeadAutosGeneric deletes autos that are never accessed. To achieve this
+ // we track the operations that the address of each auto reaches and if it only
+ // reaches stores then we delete all the stores. The other operations will then
+diff --git a/src/cmd/compile/internal/ssa/rewrite.go b/src/cmd/compile/internal/ssa/rewrite.go
+index 43843bda5536..5c7ed16f12be 100644
+--- a/src/cmd/compile/internal/ssa/rewrite.go
++++ b/src/cmd/compile/internal/ssa/rewrite.go
+@@ -1183,6 +1183,12 @@ func min(x, y int64) int64 {
+ 	}
+ 	return y
+ }
++func max(x, y int64) int64 {
++	if x > y {
++		return x
++	}
++	return y
++}
+ 
+ func isConstZero(v *Value) bool {
+ 	switch v.Op {
+-- 
+2.33.0
+ 
```
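The backported change replaces the old "size only" shadow tracking with a `shadowRange` that packs a byte interval relative to a base pointer into a single `int32`, so it still fits the `sparseMap`'s `int32` value slot: the low 16 bits hold `lo`, the next 16 bits hold `hi`. A later `OpStore`/`OpZero` whose `[off, off+sz)` interval is contained in the recorded range is dead and gets rewritten to `OpCopy`. The standalone sketch below mirrors those helpers outside the compiler so the encoding can be exercised directly; the `main` function and its sample offsets are illustrative only and are not part of the patch.

```go
package main

import "fmt"

// shadowRange packs a shadowed byte interval [lo, hi) relative to a base
// pointer into one int32: lo in the low 16 bits, hi in the next 16 bits.
// The zero value means "nothing shadowed", and so does -1 (the miss value
// the compiler's sparse map returns), because then lo() == hi().
type shadowRange int32

func (sr shadowRange) lo() int64 { return int64(sr & 0xffff) }
func (sr shadowRange) hi() int64 { return int64((sr >> 16) & 0xffff) }

// contains reports whether [lo, hi) lies entirely inside sr,
// i.e. whether a store covering [lo, hi) is shadowed and therefore dead.
func (sr shadowRange) contains(lo, hi int64) bool {
	return lo >= sr.lo() && hi <= sr.hi()
}

// merge widens sr with [lo, hi). Offsets outside the 16-bit window are
// ignored, and if the two ranges neither overlap nor abut, only the larger
// one is kept, since a single int32 cannot describe two disjoint ranges.
func (sr shadowRange) merge(lo, hi int64) shadowRange {
	if lo < 0 || hi > 0xffff {
		return sr
	}
	if sr.lo() == sr.hi() {
		return shadowRange(lo + hi<<16)
	}
	if hi < sr.lo() || lo > sr.hi() {
		if sr.hi()-sr.lo() >= hi-lo {
			return sr
		}
		return shadowRange(lo + hi<<16)
	}
	return shadowRange(min(lo, sr.lo()) + max(hi, sr.hi())<<16)
}

// min and max mirror the int64 helpers the patch relies on in rewrite.go.
func min(x, y int64) int64 {
	if x < y {
		return x
	}
	return y
}
func max(x, y int64) int64 {
	if x > y {
		return x
	}
	return y
}

func main() {
	var sr shadowRange               // empty: nothing shadowed yet
	sr = sr.merge(8, 16)             // a later write covers bytes [8,16)
	sr = sr.merge(16, 24)            // abuts the range, growing it to [8,24)
	fmt.Println(sr.lo(), sr.hi())    // 8 24
	fmt.Println(sr.contains(12, 20)) // true: a store to [12,20) would be dead
	fmt.Println(sr.contains(0, 8))   // false: [0,8) was never shadowed
}
```

Packing both offsets into one `int32` keeps the existing `sparseMap` value type unchanged, which is why offsets past `0xffff` are simply left untracked rather than widening the map.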