Source file src/cmd/compile/internal/walk/order.go

     1  // Copyright 2012 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  package walk
     6  
     7  import (
     8  	"fmt"
     9  	"go/constant"
    10  
    11  	"cmd/compile/internal/base"
    12  	"cmd/compile/internal/ir"
    13  	"cmd/compile/internal/reflectdata"
    14  	"cmd/compile/internal/staticinit"
    15  	"cmd/compile/internal/typecheck"
    16  	"cmd/compile/internal/types"
    17  	"cmd/internal/objabi"
    18  	"cmd/internal/src"
    19  )
    20  
    21  // Rewrite tree to use separate statements to enforce
    22  // order of evaluation. Makes walk easier, because it
    23  // can (after this runs) reorder at will within an expression.
    24  //
    25  // Rewrite m[k] op= r into m[k] = m[k] op r if op is / or %.
    26  //
    27  // Introduce temporaries as needed by runtime routines.
    28  // For example, the map runtime routines take the map key
    29  // by reference, so make sure all map keys are addressable
    30  // by copying them to temporaries as needed.
    31  // The same is true for channel operations.
    32  //
    33  // Arrange that map index expressions only appear in direct
    34  // assignments x = m[k] or m[k] = x, never in larger expressions.
    35  //
    36  // Arrange that receive expressions only appear in direct assignments
    37  // x = <-c or as standalone statements <-c, never in larger expressions.
    38  
    39  // TODO(rsc): The temporary introduction during multiple assignments
    40  // should be moved into this file, so that the temporaries can be cleaned
    41  // and so that conversions implicit in the OAS2FUNC and OAS2RECV
    42  // nodes can be made explicit and then have their temporaries cleaned.
    43  
    44  // TODO(rsc): Goto and multilevel break/continue can jump over
    45  // inserted VARKILL annotations. Work out a way to handle these.
    46  // The current implementation is safe, in that it will execute correctly.
    47  // But it won't reuse temporaries as aggressively as it might, and
    48  // it can result in unnecessary zeroing of those variables in the function
    49  // prologue.
    50  
// orderState holds state during the ordering process.
// One orderState is used per statement list being ordered (see orderBlock,
// orderStmtInPlace, exprInPlace); nested lists get fresh states that share
// only the free-temporary map.
type orderState struct {
	out  []ir.Node             // list of generated statements
	temp []*ir.Name            // stack of temporary variables
	free map[string][]*ir.Name // free list of unused temporaries, by type.LinkString().
	edit func(ir.Node) ir.Node // cached closure of o.exprNoLHS
}
    58  
    59  // Order rewrites fn.Nbody to apply the ordering constraints
    60  // described in the comment at the top of the file.
    61  func order(fn *ir.Func) {
    62  	if base.Flag.W > 1 {
    63  		s := fmt.Sprintf("\nbefore order %v", fn.Sym())
    64  		ir.DumpList(s, fn.Body)
    65  	}
    66  
    67  	orderBlock(&fn.Body, map[string][]*ir.Name{})
    68  }
    69  
    70  // append typechecks stmt and appends it to out.
    71  func (o *orderState) append(stmt ir.Node) {
    72  	o.out = append(o.out, typecheck.Stmt(stmt))
    73  }
    74  
    75  // newTemp allocates a new temporary with the given type,
    76  // pushes it onto the temp stack, and returns it.
    77  // If clear is true, newTemp emits code to zero the temporary.
    78  func (o *orderState) newTemp(t *types.Type, clear bool) *ir.Name {
    79  	var v *ir.Name
    80  	key := t.LinkString()
    81  	if a := o.free[key]; len(a) > 0 {
    82  		v = a[len(a)-1]
    83  		if !types.Identical(t, v.Type()) {
    84  			base.Fatalf("expected %L to have type %v", v, t)
    85  		}
    86  		o.free[key] = a[:len(a)-1]
    87  	} else {
    88  		v = typecheck.Temp(t)
    89  	}
    90  	if clear {
    91  		o.append(ir.NewAssignStmt(base.Pos, v, nil))
    92  	}
    93  
    94  	o.temp = append(o.temp, v)
    95  	return v
    96  }
    97  
    98  // copyExpr behaves like newTemp but also emits
    99  // code to initialize the temporary to the value n.
   100  func (o *orderState) copyExpr(n ir.Node) *ir.Name {
   101  	return o.copyExpr1(n, false)
   102  }
   103  
   104  // copyExprClear is like copyExpr but clears the temp before assignment.
   105  // It is provided for use when the evaluation of tmp = n turns into
   106  // a function call that is passed a pointer to the temporary as the output space.
   107  // If the call blocks before tmp has been written,
   108  // the garbage collector will still treat the temporary as live,
   109  // so we must zero it before entering that call.
   110  // Today, this only happens for channel receive operations.
   111  // (The other candidate would be map access, but map access
   112  // returns a pointer to the result data instead of taking a pointer
   113  // to be filled in.)
   114  func (o *orderState) copyExprClear(n ir.Node) *ir.Name {
   115  	return o.copyExpr1(n, true)
   116  }
   117  
   118  func (o *orderState) copyExpr1(n ir.Node, clear bool) *ir.Name {
   119  	t := n.Type()
   120  	v := o.newTemp(t, clear)
   121  	o.append(ir.NewAssignStmt(base.Pos, v, n))
   122  	return v
   123  }
   124  
   125  // cheapExpr returns a cheap version of n.
   126  // The definition of cheap is that n is a variable or constant.
   127  // If not, cheapExpr allocates a new tmp, emits tmp = n,
   128  // and then returns tmp.
   129  func (o *orderState) cheapExpr(n ir.Node) ir.Node {
   130  	if n == nil {
   131  		return nil
   132  	}
   133  
   134  	switch n.Op() {
   135  	case ir.ONAME, ir.OLITERAL, ir.ONIL:
   136  		return n
   137  	case ir.OLEN, ir.OCAP:
   138  		n := n.(*ir.UnaryExpr)
   139  		l := o.cheapExpr(n.X)
   140  		if l == n.X {
   141  			return n
   142  		}
   143  		a := ir.SepCopy(n).(*ir.UnaryExpr)
   144  		a.X = l
   145  		return typecheck.Expr(a)
   146  	}
   147  
   148  	return o.copyExpr(n)
   149  }
   150  
// safeExpr returns a safe version of n.
// The definition of safe is that n can appear multiple times
// without violating the semantics of the original program,
// and that assigning to the safe version has the same effect
// as assigning to the original n.
//
// The intended use is to apply to x when rewriting x += y into x = x + y.
//
// Because the result must remain assignable, only the *operands* of n are
// made cheap; the outermost operation (field select, deref, index) is
// preserved so a store through the result hits the original location.
func (o *orderState) safeExpr(n ir.Node) ir.Node {
	switch n.Op() {
	case ir.ONAME, ir.OLITERAL, ir.ONIL:
		// Names and constants are already safe to repeat.
		return n

	case ir.OLEN, ir.OCAP:
		// len(x)/cap(x): make the operand safe; reuse n if unchanged.
		n := n.(*ir.UnaryExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.UnaryExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOT:
		// x.F on a value: the base itself must stay assignable,
		// so recurse with safeExpr (not cheapExpr).
		n := n.(*ir.SelectorExpr)
		l := o.safeExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODOTPTR:
		// p.F through a pointer: copying the pointer is enough;
		// the store still goes through the same pointee.
		n := n.(*ir.SelectorExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.SelectorExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.ODEREF:
		// *p: as with ODOTPTR, a cheap copy of the pointer suffices.
		n := n.(*ir.StarExpr)
		l := o.cheapExpr(n.X)
		if l == n.X {
			return n
		}
		a := ir.SepCopy(n).(*ir.StarExpr)
		a.X = l
		return typecheck.Expr(a)

	case ir.OINDEX, ir.OINDEXMAP:
		// x[i]: for an array value the base must remain assignable
		// (safeExpr); for slices/maps a cheap copy of the header or
		// map pointer aliases the same storage (cheapExpr).
		n := n.(*ir.IndexExpr)
		var l ir.Node
		if n.X.Type().IsArray() {
			l = o.safeExpr(n.X)
		} else {
			l = o.cheapExpr(n.X)
		}
		r := o.cheapExpr(n.Index)
		if l == n.X && r == n.Index {
			return n
		}
		a := ir.SepCopy(n).(*ir.IndexExpr)
		a.X = l
		a.Index = r
		return typecheck.Expr(a)

	default:
		base.Fatalf("order.safeExpr %v", n.Op())
		return nil // not reached
	}
}
   225  
   226  // isaddrokay reports whether it is okay to pass n's address to runtime routines.
   227  // Taking the address of a variable makes the liveness and optimization analyses
   228  // lose track of where the variable's lifetime ends. To avoid hurting the analyses
   229  // of ordinary stack variables, those are not 'isaddrokay'. Temporaries are okay,
   230  // because we emit explicit VARKILL instructions marking the end of those
   231  // temporaries' lifetimes.
   232  func isaddrokay(n ir.Node) bool {
   233  	return ir.IsAddressable(n) && (n.Op() != ir.ONAME || n.(*ir.Name).Class == ir.PEXTERN || ir.IsAutoTmp(n))
   234  }
   235  
   236  // addrTemp ensures that n is okay to pass by address to runtime routines.
   237  // If the original argument n is not okay, addrTemp creates a tmp, emits
   238  // tmp = n, and then returns tmp.
   239  // The result of addrTemp MUST be assigned back to n, e.g.
   240  // 	n.Left = o.addrTemp(n.Left)
   241  func (o *orderState) addrTemp(n ir.Node) ir.Node {
   242  	if n.Op() == ir.OLITERAL || n.Op() == ir.ONIL {
   243  		// TODO: expand this to all static composite literal nodes?
   244  		n = typecheck.DefaultLit(n, nil)
   245  		types.CalcSize(n.Type())
   246  		vstat := readonlystaticname(n.Type())
   247  		var s staticinit.Schedule
   248  		s.StaticAssign(vstat, 0, n, n.Type())
   249  		if s.Out != nil {
   250  			base.Fatalf("staticassign of const generated code: %+v", n)
   251  		}
   252  		vstat = typecheck.Expr(vstat).(*ir.Name)
   253  		return vstat
   254  	}
   255  	if isaddrokay(n) {
   256  		return n
   257  	}
   258  	return o.copyExpr(n)
   259  }
   260  
// mapKeyTemp prepares n to be a key in a map runtime call and returns n.
// It should only be used for map runtime calls which have *_fast* versions.
// For the slow path the key is passed by address (addrTemp); for fast
// paths the key is passed by value and must be converted to the exact
// parameter type kt expected by the runtime routine.
func (o *orderState) mapKeyTemp(t *types.Type, n ir.Node) ir.Node {
	// Most map calls need to take the address of the key.
	// Exception: map*_fast* calls. See golang.org/issue/19015.
	alg := mapfast(t)
	if alg == mapslow {
		return o.addrTemp(n)
	}
	// kt is the parameter type of the chosen map*_fast* routine.
	var kt *types.Type
	switch alg {
	case mapfast32:
		kt = types.Types[types.TUINT32]
	case mapfast64:
		kt = types.Types[types.TUINT64]
	case mapfast32ptr, mapfast64ptr:
		kt = types.Types[types.TUNSAFEPTR]
	case mapfaststr:
		kt = types.Types[types.TSTRING]
	}
	nt := n.Type()
	switch {
	case nt == kt:
		// Already the exact type the runtime wants.
		return n
	case nt.Kind() == kt.Kind(), nt.IsPtrShaped() && kt.IsPtrShaped():
		// can directly convert (e.g. named type to underlying type, or one pointer to another)
		return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONVNOP, kt, n))
	case nt.IsInteger() && kt.IsInteger():
		// can directly convert (e.g. int32 to uint32)
		if n.Op() == ir.OLITERAL && nt.IsSigned() {
			// avoid constant overflow error
			n = ir.NewConstExpr(constant.MakeUint64(uint64(ir.Int64Val(n))), n)
			n.SetType(kt)
			return n
		}
		return typecheck.Expr(ir.NewConvExpr(n.Pos(), ir.OCONV, kt, n))
	default:
		// Unsafe cast through memory.
		// We'll need to do a load with type kt. Create a temporary of type kt to
		// ensure sufficient alignment. nt may be under-aligned.
		if uint8(kt.Alignment()) < uint8(nt.Alignment()) {
			base.Fatalf("mapKeyTemp: key type is not sufficiently aligned, kt=%v nt=%v", kt, nt)
		}
		tmp := o.newTemp(kt, true)
		// *(*nt)(&tmp) = n
		var e ir.Node = typecheck.NodAddr(tmp)
		e = ir.NewConvExpr(n.Pos(), ir.OCONVNOP, nt.PtrTo(), e)
		e = ir.NewStarExpr(n.Pos(), e)
		o.append(ir.NewAssignStmt(base.Pos, e, n))
		return tmp
	}
}
   313  
   314  // mapKeyReplaceStrConv replaces OBYTES2STR by OBYTES2STRTMP
   315  // in n to avoid string allocations for keys in map lookups.
   316  // Returns a bool that signals if a modification was made.
   317  //
   318  // For:
   319  //  x = m[string(k)]
   320  //  x = m[T1{... Tn{..., string(k), ...}]
   321  // where k is []byte, T1 to Tn is a nesting of struct and array literals,
   322  // the allocation of backing bytes for the string can be avoided
   323  // by reusing the []byte backing array. These are special cases
   324  // for avoiding allocations when converting byte slices to strings.
   325  // It would be nice to handle these generally, but because
   326  // []byte keys are not allowed in maps, the use of string(k)
   327  // comes up in important cases in practice. See issue 3512.
   328  func mapKeyReplaceStrConv(n ir.Node) bool {
   329  	var replaced bool
   330  	switch n.Op() {
   331  	case ir.OBYTES2STR:
   332  		n := n.(*ir.ConvExpr)
   333  		n.SetOp(ir.OBYTES2STRTMP)
   334  		replaced = true
   335  	case ir.OSTRUCTLIT:
   336  		n := n.(*ir.CompLitExpr)
   337  		for _, elem := range n.List {
   338  			elem := elem.(*ir.StructKeyExpr)
   339  			if mapKeyReplaceStrConv(elem.Value) {
   340  				replaced = true
   341  			}
   342  		}
   343  	case ir.OARRAYLIT:
   344  		n := n.(*ir.CompLitExpr)
   345  		for _, elem := range n.List {
   346  			if elem.Op() == ir.OKEY {
   347  				elem = elem.(*ir.KeyExpr).Value
   348  			}
   349  			if mapKeyReplaceStrConv(elem) {
   350  				replaced = true
   351  			}
   352  		}
   353  	}
   354  	return replaced
   355  }
   356  
// ordermarker is a saved position on the temporary-variable stack,
// returned by markTemp and later consumed by popTemp/cleanTemp.
type ordermarker int

// markTemp returns the top of the temporary variable stack.
func (o *orderState) markTemp() ordermarker {
	return ordermarker(len(o.temp))
}
   363  
   364  // popTemp pops temporaries off the stack until reaching the mark,
   365  // which must have been returned by markTemp.
   366  func (o *orderState) popTemp(mark ordermarker) {
   367  	for _, n := range o.temp[mark:] {
   368  		key := n.Type().LinkString()
   369  		o.free[key] = append(o.free[key], n)
   370  	}
   371  	o.temp = o.temp[:mark]
   372  }
   373  
   374  // cleanTempNoPop emits VARKILL instructions to *out
   375  // for each temporary above the mark on the temporary stack.
   376  // It does not pop the temporaries from the stack.
   377  func (o *orderState) cleanTempNoPop(mark ordermarker) []ir.Node {
   378  	var out []ir.Node
   379  	for i := len(o.temp) - 1; i >= int(mark); i-- {
   380  		n := o.temp[i]
   381  		out = append(out, typecheck.Stmt(ir.NewUnaryExpr(base.Pos, ir.OVARKILL, n)))
   382  	}
   383  	return out
   384  }
   385  
   386  // cleanTemp emits VARKILL instructions for each temporary above the
   387  // mark on the temporary stack and removes them from the stack.
   388  func (o *orderState) cleanTemp(top ordermarker) {
   389  	o.out = append(o.out, o.cleanTempNoPop(top)...)
   390  	o.popTemp(top)
   391  }
   392  
   393  // stmtList orders each of the statements in the list.
   394  func (o *orderState) stmtList(l ir.Nodes) {
   395  	s := l
   396  	for i := range s {
   397  		orderMakeSliceCopy(s[i:])
   398  		o.stmt(s[i])
   399  	}
   400  }
   401  
// orderMakeSliceCopy matches the pattern:
//  m = OMAKESLICE([]T, x); OCOPY(m, s)
// and rewrites it to:
//  m = OMAKESLICECOPY([]T, x, s); nil
// The second statement is replaced by nil in place; callers
// (stmtList via o.stmt) must tolerate nil statements.
func orderMakeSliceCopy(s []ir.Node) {
	// Skip the optimization with -N or when instrumenting (race/msan),
	// which need the original statement structure.
	if base.Flag.N != 0 || base.Flag.Cfg.Instrumenting {
		return
	}
	if len(s) < 2 || s[0] == nil || s[0].Op() != ir.OAS || s[1] == nil || s[1].Op() != ir.OCOPY {
		return
	}

	as := s[0].(*ir.AssignStmt)
	cp := s[1].(*ir.BinaryExpr)
	if as.Y == nil || as.Y.Op() != ir.OMAKESLICE || ir.IsBlank(as.X) ||
		as.X.Op() != ir.ONAME || cp.X.Op() != ir.ONAME || cp.Y.Op() != ir.ONAME ||
		as.X.Name() != cp.X.Name() || cp.X.Name() == cp.Y.Name() {
		// The line above this one is correct with the differing equality operators:
		// we want as.X and cp.X to be the same name,
		// but we want the initial data to be coming from a different name.
		return
	}

	mk := as.Y.(*ir.MakeExpr)
	// Only fuse heap-allocated make([]T, len) with no explicit cap.
	if mk.Esc() == ir.EscNone || mk.Len == nil || mk.Cap != nil {
		return
	}
	mk.SetOp(ir.OMAKESLICECOPY)
	mk.Cap = cp.Y
	// Set bounded when m = OMAKESLICE([]T, len(s)); OCOPY(m, s)
	mk.SetBounded(mk.Len.Op() == ir.OLEN && ir.SameSafeExpr(mk.Len.(*ir.UnaryExpr).X, cp.Y))
	as.Y = typecheck.Expr(mk)
	s[1] = nil // remove separate copy call
}
   436  
   437  // edge inserts coverage instrumentation for libfuzzer.
   438  func (o *orderState) edge() {
   439  	if base.Debug.Libfuzzer == 0 {
   440  		return
   441  	}
   442  
   443  	// Create a new uint8 counter to be allocated in section
   444  	// __libfuzzer_extra_counters.
   445  	counter := staticinit.StaticName(types.Types[types.TUINT8])
   446  	counter.SetLibfuzzerExtraCounter(true)
   447  	// As well as setting SetLibfuzzerExtraCounter, we preemptively set the
   448  	// symbol type to SLIBFUZZER_EXTRA_COUNTER so that the race detector
   449  	// instrumentation pass (which does not have access to the flags set by
   450  	// SetLibfuzzerExtraCounter) knows to ignore them. This information is
   451  	// lost by the time it reaches the compile step, so SetLibfuzzerExtraCounter
   452  	// is still necessary.
   453  	counter.Linksym().Type = objabi.SLIBFUZZER_EXTRA_COUNTER
   454  
   455  	// counter += 1
   456  	incr := ir.NewAssignOpStmt(base.Pos, ir.OADD, counter, ir.NewInt(1))
   457  	o.append(incr)
   458  }
   459  
   460  // orderBlock orders the block of statements in n into a new slice,
   461  // and then replaces the old slice in n with the new slice.
   462  // free is a map that can be used to obtain temporary variables by type.
   463  func orderBlock(n *ir.Nodes, free map[string][]*ir.Name) {
   464  	var order orderState
   465  	order.free = free
   466  	mark := order.markTemp()
   467  	order.edge()
   468  	order.stmtList(*n)
   469  	order.cleanTemp(mark)
   470  	*n = order.out
   471  }
   472  
   473  // exprInPlace orders the side effects in *np and
   474  // leaves them as the init list of the final *np.
   475  // The result of exprInPlace MUST be assigned back to n, e.g.
   476  // 	n.Left = o.exprInPlace(n.Left)
   477  func (o *orderState) exprInPlace(n ir.Node) ir.Node {
   478  	var order orderState
   479  	order.free = o.free
   480  	n = order.expr(n, nil)
   481  	n = ir.InitExpr(order.out, n)
   482  
   483  	// insert new temporaries from order
   484  	// at head of outer list.
   485  	o.temp = append(o.temp, order.temp...)
   486  	return n
   487  }
   488  
   489  // orderStmtInPlace orders the side effects of the single statement *np
   490  // and replaces it with the resulting statement list.
   491  // The result of orderStmtInPlace MUST be assigned back to n, e.g.
   492  // 	n.Left = orderStmtInPlace(n.Left)
   493  // free is a map that can be used to obtain temporary variables by type.
   494  func orderStmtInPlace(n ir.Node, free map[string][]*ir.Name) ir.Node {
   495  	var order orderState
   496  	order.free = free
   497  	mark := order.markTemp()
   498  	order.stmt(n)
   499  	order.cleanTemp(mark)
   500  	return ir.NewBlockStmt(src.NoXPos, order.out)
   501  }
   502  
   503  // init moves n's init list to o.out.
   504  func (o *orderState) init(n ir.Node) {
   505  	if ir.MayBeShared(n) {
   506  		// For concurrency safety, don't mutate potentially shared nodes.
   507  		// First, ensure that no work is required here.
   508  		if len(n.Init()) > 0 {
   509  			base.Fatalf("order.init shared node with ninit")
   510  		}
   511  		return
   512  	}
   513  	o.stmtList(ir.TakeInit(n))
   514  }
   515  
// call orders the call expression n.
// n.Op is OCALLFUNC/OCALLINTER or a builtin like OCOPY.
// The caller must already have moved n's init list out via o.init(nn).
func (o *orderState) call(nn ir.Node) {
	if len(nn.Init()) > 0 {
		// Caller should have already called o.init(nn).
		base.Fatalf("%v with unexpected ninit", nn.Op())
	}
	if nn.Op() == ir.OCALLMETH {
		base.FatalfAt(nn.Pos(), "OCALLMETH missed by typecheck")
	}

	// Builtin functions.
	// Dispatch on the concrete node shape to order each operand.
	if nn.Op() != ir.OCALLFUNC && nn.Op() != ir.OCALLINTER {
		switch n := nn.(type) {
		default:
			base.Fatalf("unexpected call: %+v", n)
		case *ir.UnaryExpr:
			n.X = o.expr(n.X, nil)
		case *ir.ConvExpr:
			n.X = o.expr(n.X, nil)
		case *ir.BinaryExpr:
			n.X = o.expr(n.X, nil)
			n.Y = o.expr(n.Y, nil)
		case *ir.MakeExpr:
			n.Len = o.expr(n.Len, nil)
			n.Cap = o.expr(n.Cap, nil)
		case *ir.CallExpr:
			o.exprList(n.Args)
		}
		return
	}

	n := nn.(*ir.CallExpr)
	typecheck.FixVariadicCall(n)

	if isFuncPCIntrinsic(n) && isIfaceOfFunc(n.Args[0]) {
		// For internal/abi.FuncPCABIxxx(fn), if fn is a defined function,
		// do not introduce temporaries here, so it is easier to rewrite it
		// to symbol address reference later in walk.
		return
	}

	// Ordinary call: order the callee and then each argument.
	n.X = o.expr(n.X, nil)
	o.exprList(n.Args)
}
   561  
   562  // mapAssign appends n to o.out.
   563  func (o *orderState) mapAssign(n ir.Node) {
   564  	switch n.Op() {
   565  	default:
   566  		base.Fatalf("order.mapAssign %v", n.Op())
   567  
   568  	case ir.OAS:
   569  		n := n.(*ir.AssignStmt)
   570  		if n.X.Op() == ir.OINDEXMAP {
   571  			n.Y = o.safeMapRHS(n.Y)
   572  		}
   573  		o.out = append(o.out, n)
   574  	case ir.OASOP:
   575  		n := n.(*ir.AssignOpStmt)
   576  		if n.X.Op() == ir.OINDEXMAP {
   577  			n.Y = o.safeMapRHS(n.Y)
   578  		}
   579  		o.out = append(o.out, n)
   580  	}
   581  }
   582  
   583  func (o *orderState) safeMapRHS(r ir.Node) ir.Node {
   584  	// Make sure we evaluate the RHS before starting the map insert.
   585  	// We need to make sure the RHS won't panic.  See issue 22881.
   586  	if r.Op() == ir.OAPPEND {
   587  		r := r.(*ir.CallExpr)
   588  		s := r.Args[1:]
   589  		for i, n := range s {
   590  			s[i] = o.cheapExpr(n)
   591  		}
   592  		return r
   593  	}
   594  	return o.cheapExpr(r)
   595  }
   596  
   597  // stmt orders the statement n, appending to o.out.
   598  // Temporaries created during the statement are cleaned
   599  // up using VARKILL instructions as possible.
   600  func (o *orderState) stmt(n ir.Node) {
   601  	if n == nil {
   602  		return
   603  	}
   604  
   605  	lno := ir.SetPos(n)
   606  	o.init(n)
   607  
   608  	switch n.Op() {
   609  	default:
   610  		base.Fatalf("order.stmt %v", n.Op())
   611  
   612  	case ir.OVARKILL, ir.OVARLIVE, ir.OINLMARK:
   613  		o.out = append(o.out, n)
   614  
   615  	case ir.OAS:
   616  		n := n.(*ir.AssignStmt)
   617  		t := o.markTemp()
   618  		n.X = o.expr(n.X, nil)
   619  		n.Y = o.expr(n.Y, n.X)
   620  		o.mapAssign(n)
   621  		o.cleanTemp(t)
   622  
   623  	case ir.OASOP:
   624  		n := n.(*ir.AssignOpStmt)
   625  		t := o.markTemp()
   626  		n.X = o.expr(n.X, nil)
   627  		n.Y = o.expr(n.Y, nil)
   628  
   629  		if base.Flag.Cfg.Instrumenting || n.X.Op() == ir.OINDEXMAP && (n.AsOp == ir.ODIV || n.AsOp == ir.OMOD) {
   630  			// Rewrite m[k] op= r into m[k] = m[k] op r so
   631  			// that we can ensure that if op panics
   632  			// because r is zero, the panic happens before
   633  			// the map assignment.
   634  			// DeepCopy is a big hammer here, but safeExpr
   635  			// makes sure there is nothing too deep being copied.
   636  			l1 := o.safeExpr(n.X)
   637  			l2 := ir.DeepCopy(src.NoXPos, l1)
   638  			if l2.Op() == ir.OINDEXMAP {
   639  				l2 := l2.(*ir.IndexExpr)
   640  				l2.Assigned = false
   641  			}
   642  			l2 = o.copyExpr(l2)
   643  			r := o.expr(typecheck.Expr(ir.NewBinaryExpr(n.Pos(), n.AsOp, l2, n.Y)), nil)
   644  			as := typecheck.Stmt(ir.NewAssignStmt(n.Pos(), l1, r))
   645  			o.mapAssign(as)
   646  			o.cleanTemp(t)
   647  			return
   648  		}
   649  
   650  		o.mapAssign(n)
   651  		o.cleanTemp(t)
   652  
   653  	case ir.OAS2:
   654  		n := n.(*ir.AssignListStmt)
   655  		t := o.markTemp()
   656  		o.exprList(n.Lhs)
   657  		o.exprList(n.Rhs)
   658  		o.out = append(o.out, n)
   659  		o.cleanTemp(t)
   660  
   661  	// Special: avoid copy of func call n.Right
   662  	case ir.OAS2FUNC:
   663  		n := n.(*ir.AssignListStmt)
   664  		t := o.markTemp()
   665  		o.exprList(n.Lhs)
   666  		call := n.Rhs[0]
   667  		o.init(call)
   668  		if ic, ok := call.(*ir.InlinedCallExpr); ok {
   669  			o.stmtList(ic.Body)
   670  
   671  			n.SetOp(ir.OAS2)
   672  			n.Rhs = ic.ReturnVars
   673  
   674  			o.exprList(n.Rhs)
   675  			o.out = append(o.out, n)
   676  		} else {
   677  			o.call(call)
   678  			o.as2func(n)
   679  		}
   680  		o.cleanTemp(t)
   681  
   682  	// Special: use temporary variables to hold result,
   683  	// so that runtime can take address of temporary.
   684  	// No temporary for blank assignment.
   685  	//
   686  	// OAS2MAPR: make sure key is addressable if needed,
   687  	//           and make sure OINDEXMAP is not copied out.
   688  	case ir.OAS2DOTTYPE, ir.OAS2RECV, ir.OAS2MAPR:
   689  		n := n.(*ir.AssignListStmt)
   690  		t := o.markTemp()
   691  		o.exprList(n.Lhs)
   692  
   693  		switch r := n.Rhs[0]; r.Op() {
   694  		case ir.ODOTTYPE2:
   695  			r := r.(*ir.TypeAssertExpr)
   696  			r.X = o.expr(r.X, nil)
   697  		case ir.ODYNAMICDOTTYPE2:
   698  			r := r.(*ir.DynamicTypeAssertExpr)
   699  			r.X = o.expr(r.X, nil)
   700  			r.T = o.expr(r.T, nil)
   701  		case ir.ORECV:
   702  			r := r.(*ir.UnaryExpr)
   703  			r.X = o.expr(r.X, nil)
   704  		case ir.OINDEXMAP:
   705  			r := r.(*ir.IndexExpr)
   706  			r.X = o.expr(r.X, nil)
   707  			r.Index = o.expr(r.Index, nil)
   708  			// See similar conversion for OINDEXMAP below.
   709  			_ = mapKeyReplaceStrConv(r.Index)
   710  			r.Index = o.mapKeyTemp(r.X.Type(), r.Index)
   711  		default:
   712  			base.Fatalf("order.stmt: %v", r.Op())
   713  		}
   714  
   715  		o.as2ok(n)
   716  		o.cleanTemp(t)
   717  
   718  	// Special: does not save n onto out.
   719  	case ir.OBLOCK:
   720  		n := n.(*ir.BlockStmt)
   721  		o.stmtList(n.List)
   722  
   723  	// Special: n->left is not an expression; save as is.
   724  	case ir.OBREAK,
   725  		ir.OCONTINUE,
   726  		ir.ODCL,
   727  		ir.ODCLCONST,
   728  		ir.ODCLTYPE,
   729  		ir.OFALL,
   730  		ir.OGOTO,
   731  		ir.OLABEL,
   732  		ir.OTAILCALL:
   733  		o.out = append(o.out, n)
   734  
   735  	// Special: handle call arguments.
   736  	case ir.OCALLFUNC, ir.OCALLINTER:
   737  		n := n.(*ir.CallExpr)
   738  		t := o.markTemp()
   739  		o.call(n)
   740  		o.out = append(o.out, n)
   741  		o.cleanTemp(t)
   742  
   743  	case ir.OINLCALL:
   744  		n := n.(*ir.InlinedCallExpr)
   745  		o.stmtList(n.Body)
   746  
   747  		// discard results; double-check for no side effects
   748  		for _, result := range n.ReturnVars {
   749  			if staticinit.AnySideEffects(result) {
   750  				base.FatalfAt(result.Pos(), "inlined call result has side effects: %v", result)
   751  			}
   752  		}
   753  
   754  	case ir.OCHECKNIL, ir.OCLOSE, ir.OPANIC, ir.ORECV:
   755  		n := n.(*ir.UnaryExpr)
   756  		t := o.markTemp()
   757  		n.X = o.expr(n.X, nil)
   758  		o.out = append(o.out, n)
   759  		o.cleanTemp(t)
   760  
   761  	case ir.OCOPY:
   762  		n := n.(*ir.BinaryExpr)
   763  		t := o.markTemp()
   764  		n.X = o.expr(n.X, nil)
   765  		n.Y = o.expr(n.Y, nil)
   766  		o.out = append(o.out, n)
   767  		o.cleanTemp(t)
   768  
   769  	case ir.OPRINT, ir.OPRINTN, ir.ORECOVERFP:
   770  		n := n.(*ir.CallExpr)
   771  		t := o.markTemp()
   772  		o.call(n)
   773  		o.out = append(o.out, n)
   774  		o.cleanTemp(t)
   775  
   776  	// Special: order arguments to inner call but not call itself.
   777  	case ir.ODEFER, ir.OGO:
   778  		n := n.(*ir.GoDeferStmt)
   779  		t := o.markTemp()
   780  		o.init(n.Call)
   781  		o.call(n.Call)
   782  		o.out = append(o.out, n)
   783  		o.cleanTemp(t)
   784  
   785  	case ir.ODELETE:
   786  		n := n.(*ir.CallExpr)
   787  		t := o.markTemp()
   788  		n.Args[0] = o.expr(n.Args[0], nil)
   789  		n.Args[1] = o.expr(n.Args[1], nil)
   790  		n.Args[1] = o.mapKeyTemp(n.Args[0].Type(), n.Args[1])
   791  		o.out = append(o.out, n)
   792  		o.cleanTemp(t)
   793  
   794  	// Clean temporaries from condition evaluation at
   795  	// beginning of loop body and after for statement.
   796  	case ir.OFOR:
   797  		n := n.(*ir.ForStmt)
   798  		t := o.markTemp()
   799  		n.Cond = o.exprInPlace(n.Cond)
   800  		n.Body.Prepend(o.cleanTempNoPop(t)...)
   801  		orderBlock(&n.Body, o.free)
   802  		n.Post = orderStmtInPlace(n.Post, o.free)
   803  		o.out = append(o.out, n)
   804  		o.cleanTemp(t)
   805  
   806  	// Clean temporaries from condition at
   807  	// beginning of both branches.
   808  	case ir.OIF:
   809  		n := n.(*ir.IfStmt)
   810  		t := o.markTemp()
   811  		n.Cond = o.exprInPlace(n.Cond)
   812  		n.Body.Prepend(o.cleanTempNoPop(t)...)
   813  		n.Else.Prepend(o.cleanTempNoPop(t)...)
   814  		o.popTemp(t)
   815  		orderBlock(&n.Body, o.free)
   816  		orderBlock(&n.Else, o.free)
   817  		o.out = append(o.out, n)
   818  
   819  	case ir.ORANGE:
   820  		// n.Right is the expression being ranged over.
   821  		// order it, and then make a copy if we need one.
   822  		// We almost always do, to ensure that we don't
   823  		// see any value changes made during the loop.
   824  		// Usually the copy is cheap (e.g., array pointer,
   825  		// chan, slice, string are all tiny).
   826  		// The exception is ranging over an array value
   827  		// (not a slice, not a pointer to array),
   828  		// which must make a copy to avoid seeing updates made during
   829  		// the range body. Ranging over an array value is uncommon though.
   830  
   831  		// Mark []byte(str) range expression to reuse string backing storage.
   832  		// It is safe because the storage cannot be mutated.
   833  		n := n.(*ir.RangeStmt)
   834  		if n.X.Op() == ir.OSTR2BYTES {
   835  			n.X.(*ir.ConvExpr).SetOp(ir.OSTR2BYTESTMP)
   836  		}
   837  
   838  		t := o.markTemp()
   839  		n.X = o.expr(n.X, nil)
   840  
   841  		orderBody := true
   842  		xt := typecheck.RangeExprType(n.X.Type())
   843  		switch xt.Kind() {
   844  		default:
   845  			base.Fatalf("order.stmt range %v", n.Type())
   846  
   847  		case types.TARRAY, types.TSLICE:
   848  			if n.Value == nil || ir.IsBlank(n.Value) {
   849  				// for i := range x will only use x once, to compute len(x).
   850  				// No need to copy it.
   851  				break
   852  			}
   853  			fallthrough
   854  
   855  		case types.TCHAN, types.TSTRING:
   856  			// chan, string, slice, array ranges use value multiple times.
   857  			// make copy.
   858  			r := n.X
   859  
   860  			if r.Type().IsString() && r.Type() != types.Types[types.TSTRING] {
   861  				r = ir.NewConvExpr(base.Pos, ir.OCONV, nil, r)
   862  				r.SetType(types.Types[types.TSTRING])
   863  				r = typecheck.Expr(r)
   864  			}
   865  
   866  			n.X = o.copyExpr(r)
   867  
   868  		case types.TMAP:
   869  			if isMapClear(n) {
   870  				// Preserve the body of the map clear pattern so it can
   871  				// be detected during walk. The loop body will not be used
   872  				// when optimizing away the range loop to a runtime call.
   873  				orderBody = false
   874  				break
   875  			}
   876  
   877  			// copy the map value in case it is a map literal.
   878  			// TODO(rsc): Make tmp = literal expressions reuse tmp.
   879  			// For maps tmp is just one word so it hardly matters.
   880  			r := n.X
   881  			n.X = o.copyExpr(r)
   882  
   883  			// n.Prealloc is the temp for the iterator.
   884  			// MapIterType contains pointers and needs to be zeroed.
   885  			n.Prealloc = o.newTemp(reflectdata.MapIterType(xt), true)
   886  		}
   887  		n.Key = o.exprInPlace(n.Key)
   888  		n.Value = o.exprInPlace(n.Value)
   889  		if orderBody {
   890  			orderBlock(&n.Body, o.free)
   891  		}
   892  		o.out = append(o.out, n)
   893  		o.cleanTemp(t)
   894  
   895  	case ir.ORETURN:
   896  		n := n.(*ir.ReturnStmt)
   897  		o.exprList(n.Results)
   898  		o.out = append(o.out, n)
   899  
   900  	// Special: clean case temporaries in each block entry.
   901  	// Select must enter one of its blocks, so there is no
   902  	// need for a cleaning at the end.
   903  	// Doubly special: evaluation order for select is stricter
   904  	// than ordinary expressions. Even something like p.c
   905  	// has to be hoisted into a temporary, so that it cannot be
   906  	// reordered after the channel evaluation for a different
   907  	// case (if p were nil, then the timing of the fault would
   908  	// give this away).
   909  	case ir.OSELECT:
   910  		n := n.(*ir.SelectStmt)
   911  		t := o.markTemp()
   912  		for _, ncas := range n.Cases {
   913  			r := ncas.Comm
   914  			ir.SetPos(ncas)
   915  
   916  			// Append any new body prologue to ninit.
   917  			// The next loop will insert ninit into nbody.
   918  			if len(ncas.Init()) != 0 {
   919  				base.Fatalf("order select ninit")
   920  			}
   921  			if r == nil {
   922  				continue
   923  			}
   924  			switch r.Op() {
   925  			default:
   926  				ir.Dump("select case", r)
   927  				base.Fatalf("unknown op in select %v", r.Op())
   928  
   929  			case ir.OSELRECV2:
   930  				// case x, ok = <-c
   931  				r := r.(*ir.AssignListStmt)
   932  				recv := r.Rhs[0].(*ir.UnaryExpr)
   933  				recv.X = o.expr(recv.X, nil)
   934  				if !ir.IsAutoTmp(recv.X) {
   935  					recv.X = o.copyExpr(recv.X)
   936  				}
   937  				init := ir.TakeInit(r)
   938  
   939  				colas := r.Def
   940  				do := func(i int, t *types.Type) {
   941  					n := r.Lhs[i]
   942  					if ir.IsBlank(n) {
   943  						return
   944  					}
   945  					// If this is case x := <-ch or case x, y := <-ch, the case has
   946  					// the ODCL nodes to declare x and y. We want to delay that
   947  					// declaration (and possible allocation) until inside the case body.
   948  					// Delete the ODCL nodes here and recreate them inside the body below.
   949  					if colas {
   950  						if len(init) > 0 && init[0].Op() == ir.ODCL && init[0].(*ir.Decl).X == n {
   951  							init = init[1:]
   952  
   953  							// iimport may have added a default initialization assignment,
   954  							// due to how it handles ODCL statements.
   955  							if len(init) > 0 && init[0].Op() == ir.OAS && init[0].(*ir.AssignStmt).X == n {
   956  								init = init[1:]
   957  							}
   958  						}
   959  						dcl := typecheck.Stmt(ir.NewDecl(base.Pos, ir.ODCL, n.(*ir.Name)))
   960  						ncas.PtrInit().Append(dcl)
   961  					}
   962  					tmp := o.newTemp(t, t.HasPointers())
   963  					as := typecheck.Stmt(ir.NewAssignStmt(base.Pos, n, typecheck.Conv(tmp, n.Type())))
   964  					ncas.PtrInit().Append(as)
   965  					r.Lhs[i] = tmp
   966  				}
   967  				do(0, recv.X.Type().Elem())
   968  				do(1, types.Types[types.TBOOL])
   969  				if len(init) != 0 {
   970  					ir.DumpList("ninit", r.Init())
   971  					base.Fatalf("ninit on select recv")
   972  				}
   973  				orderBlock(ncas.PtrInit(), o.free)
   974  
   975  			case ir.OSEND:
   976  				r := r.(*ir.SendStmt)
   977  				if len(r.Init()) != 0 {
   978  					ir.DumpList("ninit", r.Init())
   979  					base.Fatalf("ninit on select send")
   980  				}
   981  
   982  				// case c <- x
   983  				// r->left is c, r->right is x, both are always evaluated.
   984  				r.Chan = o.expr(r.Chan, nil)
   985  
   986  				if !ir.IsAutoTmp(r.Chan) {
   987  					r.Chan = o.copyExpr(r.Chan)
   988  				}
   989  				r.Value = o.expr(r.Value, nil)
   990  				if !ir.IsAutoTmp(r.Value) {
   991  					r.Value = o.copyExpr(r.Value)
   992  				}
   993  			}
   994  		}
   995  		// Now that we have accumulated all the temporaries, clean them.
   996  		// Also insert any ninit queued during the previous loop.
   997  		// (The temporary cleaning must follow that ninit work.)
   998  		for _, cas := range n.Cases {
   999  			orderBlock(&cas.Body, o.free)
  1000  			cas.Body.Prepend(o.cleanTempNoPop(t)...)
  1001  
  1002  			// TODO(mdempsky): Is this actually necessary?
  1003  			// walkSelect appears to walk Ninit.
  1004  			cas.Body.Prepend(ir.TakeInit(cas)...)
  1005  		}
  1006  
  1007  		o.out = append(o.out, n)
  1008  		o.popTemp(t)
  1009  
  1010  	// Special: value being sent is passed as a pointer; make it addressable.
  1011  	case ir.OSEND:
  1012  		n := n.(*ir.SendStmt)
  1013  		t := o.markTemp()
  1014  		n.Chan = o.expr(n.Chan, nil)
  1015  		n.Value = o.expr(n.Value, nil)
  1016  		if base.Flag.Cfg.Instrumenting {
  1017  			// Force copying to the stack so that (chan T)(nil) <- x
  1018  			// is still instrumented as a read of x.
  1019  			n.Value = o.copyExpr(n.Value)
  1020  		} else {
  1021  			n.Value = o.addrTemp(n.Value)
  1022  		}
  1023  		o.out = append(o.out, n)
  1024  		o.cleanTemp(t)
  1025  
  1026  	// TODO(rsc): Clean temporaries more aggressively.
  1027  	// Note that because walkSwitch will rewrite some of the
  1028  	// switch into a binary search, this is not as easy as it looks.
  1029  	// (If we ran that code here we could invoke order.stmt on
  1030  	// the if-else chain instead.)
  1031  	// For now just clean all the temporaries at the end.
  1032  	// In practice that's fine.
  1033  	case ir.OSWITCH:
  1034  		n := n.(*ir.SwitchStmt)
  1035  		if base.Debug.Libfuzzer != 0 && !hasDefaultCase(n) {
  1036  			// Add empty "default:" case for instrumentation.
  1037  			n.Cases = append(n.Cases, ir.NewCaseStmt(base.Pos, nil, nil))
  1038  		}
  1039  
  1040  		t := o.markTemp()
  1041  		n.Tag = o.expr(n.Tag, nil)
  1042  		for _, ncas := range n.Cases {
  1043  			o.exprListInPlace(ncas.List)
  1044  			orderBlock(&ncas.Body, o.free)
  1045  		}
  1046  
  1047  		o.out = append(o.out, n)
  1048  		o.cleanTemp(t)
  1049  	}
  1050  
  1051  	base.Pos = lno
  1052  }
  1053  
  1054  func hasDefaultCase(n *ir.SwitchStmt) bool {
  1055  	for _, ncas := range n.Cases {
  1056  		if len(ncas.List) == 0 {
  1057  			return true
  1058  		}
  1059  	}
  1060  	return false
  1061  }
  1062  
  1063  // exprList orders the expression list l into o.
  1064  func (o *orderState) exprList(l ir.Nodes) {
  1065  	s := l
  1066  	for i := range s {
  1067  		s[i] = o.expr(s[i], nil)
  1068  	}
  1069  }
  1070  
  1071  // exprListInPlace orders the expression list l but saves
  1072  // the side effects on the individual expression ninit lists.
  1073  func (o *orderState) exprListInPlace(l ir.Nodes) {
  1074  	s := l
  1075  	for i := range s {
  1076  		s[i] = o.exprInPlace(s[i])
  1077  	}
  1078  }
  1079  
// exprNoLHS is expr with lhs fixed to nil. It exists so a single
// closure over it can be cached in o.edit and reused as the callback
// for ir.EditChildren (see the default case in expr1).
func (o *orderState) exprNoLHS(n ir.Node) ir.Node {
	return o.expr(n, nil)
}
  1083  
  1084  // expr orders a single expression, appending side
  1085  // effects to o.out as needed.
  1086  // If this is part of an assignment lhs = *np, lhs is given.
  1087  // Otherwise lhs == nil. (When lhs != nil it may be possible
  1088  // to avoid copying the result of the expression to a temporary.)
  1089  // The result of expr MUST be assigned back to n, e.g.
  1090  // 	n.Left = o.expr(n.Left, lhs)
  1091  func (o *orderState) expr(n, lhs ir.Node) ir.Node {
  1092  	if n == nil {
  1093  		return n
  1094  	}
  1095  	lno := ir.SetPos(n)
  1096  	n = o.expr1(n, lhs)
  1097  	base.Pos = lno
  1098  	return n
  1099  }
  1100  
// expr1 implements the ordering rewrite for a single expression n.
// lhs, if non-nil, is the destination of an enclosing assignment
// lhs = n, which may allow skipping a temporary copy. Callers should
// use expr, which saves and restores base.Pos around this call.
// Each case returns the (possibly replaced) expression for itself.
func (o *orderState) expr1(n, lhs ir.Node) ir.Node {
	o.init(n)

	switch n.Op() {
	default:
		// No special handling required: just order all children in place.
		if o.edit == nil {
			o.edit = o.exprNoLHS // create closure once
		}
		ir.EditChildren(n, o.edit)
		return n

	// Addition of strings turns into a function call.
	// Allocate a temporary to hold the strings.
	// Fewer than 5 strings use direct runtime helpers.
	case ir.OADDSTR:
		n := n.(*ir.AddStringExpr)
		o.exprList(n.List)

		if len(n.List) > 5 {
			t := types.NewArray(types.Types[types.TSTRING], int64(len(n.List)))
			n.Prealloc = o.newTemp(t, false)
		}

		// Mark string(byteSlice) arguments to reuse byteSlice backing
		// buffer during conversion. String concatenation does not
		// memorize the strings for later use, so it is safe.
		// However, we can do it only if there is at least one non-empty string literal.
		// Otherwise if all other arguments are empty strings,
		// concatstrings will return the reference to the temp string
		// to the caller.
		hasbyte := false

		haslit := false
		for _, n1 := range n.List {
			hasbyte = hasbyte || n1.Op() == ir.OBYTES2STR
			haslit = haslit || n1.Op() == ir.OLITERAL && len(ir.StringVal(n1)) != 0
		}

		if haslit && hasbyte {
			for _, n2 := range n.List {
				if n2.Op() == ir.OBYTES2STR {
					n2 := n2.(*ir.ConvExpr)
					n2.SetOp(ir.OBYTES2STRTMP)
				}
			}
		}
		return n

	case ir.OINDEXMAP:
		// Map index expressions need an addressable key and, in some
		// cases, an immediate copy of the result (see needCopy below).
		n := n.(*ir.IndexExpr)
		n.X = o.expr(n.X, nil)
		n.Index = o.expr(n.Index, nil)
		needCopy := false

		if !n.Assigned {
			// Enforce that any []byte slices we are not copying
			// can not be changed before the map index by forcing
			// the map index to happen immediately following the
			// conversions. See copyExpr a few lines below.
			needCopy = mapKeyReplaceStrConv(n.Index)

			if base.Flag.Cfg.Instrumenting {
				// Race detector needs the copy.
				needCopy = true
			}
		}

		// key must be addressable
		n.Index = o.mapKeyTemp(n.X.Type(), n.Index)
		if needCopy {
			return o.copyExpr(n)
		}
		return n

	// concrete type (not interface) argument might need an addressable
	// temporary to pass to the runtime conversion routine.
	case ir.OCONVIFACE, ir.OCONVIDATA:
		n := n.(*ir.ConvExpr)
		n.X = o.expr(n.X, nil)
		if n.X.Type().IsInterface() {
			return n
		}
		if _, _, needsaddr := dataWordFuncName(n.X.Type()); needsaddr || isStaticCompositeLiteral(n.X) {
			// Need a temp if we need to pass the address to the conversion function.
			// We also process static composite literal node here, making a named static global
			// whose address we can put directly in an interface (see OCONVIFACE/OCONVIDATA case in walk).
			n.X = o.addrTemp(n.X)
		}
		return n

	case ir.OCONVNOP:
		n := n.(*ir.ConvExpr)
		if n.X.Op() == ir.OCALLMETH {
			base.FatalfAt(n.X.Pos(), "OCALLMETH missed by typecheck")
		}
		if n.Type().IsKind(types.TUNSAFEPTR) && n.X.Type().IsKind(types.TUINTPTR) && (n.X.Op() == ir.OCALLFUNC || n.X.Op() == ir.OCALLINTER) {
			call := n.X.(*ir.CallExpr)
			// When reordering unsafe.Pointer(f()) into a separate
			// statement, the conversion and function call must stay
			// together. See golang.org/issue/15329.
			o.init(call)
			o.call(call)
			if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
				return o.copyExpr(n)
			}
		} else {
			n.X = o.expr(n.X, nil)
		}
		return n

	case ir.OANDAND, ir.OOROR:
		// ... = LHS && RHS
		//
		// var r bool
		// r = LHS
		// if r {       // or !r, for OROR
		//     r = RHS
		// }
		// ... = r

		n := n.(*ir.LogicalExpr)
		r := o.newTemp(n.Type(), false)

		// Evaluate left-hand side.
		lhs := o.expr(n.X, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, lhs)))

		// Evaluate right-hand side, save generated code.
		saveout := o.out
		o.out = nil
		t := o.markTemp()
		o.edge()
		rhs := o.expr(n.Y, nil)
		o.out = append(o.out, typecheck.Stmt(ir.NewAssignStmt(base.Pos, r, rhs)))
		o.cleanTemp(t)
		gen := o.out
		o.out = saveout

		// If left-hand side doesn't cause a short-circuit, issue right-hand side.
		nif := ir.NewIfStmt(base.Pos, r, nil, nil)
		if n.Op() == ir.OANDAND {
			nif.Body = gen
		} else {
			nif.Else = gen
		}
		o.out = append(o.out, nif)
		return r

	case ir.OCALLMETH:
		base.FatalfAt(n.Pos(), "OCALLMETH missed by typecheck")
		panic("unreachable")

	// Calls and call-like builtins: order the call, then copy the
	// result to a temporary unless it is being assigned directly
	// to a name (and we are not instrumenting).
	case ir.OCALLFUNC,
		ir.OCALLINTER,
		ir.OCAP,
		ir.OCOMPLEX,
		ir.OCOPY,
		ir.OIMAG,
		ir.OLEN,
		ir.OMAKECHAN,
		ir.OMAKEMAP,
		ir.OMAKESLICE,
		ir.OMAKESLICECOPY,
		ir.ONEW,
		ir.OREAL,
		ir.ORECOVERFP,
		ir.OSTR2BYTES,
		ir.OSTR2BYTESTMP,
		ir.OSTR2RUNES:

		if isRuneCount(n) {
			// len([]rune(s)) is rewritten to runtime.countrunes(s) later.
			conv := n.(*ir.UnaryExpr).X.(*ir.ConvExpr)
			conv.X = o.expr(conv.X, nil)
		} else {
			o.call(n)
		}

		if lhs == nil || lhs.Op() != ir.ONAME || base.Flag.Cfg.Instrumenting {
			return o.copyExpr(n)
		}
		return n

	case ir.OINLCALL:
		// Inlined call: order the inlined body statements and replace
		// the call with its single result expression.
		n := n.(*ir.InlinedCallExpr)
		o.stmtList(n.Body)
		return n.SingleResult()

	case ir.OAPPEND:
		// Check for append(x, make([]T, y)...) .
		n := n.(*ir.CallExpr)
		if isAppendOfMake(n) {
			n.Args[0] = o.expr(n.Args[0], nil) // order x
			mk := n.Args[1].(*ir.MakeExpr)
			mk.Len = o.expr(mk.Len, nil) // order y
		} else {
			o.exprList(n.Args)
		}

		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.Args[0]) {
			return o.copyExpr(n)
		}
		return n

	case ir.OSLICE, ir.OSLICEARR, ir.OSLICESTR, ir.OSLICE3, ir.OSLICE3ARR:
		n := n.(*ir.SliceExpr)
		n.X = o.expr(n.X, nil)
		n.Low = o.cheapExpr(o.expr(n.Low, nil))
		n.High = o.cheapExpr(o.expr(n.High, nil))
		n.Max = o.cheapExpr(o.expr(n.Max, nil))
		if lhs == nil || lhs.Op() != ir.ONAME && !ir.SameSafeExpr(lhs, n.X) {
			return o.copyExpr(n)
		}
		return n

	case ir.OCLOSURE:
		// Transient closures that capture variables get a preallocated
		// temporary for their closure record.
		n := n.(*ir.ClosureExpr)
		if n.Transient() && len(n.Func.ClosureVars) > 0 {
			n.Prealloc = o.newTemp(typecheck.ClosureType(n), false)
		}
		return n

	case ir.OMETHVALUE:
		n := n.(*ir.SelectorExpr)
		n.X = o.expr(n.X, nil)
		if n.Transient() {
			t := typecheck.MethodValueType(n)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.OSLICELIT:
		n := n.(*ir.CompLitExpr)
		o.exprList(n.List)
		if n.Transient() {
			t := types.NewArray(n.Type().Elem(), n.Len)
			n.Prealloc = o.newTemp(t, false)
		}
		return n

	case ir.ODOTTYPE, ir.ODOTTYPE2:
		n := n.(*ir.TypeAssertExpr)
		n.X = o.expr(n.X, nil)
		if !types.IsDirectIface(n.Type()) || base.Flag.Cfg.Instrumenting {
			return o.copyExprClear(n)
		}
		return n

	case ir.ORECV:
		// Receive expressions must only appear in direct assignments
		// (see the file header comment), so always copy to a temporary.
		n := n.(*ir.UnaryExpr)
		n.X = o.expr(n.X, nil)
		return o.copyExprClear(n)

	case ir.OEQ, ir.ONE, ir.OLT, ir.OLE, ir.OGT, ir.OGE:
		n := n.(*ir.BinaryExpr)
		n.X = o.expr(n.X, nil)
		n.Y = o.expr(n.Y, nil)

		t := n.X.Type()
		switch {
		case t.IsString():
			// Mark string(byteSlice) arguments to reuse byteSlice backing
			// buffer during conversion. String comparison does not
			// memorize the strings for later use, so it is safe.
			if n.X.Op() == ir.OBYTES2STR {
				n.X.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}
			if n.Y.Op() == ir.OBYTES2STR {
				n.Y.(*ir.ConvExpr).SetOp(ir.OBYTES2STRTMP)
			}

		case t.IsStruct() || t.IsArray():
			// for complex comparisons, we need both args to be
			// addressable so we can pass them to the runtime.
			n.X = o.addrTemp(n.X)
			n.Y = o.addrTemp(n.Y)
		}
		return n

	case ir.OMAPLIT:
		// Order map by converting:
		//   map[int]int{
		//     a(): b(),
		//     c(): d(),
		//     e(): f(),
		//   }
		// to
		//   m := map[int]int{}
		//   m[a()] = b()
		//   m[c()] = d()
		//   m[e()] = f()
		// Then order the result.
		// Without this special case, order would otherwise compute all
		// the keys and values before storing any of them to the map.
		// See issue 26552.
		n := n.(*ir.CompLitExpr)
		entries := n.List
		statics := entries[:0]
		var dynamics []*ir.KeyExpr
		for _, r := range entries {
			r := r.(*ir.KeyExpr)

			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			// Recursively ordering some static entries can change them to dynamic;
			// e.g., OCONVIFACE nodes. See #31777.
			r = o.expr(r, nil).(*ir.KeyExpr)
			if !isStaticCompositeLiteral(r.Key) || !isStaticCompositeLiteral(r.Value) {
				dynamics = append(dynamics, r)
				continue
			}

			statics = append(statics, r)
		}
		n.List = statics

		if len(dynamics) == 0 {
			return n
		}

		// Emit the creation of the map (with all its static entries).
		m := o.newTemp(n.Type(), false)
		as := ir.NewAssignStmt(base.Pos, m, n)
		typecheck.Stmt(as)
		o.stmt(as)

		// Emit eval+insert of dynamic entries, one at a time.
		for _, r := range dynamics {
			as := ir.NewAssignStmt(base.Pos, ir.NewIndexExpr(base.Pos, m, r.Key), r.Value)
			typecheck.Stmt(as) // Note: this converts the OINDEX to an OINDEXMAP
			o.stmt(as)
		}
		return m
	}

	// No return - type-assertions above. Each case must return for itself.
}
  1441  
  1442  // as2func orders OAS2FUNC nodes. It creates temporaries to ensure left-to-right assignment.
  1443  // The caller should order the right-hand side of the assignment before calling order.as2func.
  1444  // It rewrites,
  1445  //	a, b, a = ...
  1446  // as
  1447  //	tmp1, tmp2, tmp3 = ...
  1448  //	a, b, a = tmp1, tmp2, tmp3
  1449  // This is necessary to ensure left to right assignment order.
  1450  func (o *orderState) as2func(n *ir.AssignListStmt) {
  1451  	results := n.Rhs[0].Type()
  1452  	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
  1453  	for i, nl := range n.Lhs {
  1454  		if !ir.IsBlank(nl) {
  1455  			typ := results.Field(i).Type
  1456  			tmp := o.newTemp(typ, typ.HasPointers())
  1457  			n.Lhs[i] = tmp
  1458  			as.Lhs = append(as.Lhs, nl)
  1459  			as.Rhs = append(as.Rhs, tmp)
  1460  		}
  1461  	}
  1462  
  1463  	o.out = append(o.out, n)
  1464  	o.stmt(typecheck.Stmt(as))
  1465  }
  1466  
  1467  // as2ok orders OAS2XXX with ok.
  1468  // Just like as2func, this also adds temporaries to ensure left-to-right assignment.
  1469  func (o *orderState) as2ok(n *ir.AssignListStmt) {
  1470  	as := ir.NewAssignListStmt(n.Pos(), ir.OAS2, nil, nil)
  1471  
  1472  	do := func(i int, typ *types.Type) {
  1473  		if nl := n.Lhs[i]; !ir.IsBlank(nl) {
  1474  			var tmp ir.Node = o.newTemp(typ, typ.HasPointers())
  1475  			n.Lhs[i] = tmp
  1476  			as.Lhs = append(as.Lhs, nl)
  1477  			if i == 1 {
  1478  				// The "ok" result is an untyped boolean according to the Go
  1479  				// spec. We need to explicitly convert it to the LHS type in
  1480  				// case the latter is a defined boolean type (#8475).
  1481  				tmp = typecheck.Conv(tmp, nl.Type())
  1482  			}
  1483  			as.Rhs = append(as.Rhs, tmp)
  1484  		}
  1485  	}
  1486  
  1487  	do(0, n.Rhs[0].Type())
  1488  	do(1, types.Types[types.TBOOL])
  1489  
  1490  	o.out = append(o.out, n)
  1491  	o.stmt(typecheck.Stmt(as))
  1492  }
  1493  
  1494  // isFuncPCIntrinsic returns whether n is a direct call of internal/abi.FuncPCABIxxx functions.
  1495  func isFuncPCIntrinsic(n *ir.CallExpr) bool {
  1496  	if n.Op() != ir.OCALLFUNC || n.X.Op() != ir.ONAME {
  1497  		return false
  1498  	}
  1499  	fn := n.X.(*ir.Name).Sym()
  1500  	return (fn.Name == "FuncPCABI0" || fn.Name == "FuncPCABIInternal") &&
  1501  		(fn.Pkg.Path == "internal/abi" || fn.Pkg == types.LocalPkg && base.Ctxt.Pkgpath == "internal/abi")
  1502  }
  1503  
  1504  // isIfaceOfFunc returns whether n is an interface conversion from a direct reference of a func.
  1505  func isIfaceOfFunc(n ir.Node) bool {
  1506  	return n.Op() == ir.OCONVIFACE && n.(*ir.ConvExpr).X.Op() == ir.ONAME && n.(*ir.ConvExpr).X.(*ir.Name).Class == ir.PFUNC
  1507  }
  1508  

View as plain text