Source file src/runtime/cgocall.go

     1  // Copyright 2009 The Go Authors. All rights reserved.
     2  // Use of this source code is governed by a BSD-style
     3  // license that can be found in the LICENSE file.
     4  
     5  // Cgo call and callback support.
     6  //
     7  // To call into the C function f from Go, the cgo-generated code calls
     8  // runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
     9  // gcc-compiled function written by cgo.
    10  //
    11  // runtime.cgocall (below) calls entersyscall so as not to block
    12  // other goroutines or the garbage collector, and then calls
    13  // runtime.asmcgocall(_cgo_Cfunc_f, frame).
    14  //
    15  // runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
    16  // (assumed to be an operating system-allocated stack, so safe to run
    17  // gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
    18  //
    19  // _cgo_Cfunc_f invokes the actual C function f with arguments
    20  // taken from the frame structure, records the results in the frame,
    21  // and returns to runtime.asmcgocall.
    22  //
    23  // After it regains control, runtime.asmcgocall switches back to the
    24  // original g (m->curg)'s stack and returns to runtime.cgocall.
    25  //
    26  // After it regains control, runtime.cgocall calls exitsyscall, which blocks
    27  // until this m can run Go code without violating the $GOMAXPROCS limit,
    28  // and then unlocks g from m.
    29  //
    30  // The above description skipped over the possibility of the gcc-compiled
    31  // function f calling back into Go. If that happens, we continue down
    32  // the rabbit hole during the execution of f.
    33  //
    34  // To make it possible for gcc-compiled C code to call a Go function p.GoF,
    35  // cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
    36  // know about packages).  The gcc-compiled C function f calls GoF.
    37  //
    38  // GoF calls crosscall2(_cgoexp_GoF, frame, framesize).  Crosscall2
    39  // (in cgo/gcc_$GOARCH.S, a gcc-compiled assembly file) is a two-argument
    40  // adapter from the gcc function call ABI to the 6c function call ABI.
    41  // It is called from gcc to call 6c functions. In this case it calls
    42  // _cgoexp_GoF(frame, framesize), still running on m->g0's stack
    43  // and outside the $GOMAXPROCS limit. Thus, this code cannot yet
    44  // call arbitrary Go code directly and must be careful not to allocate
    45  // memory or use up m->g0's stack.
    46  //
    47  // _cgoexp_GoF calls runtime.cgocallback(p.GoF, frame, framesize, ctxt).
    48  // (The reason for having _cgoexp_GoF instead of writing a crosscall3
    49  // to make this call directly is that _cgoexp_GoF, because it is compiled
    50  // with 6c instead of gcc, can refer to dotted names like
    51  // runtime.cgocallback and p.GoF.)
    52  //
    53  // runtime.cgocallback (in asm_$GOARCH.s) switches from m->g0's
    54  // stack to the original g (m->curg)'s stack, on which it calls
    55  // runtime.cgocallbackg(p.GoF, frame, framesize).
    56  // As part of the stack switch, runtime.cgocallback saves the current
    57  // SP as m->g0->sched.sp, so that any use of m->g0's stack during the
    58  // execution of the callback will be done below the existing stack frames.
    59  // Before overwriting m->g0->sched.sp, it pushes the old value on the
    60  // m->g0 stack, so that it can be restored later.
    61  //
    62  // runtime.cgocallbackg (below) is now running on a real goroutine
    63  // stack (not an m->g0 stack).  First it calls runtime.exitsyscall, which will
    64  // block until the $GOMAXPROCS limit allows running this goroutine.
    65  // Once exitsyscall has returned, it is safe to do things like call the memory
    66  // allocator or invoke the Go callback function p.GoF.  runtime.cgocallbackg
    67  // first defers a function to unwind m->g0.sched.sp, so that if p.GoF
    68  // panics, m->g0.sched.sp will be restored to its old value: the m->g0 stack
    69  // and the m->curg stack will be unwound in lock step.
    70  // Then it calls p.GoF.  Finally it pops but does not execute the deferred
    71  // function, calls runtime.entersyscall, and returns to runtime.cgocallback.
    72  //
    73  // After it regains control, runtime.cgocallback switches back to
    74  // m->g0's stack (the pointer is still in m->g0.sched.sp), restores the old
    75  // m->g0.sched.sp value from the stack, and returns to _cgoexp_GoF.
    76  //
    77  // _cgoexp_GoF immediately returns to crosscall2, which restores the
    78  // callee-save registers for gcc and returns to GoF, which returns to f.
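// As a concrete, user-level illustration of the forward direction, consider a
// minimal cgo program (not part of the runtime; the C function add1 is made up
// purely for illustration):
//
//	package main
//
//	// static int add1(int x) { return x + 1; }
//	import "C"
//
//	import "fmt"
//
//	func main() {
//		// cgo compiles C.add1(41) into a generated Go wrapper that calls
//		// runtime.cgocall(_cgo_Cfunc_add1, frame), as described above.
//		fmt.Println(C.add1(41))
//	}
//
// The callback direction (C code calling an exported Go function) is sketched
// after cgocallbackg1 below.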
    79  
    80  package runtime
    81  
    82  import (
    83  	"runtime/internal/atomic"
    84  	"runtime/internal/sys"
    85  	"unsafe"
    86  )
    87  
    88  // Addresses collected in a cgo backtrace when crashing.
    89  // Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
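// These frames are gathered by the traceback function that the program
// registers with runtime.SetCgoTraceback: when a signal arrives while C code
// is executing, x_cgo_callers invokes that function to record the C stack
// into a cgoCallers buffer before passing the signal on to the Go runtime.
// See the runtime.SetCgoTraceback documentation for the traceback protocol.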
    90  type cgoCallers [32]uintptr
    91  
    92  // Call from Go to C.
    93  //
    94  // This must be nosplit because it's used for syscalls on some
    95  // platforms. Syscalls may have untyped arguments on the stack, so
    96  // it's not safe to grow or scan the stack.
    97  //
    98  //go:nosplit
    99  func cgocall(fn, arg unsafe.Pointer) int32 {
   100  	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
   101  		throw("cgocall unavailable")
   102  	}
   103  
   104  	if fn == nil {
   105  		throw("cgocall nil")
   106  	}
   107  
   108  	if raceenabled {
   109  		racereleasemerge(unsafe.Pointer(&racecgosync))
   110  	}
   111  
   112  	mp := getg().m
   113  	mp.ncgocall++
   114  	mp.ncgo++
   115  
   116  	// Reset traceback.
   117  	mp.cgoCallers[0] = 0
   118  
   119  	// Announce we are entering a system call
   120  	// so that the scheduler knows to create another
   121  	// M to run goroutines while we are in the
   122  	// foreign code.
   123  	//
   124  	// The call to asmcgocall is guaranteed not to
   125  	// grow the stack and does not allocate memory,
   126  	// so it is safe to call while "in a system call", outside
   127  	// the $GOMAXPROCS accounting.
   128  	//
   129  	// fn may call back into Go code, in which case we'll exit the
   130  	// "system call", run the Go code (which may grow the stack),
   131  	// and then re-enter the "system call" reusing the PC and SP
   132  	// saved by entersyscall here.
   133  	entersyscall()
   134  
   135  	// Tell asynchronous preemption that we're entering external
   136  	// code. We do this after entersyscall because this may block
   137  	// and cause an async preemption to fail, but at this point a
   138  	// sync preemption will succeed (though this is not a matter
   139  	// of correctness).
   140  	osPreemptExtEnter(mp)
   141  
   142  	mp.incgo = true
   143  	errno := asmcgocall(fn, arg)
   144  
   145  	// Update accounting before exitsyscall because exitsyscall may
   146  	// reschedule us on to a different M.
   147  	mp.incgo = false
   148  	mp.ncgo--
   149  
   150  	osPreemptExtExit(mp)
   151  
   152  	exitsyscall()
   153  
   154  	// Note that raceacquire must be called only after exitsyscall has
   155  	// wired this M to a P.
   156  	if raceenabled {
   157  		raceacquire(unsafe.Pointer(&racecgosync))
   158  	}
   159  
   160  	// From the garbage collector's perspective, time can move
   161  	// backwards in the sequence above. If there's a callback into
   162  	// Go code, GC will see this function at the call to
   163  	// asmcgocall. When the Go call later returns to C, the
   164  	// syscall PC/SP is rolled back and the GC sees this function
   165  	// back at the call to entersyscall. Normally, fn and arg
   166  	// would be live at entersyscall and dead at asmcgocall, so if
   167  	// time moved backwards, GC would see these arguments as dead
   168  	// and then live. Prevent these undead arguments from crashing
   169  	// GC by forcing them to stay live across this time warp.
   170  	KeepAlive(fn)
   171  	KeepAlive(arg)
   172  	KeepAlive(mp)
   173  
   174  	return errno
   175  }
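// Schematically, the cgo-generated wrapper that reaches cgocall looks roughly
// like the following. This is a simplified sketch of cgo's output (real
// generated code adds a package hash to the names and some keep-alive
// bookkeeping), shown only to make the connection to user code explicit:
//
//	//go:linkname _cgo_runtime_cgocall runtime.cgocall
//	func _cgo_runtime_cgocall(unsafe.Pointer, uintptr) int32
//
//	func _Cfunc_add1(p0 _Ctype_int) (r1 _Ctype_int) {
//		// p0 and r1 live contiguously in the wrapper's argument frame,
//		// so &p0 is the frame that the gcc-compiled _cgo_Cfunc_add1
//		// reads its arguments from and writes its results to.
//		_cgo_runtime_cgocall(_cgo_Cfunc_add1, uintptr(unsafe.Pointer(&p0)))
//		return
//	}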
   176  
   177  // Call from C back to Go.
   178  //go:nosplit
   179  func cgocallbackg(ctxt uintptr) {
   180  	gp := getg()
   181  	if gp != gp.m.curg {
   182  		println("runtime: bad g in cgocallback")
   183  		exit(2)
   184  	}
   185  
   186  	// The call from C is on gp.m's g0 stack, so we must ensure
   187  	// that we stay on that M. We have to do this before calling
   188  	// exitsyscall, since it would otherwise be free to move us to
   189  	// a different M. The call to unlockOSThread is in unwindm.
   190  	lockOSThread()
   191  
   192  	// Save current syscall parameters, so m.syscall can be
   193  	// used again if the callback decides to make a syscall.
   194  	syscall := gp.m.syscall
   195  
   196  	// entersyscall saves the caller's SP to allow the GC to trace the Go
   197  	// stack. However, since we're returning to an earlier stack frame and
   198  	// need to pair with the entersyscall() call made by cgocall, we must
   199  	// save syscall* and let reentersyscall restore them.
   200  	savedsp := unsafe.Pointer(gp.syscallsp)
   201  	savedpc := gp.syscallpc
   202  	exitsyscall() // coming out of cgo call
   203  	gp.m.incgo = false
   204  
   205  	osPreemptExtExit(gp.m)
   206  
   207  	cgocallbackg1(ctxt)
   208  
   209  	// At this point unlockOSThread has been called.
   210  	// The following code must not change to a different m.
   211  	// This is enforced by checking incgo in the schedule function.
   212  
   213  	osPreemptExtEnter(gp.m)
   214  
   215  	gp.m.incgo = true
   216  	// going back to cgo call
   217  	reentersyscall(savedpc, uintptr(savedsp))
   218  
   219  	gp.m.syscall = syscall
   220  }
   221  
   222  func cgocallbackg1(ctxt uintptr) {
   223  	gp := getg()
   224  	if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
   225  		gp.m.needextram = false
   226  		systemstack(newextram)
   227  	}
   228  
   229  	if ctxt != 0 {
   230  		s := append(gp.cgoCtxt, ctxt)
   231  
   232  		// Now we need to set gp.cgoCtxt = s, but we could get
   233  		// a SIGPROF signal while manipulating the slice, and
   234  		// the SIGPROF handler could pick up gp.cgoCtxt while
   235  		// tracing up the stack.  We need to ensure that the
   236  		// handler always sees a valid slice, so set the
   237  		// values in an order such that it always does.
   238  		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
   239  		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
   240  		p.cap = cap(s)
   241  		p.len = len(s)
   242  
   243  		defer func(gp *g) {
   244  			// Decrease the length of the slice by one, safely.
   245  			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
   246  			p.len--
   247  		}(gp)
   248  	}
   249  
   250  	if gp.m.ncgo == 0 {
   251  		// The C call to Go came from a thread not currently running
   252  		// any Go. In the case of -buildmode=c-archive or c-shared,
   253  		// this call may be coming in before package initialization
   254  		// is complete. Wait until it is.
   255  		<-main_init_done
   256  	}
   257  
   258  	// Add entry to defer stack in case of panic.
   259  	restore := true
   260  	defer unwindm(&restore)
   261  
   262  	if raceenabled {
   263  		raceacquire(unsafe.Pointer(&racecgosync))
   264  	}
   265  
   266  	type args struct {
   267  		fn      *funcval
   268  		arg     unsafe.Pointer
   269  		argsize uintptr
   270  	}
   271  	var cb *args
   272  
   273  	// Location of callback arguments depends on stack frame layout
   274  	// and size of stack frame of cgocallback_gofunc.
   275  	sp := gp.m.g0.sched.sp
   276  	switch GOARCH {
   277  	default:
   278  		throw("cgocallbackg is unimplemented on arch")
   279  	case "arm":
   280  		// On arm, stack frame is two words and there's a saved LR between
   281  		// SP and the stack frame and between the stack frame and the arguments.
   282  		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
   283  	case "arm64":
   284  		// On arm64, stack frame is four words and there's a saved LR between
   285  		// SP and the stack frame and between the stack frame and the arguments.
   286  		// Additional two words (16-byte alignment) are for saving FP.
   287  		cb = (*args)(unsafe.Pointer(sp + 7*sys.PtrSize))
   288  	case "amd64":
   289  		// On amd64, stack frame is two words, plus caller PC.
   290  		if framepointer_enabled {
   291  			// In this case, there's also saved BP.
   292  			cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
   293  			break
   294  		}
   295  		cb = (*args)(unsafe.Pointer(sp + 3*sys.PtrSize))
   296  	case "386":
   297  		// On 386, stack frame is three words, plus caller PC.
   298  		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
   299  	case "ppc64", "ppc64le", "s390x":
   300  		// On ppc64, ppc64le, and s390x, the callback arguments are in the arguments area of
   301  		// cgocallback's stack frame. The stack looks like this:
   302  		// +--------------------+------------------------------+
   303  		// |                    | ...                          |
   304  		// | cgoexp_$fn         +------------------------------+
   305  		// |                    | fixed frame area             |
   306  		// +--------------------+------------------------------+
   307  		// |                    | arguments area               |
   308  		// | cgocallback        +------------------------------+ <- sp + 2*minFrameSize + 2*ptrSize
   309  		// |                    | fixed frame area             |
   310  		// +--------------------+------------------------------+ <- sp + minFrameSize + 2*ptrSize
   311  		// |                    | local variables (2 pointers) |
   312  		// | cgocallback_gofunc +------------------------------+ <- sp + minFrameSize
   313  		// |                    | fixed frame area             |
   314  		// +--------------------+------------------------------+ <- sp
   315  		cb = (*args)(unsafe.Pointer(sp + 2*sys.MinFrameSize + 2*sys.PtrSize))
   316  	case "mips64", "mips64le":
   317  		// On mips64x, stack frame is two words and there's a saved LR between
   318  		// SP and the stack frame and between the stack frame and the arguments.
   319  		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
   320  	case "mips", "mipsle":
   321  		// On mipsx, stack frame is two words and there's a saved LR between
   322  		// SP and the stack frame and between the stack frame and the arguments.
   323  		cb = (*args)(unsafe.Pointer(sp + 4*sys.PtrSize))
   324  	}
   325  
   326  	// Invoke callback.
   327  	// NOTE(rsc): passing nil for argtype means that the copying of the
   328  	// results back into cb.arg happens without any corresponding write barriers.
   329  	// For cgo, cb.arg points into a C stack frame and therefore doesn't
   330  	// hold any pointers that the GC can find anyway - the write barrier
   331  	// would be a no-op.
   332  	reflectcall(nil, unsafe.Pointer(cb.fn), cb.arg, uint32(cb.argsize), 0)
   333  
   334  	if raceenabled {
   335  		racereleasemerge(unsafe.Pointer(&racecgosync))
   336  	}
   337  	if msanenabled {
   338  		// Tell msan that we wrote to the entire argument block.
   339  		// This tells msan that we set the results.
   340  		// Since we have already called the function it doesn't
   341  		// matter that we are writing to the non-result parameters.
   342  		msanwrite(cb.arg, cb.argsize)
   343  	}
   344  
   345  	// Do not unwind m->g0->sched.sp.
   346  	// Our caller, cgocallback, will do that.
   347  	restore = false
   348  }
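// For reference, user code reaches cgocallbackg via an exported Go function
// called from C. A minimal sketch (the names callFromC and GoCallback are made
// up; two files are needed because a file that uses //export may only declare,
// not define, C functions in its preamble):
//
//	// main.go
//	package main
//
//	// void callFromC(void); // defined in helper.c
//	import "C"
//
//	import "fmt"
//
//	//export GoCallback
//	func GoCallback(x C.int) { fmt.Println("in Go:", x) }
//
//	func main() { C.callFromC() }
//
//	// helper.c
//	#include "_cgo_export.h"
//	void callFromC(void) { GoCallback(42); }
//
// The C call to GoCallback goes through crosscall2 and runtime.cgocallback
// (see the comment at the top of this file) and lands in cgocallbackg above,
// which ultimately reflectcalls the Go function.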
   349  
   350  func unwindm(restore *bool) {
   351  	if *restore {
   352  		// Restore sp saved by cgocallback during
   353  		// unwind of g's stack (see comment at top of file).
   354  		mp := acquirem()
   355  		sched := &mp.g0.sched
   356  		switch GOARCH {
   357  		default:
   358  			throw("unwindm not implemented")
   359  		case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle":
   360  			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
   361  		case "arm64":
   362  			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
   363  		}
   364  
   365  		// Do the accounting that cgocall will not have a chance to do
   366  		// during an unwind.
   367  		//
   368  		// In the case where a Go call originates from C, ncgo is 0
   369  		// and there is no matching cgocall to end.
   370  		if mp.ncgo > 0 {
   371  			mp.incgo = false
   372  			mp.ncgo--
   373  			osPreemptExtExit(mp)
   374  		}
   375  
   376  		releasem(mp)
   377  	}
   378  
   379  	// Undo the call to lockOSThread in cgocallbackg.
   380  	// We must still stay on the same m.
   381  	unlockOSThread()
   382  }
   383  
   384  // called from assembly
   385  func badcgocallback() {
   386  	throw("misaligned stack in cgocallback")
   387  }
   388  
   389  // called from (incomplete) assembly
   390  func cgounimpl() {
   391  	throw("cgo not implemented")
   392  }
   393  
   394  var racecgosync uint64 // represents possible synchronization in C code
   395  
   396  // Pointer checking for cgo code.
   397  
   398  // We want to detect all cases where a program that does not use
   399  // unsafe makes a cgo call passing a Go pointer to memory that
   400  // contains a Go pointer. Here a Go pointer is defined as a pointer
   401  // to memory allocated by the Go runtime. Programs that use unsafe
   402  // can evade this restriction easily, so we don't try to catch them.
   403  // The cgo program will rewrite all possibly bad pointer arguments to
   404  // call cgoCheckPointer, where we can catch cases of a Go pointer
   405  // pointing to a Go pointer.
   406  
   407  // Complicating matters, taking the address of a slice or array
   408  // element permits the C program to access all elements of the slice
   409  // or array. In that case we will see a pointer to a single element,
   410  // but we need to check the entire data structure.
   411  
   412  // The cgoCheckPointer call takes additional arguments indicating that
   413  // it was called on an address expression. An additional argument of
   414  // true means that it only needs to check a single element. An
   415  // additional argument of a slice or array means that it needs to
   416  // check the entire slice/array, but nothing else. Otherwise, the
   417  // pointer could be anything, and we check the entire heap object,
   418  // which is conservative but safe.
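// For example (user code; f and g stand for arbitrary C functions that take a
// void* parameter), with cgocheck enabled:
//
//	type S struct{ p *int }
//	s := &S{p: new(int)}
//	C.f(unsafe.Pointer(s)) // panics: the memory behind s contains a Go pointer
//
//	buf := make([]byte, 64)
//	C.g(unsafe.Pointer(&buf[0])) // ok: the bytes contain no Go pointers
//
// cgo rewrites both calls so that the pointer arguments pass through
// cgoCheckPointer below; only the first trips cgoCheckPointerFail.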
   419  
   420  // When and if we implement a moving garbage collector,
   421  // cgoCheckPointer will pin the pointer for the duration of the cgo
   422  // call.  (This is necessary but not sufficient; the cgo program will
   423  // also have to change to pin Go pointers that cannot point to Go
   424  // pointers.)
   425  
   426  // cgoCheckPointer checks if the argument contains a Go pointer that
   427  // points to a Go pointer, and panics if it does.
   428  func cgoCheckPointer(ptr interface{}, arg interface{}) {
   429  	if debug.cgocheck == 0 {
   430  		return
   431  	}
   432  
   433  	ep := efaceOf(&ptr)
   434  	t := ep._type
   435  
   436  	top := true
   437  	if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
   438  		p := ep.data
   439  		if t.kind&kindDirectIface == 0 {
   440  			p = *(*unsafe.Pointer)(p)
   441  		}
   442  		if p == nil || !cgoIsGoPointer(p) {
   443  			return
   444  		}
   445  		aep := efaceOf(&arg)
   446  		switch aep._type.kind & kindMask {
   447  		case kindBool:
   448  			if t.kind&kindMask == kindUnsafePointer {
   449  				// We don't know the type of the element.
   450  				break
   451  			}
   452  			pt := (*ptrtype)(unsafe.Pointer(t))
   453  			cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
   454  			return
   455  		case kindSlice:
   456  			// Check the slice rather than the pointer.
   457  			ep = aep
   458  			t = ep._type
   459  		case kindArray:
   460  			// Check the array rather than the pointer.
   461  			// Pass top as false since we have a pointer
   462  			// to the array.
   463  			ep = aep
   464  			t = ep._type
   465  			top = false
   466  		default:
   467  			throw("can't happen")
   468  		}
   469  	}
   470  
   471  	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
   472  }
   473  
   474  const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
   475  const cgoResultFail = "cgo result has Go pointer"
   476  
   477  // cgoCheckArg is the real work of cgoCheckPointer. The argument p
   478  // is either a pointer to the value (of type t), or the value itself,
   479  // depending on indir. The top parameter is whether we are at the top
   480  // level, where Go pointers are allowed.
   481  func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
   482  	if t.ptrdata == 0 || p == nil {
   483  		// If the type has no pointers there is nothing to do.
   484  		return
   485  	}
   486  
   487  	switch t.kind & kindMask {
   488  	default:
   489  		throw("can't happen")
   490  	case kindArray:
   491  		at := (*arraytype)(unsafe.Pointer(t))
   492  		if !indir {
   493  			if at.len != 1 {
   494  				throw("can't happen")
   495  			}
   496  			cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
   497  			return
   498  		}
   499  		for i := uintptr(0); i < at.len; i++ {
   500  			cgoCheckArg(at.elem, p, true, top, msg)
   501  			p = add(p, at.elem.size)
   502  		}
   503  	case kindChan, kindMap:
   504  		// These types contain internal pointers that will
   505  		// always be allocated in the Go heap. It's never OK
   506  		// to pass them to C.
   507  		panic(errorString(msg))
   508  	case kindFunc:
   509  		if indir {
   510  			p = *(*unsafe.Pointer)(p)
   511  		}
   512  		if !cgoIsGoPointer(p) {
   513  			return
   514  		}
   515  		panic(errorString(msg))
   516  	case kindInterface:
   517  		it := *(**_type)(p)
   518  		if it == nil {
   519  			return
   520  		}
   521  		// A type known at compile time is OK since it's
   522  		// constant. A type not known at compile time will be
   523  		// in the heap and will not be OK.
   524  		if inheap(uintptr(unsafe.Pointer(it))) {
   525  			panic(errorString(msg))
   526  		}
   527  		p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
   528  		if !cgoIsGoPointer(p) {
   529  			return
   530  		}
   531  		if !top {
   532  			panic(errorString(msg))
   533  		}
   534  		cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
   535  	case kindSlice:
   536  		st := (*slicetype)(unsafe.Pointer(t))
   537  		s := (*slice)(p)
   538  		p = s.array
   539  		if p == nil || !cgoIsGoPointer(p) {
   540  			return
   541  		}
   542  		if !top {
   543  			panic(errorString(msg))
   544  		}
   545  		if st.elem.ptrdata == 0 {
   546  			return
   547  		}
   548  		for i := 0; i < s.cap; i++ {
   549  			cgoCheckArg(st.elem, p, true, false, msg)
   550  			p = add(p, st.elem.size)
   551  		}
   552  	case kindString:
   553  		ss := (*stringStruct)(p)
   554  		if !cgoIsGoPointer(ss.str) {
   555  			return
   556  		}
   557  		if !top {
   558  			panic(errorString(msg))
   559  		}
   560  	case kindStruct:
   561  		st := (*structtype)(unsafe.Pointer(t))
   562  		if !indir {
   563  			if len(st.fields) != 1 {
   564  				throw("can't happen")
   565  			}
   566  			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
   567  			return
   568  		}
   569  		for _, f := range st.fields {
   570  			if f.typ.ptrdata == 0 {
   571  				continue
   572  			}
   573  			cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
   574  		}
   575  	case kindPtr, kindUnsafePointer:
   576  		if indir {
   577  			p = *(*unsafe.Pointer)(p)
   578  			if p == nil {
   579  				return
   580  			}
   581  		}
   582  
   583  		if !cgoIsGoPointer(p) {
   584  			return
   585  		}
   586  		if !top {
   587  			panic(errorString(msg))
   588  		}
   589  
   590  		cgoCheckUnknownPointer(p, msg)
   591  	}
   592  }
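// Note that the kindChan and kindMap cases above panic unconditionally: a
// channel or map header always points into the Go heap, so there is no way to
// hand one to C safely. For example (user code; f is a made-up C function):
//
//	ch := make(chan int)
//	C.f(unsafe.Pointer(&ch)) // panics under cgocheck: the chan header points into the Go heap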
   593  
   594  // cgoCheckUnknownPointer is called for an arbitrary pointer into Go
   595  // memory. It checks whether that Go memory contains any other
   596  // pointer into Go memory. If it does, we panic.
   597  // The return values are unused but useful to see in panic tracebacks.
   598  func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
   599  	if inheap(uintptr(p)) {
   600  		b, span, _ := findObject(uintptr(p), 0, 0)
   601  		base = b
   602  		if base == 0 {
   603  			return
   604  		}
   605  		hbits := heapBitsForAddr(base)
   606  		n := span.elemsize
   607  		for i = uintptr(0); i < n; i += sys.PtrSize {
   608  			if i != 1*sys.PtrSize && !hbits.morePointers() {
   609  				// No more possible pointers.
   610  				break
   611  			}
   612  			if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
   613  				panic(errorString(msg))
   614  			}
   615  			hbits = hbits.next()
   616  		}
   617  
   618  		return
   619  	}
   620  
   621  	for _, datap := range activeModules() {
   622  		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
   623  			// We have no way to know the size of the object.
   624  			// We have to assume that it might contain a pointer.
   625  			panic(errorString(msg))
   626  		}
   627  		// In the text or noptr sections, we know that the
   628  		// pointer does not point to a Go pointer.
   629  	}
   630  
   631  	return
   632  }
   633  
   634  // cgoIsGoPointer reports whether the pointer is a Go pointer--a
   635  // pointer to Go memory. We only care about Go memory that might
   636  // contain pointers.
   637  //go:nosplit
   638  //go:nowritebarrierrec
   639  func cgoIsGoPointer(p unsafe.Pointer) bool {
   640  	if p == nil {
   641  		return false
   642  	}
   643  
   644  	if inHeapOrStack(uintptr(p)) {
   645  		return true
   646  	}
   647  
   648  	for _, datap := range activeModules() {
   649  		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
   650  			return true
   651  		}
   652  	}
   653  
   654  	return false
   655  }
   656  
   657  // cgoInRange reports whether p is between start and end.
   658  //go:nosplit
   659  //go:nowritebarrierrec
   660  func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
   661  	return start <= uintptr(p) && uintptr(p) < end
   662  }
   663  
   664  // cgoCheckResult is called to check the result parameter of an
   665  // exported Go function. It panics if the result is or contains a Go
   666  // pointer.
   667  func cgoCheckResult(val interface{}) {
   668  	if debug.cgocheck == 0 {
   669  		return
   670  	}
   671  
   672  	ep := efaceOf(&val)
   673  	t := ep._type
   674  	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
   675  }
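// For example (user code; NewBuffer is a made-up exported function), with
// cgocheck enabled the following fails this check because the result points
// into Go-managed memory:
//
//	//export NewBuffer
//	func NewBuffer() *C.char {
//		b := make([]byte, 16)
//		return (*C.char)(unsafe.Pointer(&b[0])) // panics: cgo result has Go pointer
//	}
//
// Memory handed back to C this way must instead come from C.malloc (or another
// C allocator) so that it does not point into the Go heap or stacks.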
   676  
