Source file src/runtime/cgocall.go

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Cgo call and callback support.
//
// To call into the C function f from Go, the cgo-generated code calls
// runtime.cgocall(_cgo_Cfunc_f, frame), where _cgo_Cfunc_f is a
// gcc-compiled function written by cgo.
//
// runtime.cgocall (below) calls entersyscall so as not to block
// other goroutines or the garbage collector, and then calls
// runtime.asmcgocall(_cgo_Cfunc_f, frame).
//
// runtime.asmcgocall (in asm_$GOARCH.s) switches to the m->g0 stack
// (assumed to be an operating system-allocated stack, so safe to run
// gcc-compiled code on) and calls _cgo_Cfunc_f(frame).
//
// _cgo_Cfunc_f invokes the actual C function f with arguments
// taken from the frame structure, records the results in the frame,
// and returns to runtime.asmcgocall.
//
// After it regains control, runtime.asmcgocall switches back to the
// original g (m->curg)'s stack and returns to runtime.cgocall.
//
// After it regains control, runtime.cgocall calls exitsyscall, which blocks
// until this m can run Go code without violating the $GOMAXPROCS limit,
// and then unlocks g from m.
//
// The above description skipped over the possibility of the gcc-compiled
// function f calling back into Go. If that happens, we continue down
// the rabbit hole during the execution of f.
//
// To make it possible for gcc-compiled C code to call a Go function p.GoF,
// cgo writes a gcc-compiled function named GoF (not p.GoF, since gcc doesn't
// know about packages).  The gcc-compiled C function f calls GoF.
//
// GoF initializes "frame", a structure containing all of its
// arguments and slots for p.GoF's results. It calls
// crosscall2(_cgoexp_GoF, frame, framesize, ctxt) using the gcc ABI.
//
// crosscall2 (in cgo/asm_$GOARCH.s) is a four-argument adapter from
// the gcc function call ABI to the gc function call ABI. At this
// point we're in the Go runtime, but we're still running on m.g0's
// stack and outside the $GOMAXPROCS limit. crosscall2 calls
// runtime.cgocallback(_cgoexp_GoF, frame, ctxt) using the gc ABI.
// (crosscall2's framesize argument is no longer used, but there's one
// case where SWIG calls crosscall2 directly and expects to pass this
// argument. See _cgo_panic.)
//
// runtime.cgocallback (in asm_$GOARCH.s) switches from m.g0's stack
// to the original g (m.curg)'s stack, on which it calls
// runtime.cgocallbackg(_cgoexp_GoF, frame, ctxt). As part of the
// stack switch, runtime.cgocallback saves the current SP as
// m.g0.sched.sp, so that any use of m.g0's stack during the execution
// of the callback will be done below the existing stack frames.
// Before overwriting m.g0.sched.sp, it pushes the old value on the
// m.g0 stack, so that it can be restored later.
//
// runtime.cgocallbackg (below) is now running on a real goroutine
// stack (not an m.g0 stack).  First it calls runtime.exitsyscall, which will
// block until the $GOMAXPROCS limit allows running this goroutine.
// Once exitsyscall has returned, it is safe to do things like call the memory
// allocator or invoke the Go callback function.  runtime.cgocallbackg
// first defers a function to unwind m.g0.sched.sp, so that if p.GoF
// panics, m.g0.sched.sp will be restored to its old value: the m.g0 stack
// and the m.curg stack will be unwound in lock step.
// Then it calls _cgoexp_GoF(frame).
//
// _cgoexp_GoF, which was generated by cmd/cgo, unpacks the arguments
// from frame, calls p.GoF, writes the results back to frame, and
// returns. Now we start unwinding this whole process.
//
// runtime.cgocallbackg pops but does not execute the deferred
// function to unwind m.g0.sched.sp, calls runtime.entersyscall, and
// returns to runtime.cgocallback.
//
// After it regains control, runtime.cgocallback switches back to
// m.g0's stack (the pointer is still in m.g0.sched.sp), restores the old
// m.g0.sched.sp value from the stack, and returns to crosscall2.
//
// crosscall2 restores the callee-save registers for gcc and returns
// to GoF, which unpacks any result values and returns to f.
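//
// As a concrete illustration of the round trip above, here is a minimal,
// hypothetical two-file cgo program (not part of this package; the names
// callC and goCallback are invented for the sketch). Building it with cgo
// enabled exercises cgocall for the Go -> C call and cgocallback/cgocallbackg
// for the C -> Go callback:
//
//	// main.go
//	package main
//
//	/*
//	extern void goCallback(int n);   // defined in callback.go, exported by cgo
//
//	static void callC(int n) {
//		goCallback(n + 1);           // C calls back into Go
//	}
//	*/
//	import "C"
//
//	func main() {
//		C.callC(41) // Go -> C -> Go
//	}
//
//	// callback.go (kept separate so the preamble above may contain definitions)
//	package main
//
//	import "C"
//	import "fmt"
//
//	//export goCallback
//	func goCallback(n C.int) {
//		fmt.Println("callback from C with", n)
//	}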

package runtime

import (
	"runtime/internal/atomic"
	"runtime/internal/sys"
	"unsafe"
)

// Addresses collected in a cgo backtrace when crashing.
// Length must match arg.Max in x_cgo_callers in runtime/cgo/gcc_traceback.c.
type cgoCallers [32]uintptr

// argset matches runtime/cgo/linux_syscall.c:argset_t
type argset struct {
	args   unsafe.Pointer
	retval uintptr
}

// wrapper for syscall package to call cgocall for libc (cgo) calls.
//go:linkname syscall_cgocaller syscall.cgocaller
//go:nosplit
//go:uintptrescapes
func syscall_cgocaller(fn unsafe.Pointer, args ...uintptr) uintptr {
	as := argset{args: unsafe.Pointer(&args[0])}
	cgocall(fn, unsafe.Pointer(&as))
	return as.retval
}

// Call from Go to C.
//
// This must be nosplit because it's used for syscalls on some
// platforms. Syscalls may have untyped arguments on the stack, so
// it's not safe to grow or scan the stack.
//
//go:nosplit
func cgocall(fn, arg unsafe.Pointer) int32 {
	if !iscgo && GOOS != "solaris" && GOOS != "illumos" && GOOS != "windows" {
		throw("cgocall unavailable")
	}

	if fn == nil {
		throw("cgocall nil")
	}

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	mp := getg().m
	mp.ncgocall++
	mp.ncgo++

	// Reset traceback.
	mp.cgoCallers[0] = 0

	// Announce we are entering a system call
	// so that the scheduler knows to create another
	// M to run goroutines while we are in the
	// foreign code.
	//
	// The call to asmcgocall is guaranteed not to
	// grow the stack and does not allocate memory,
	// so it is safe to call while "in a system call", outside
	// the $GOMAXPROCS accounting.
	//
	// fn may call back into Go code, in which case we'll exit the
	// "system call", run the Go code (which may grow the stack),
	// and then re-enter the "system call" reusing the PC and SP
	// saved by entersyscall here.
	entersyscall()

	// Tell asynchronous preemption that we're entering external
	// code. We do this after entersyscall because this may block
	// and cause an async preemption to fail, but at this point a
	// sync preemption will succeed (though this is not a matter
	// of correctness).
	osPreemptExtEnter(mp)

	mp.incgo = true
	errno := asmcgocall(fn, arg)

	// Update accounting before exitsyscall because exitsyscall may
	// reschedule us on to a different M.
	mp.incgo = false
	mp.ncgo--

	osPreemptExtExit(mp)

	exitsyscall()

	// Note that raceacquire must be called only after exitsyscall has
	// wired this M to a P.
	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// From the garbage collector's perspective, time can move
	// backwards in the sequence above. If there's a callback into
	// Go code, GC will see this function at the call to
	// asmcgocall. When the Go call later returns to C, the
	// syscall PC/SP is rolled back and the GC sees this function
	// back at the call to entersyscall. Normally, fn and arg
	// would be live at entersyscall and dead at asmcgocall, so if
	// time moved backwards, GC would see these arguments as dead
	// and then live. Prevent these undead arguments from crashing
	// GC by forcing them to stay live across this time warp.
	KeepAlive(fn)
	KeepAlive(arg)
	KeepAlive(mp)

	return errno
}
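
// The KeepAlive calls above use the same runtime.KeepAlive primitive that is
// available to user code. As a minimal, hypothetical sketch (separate from this
// package) of what KeepAlive guarantees, the following keeps a value reachable,
// and therefore keeps its finalizer from running, until the marked point:
//
//	package main
//
//	import (
//		"fmt"
//		"runtime"
//	)
//
//	type resource struct{ fd int }
//
//	func main() {
//		r := &resource{fd: 3}
//		runtime.SetFinalizer(r, func(r *resource) { fmt.Println("finalized", r.fd) })
//
//		use(r.fd)
//		// Without this, r could be collected (and finalized) as soon as
//		// r.fd has been loaded, even while use is still running.
//		runtime.KeepAlive(r)
//	}
//
//	func use(fd int) { fmt.Println("using fd", fd) }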

// Call from C back to Go.
//go:nosplit
func cgocallbackg(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp != gp.m.curg {
		println("runtime: bad g in cgocallback")
		exit(2)
	}

	// The call from C is on gp.m's g0 stack, so we must ensure
	// that we stay on that M. We have to do this before calling
	// exitsyscall, since it would otherwise be free to move us to
	// a different M. The call to unlockOSThread is in unwindm.
	lockOSThread()

	// Save current syscall parameters, so m.syscall can be
	// used again if the callback decides to make a syscall.
	syscall := gp.m.syscall

	// entersyscall saves the caller's SP to allow the GC to trace the Go
	// stack. However, since we're returning to an earlier stack frame and
	// need to pair with the entersyscall() call made by cgocall, we must
	// save syscall* and let reentersyscall restore them.
	savedsp := unsafe.Pointer(gp.syscallsp)
	savedpc := gp.syscallpc
	exitsyscall() // coming out of cgo call
	gp.m.incgo = false

	osPreemptExtExit(gp.m)

	cgocallbackg1(fn, frame, ctxt)

	// At this point unlockOSThread has been called.
	// The following code must not change to a different m.
	// This is enforced by checking incgo in the schedule function.

	osPreemptExtEnter(gp.m)

	gp.m.incgo = true
	// going back to cgo call
	reentersyscall(savedpc, uintptr(savedsp))

	gp.m.syscall = syscall
}

func cgocallbackg1(fn, frame unsafe.Pointer, ctxt uintptr) {
	gp := getg()
	if gp.m.needextram || atomic.Load(&extraMWaiters) > 0 {
		gp.m.needextram = false
		systemstack(newextram)
	}

	if ctxt != 0 {
		s := append(gp.cgoCtxt, ctxt)

		// Now we need to set gp.cgoCtxt = s, but we could get
		// a SIGPROF signal while manipulating the slice, and
		// the SIGPROF handler could pick up gp.cgoCtxt while
		// tracing up the stack.  We need to ensure that the
		// handler always sees a valid slice, so set the
		// values in an order such that it always does.
		p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
		atomicstorep(unsafe.Pointer(&p.array), unsafe.Pointer(&s[0]))
		p.cap = cap(s)
		p.len = len(s)

		defer func(gp *g) {
			// Decrease the length of the slice by one, safely.
			p := (*slice)(unsafe.Pointer(&gp.cgoCtxt))
			p.len--
		}(gp)
	}

	if gp.m.ncgo == 0 {
		// The C call to Go came from a thread not currently running
		// any Go. In the case of -buildmode=c-archive or c-shared,
		// this call may be coming in before package initialization
		// is complete. Wait until it is.
		<-main_init_done
	}

	// Add entry to defer stack in case of panic.
	restore := true
	defer unwindm(&restore)

	if raceenabled {
		raceacquire(unsafe.Pointer(&racecgosync))
	}

	// Invoke callback. This function is generated by cmd/cgo and
	// will unpack the argument frame and call the Go function.
	var cb func(frame unsafe.Pointer)
	cbFV := funcval{uintptr(fn)}
	*(*unsafe.Pointer)(unsafe.Pointer(&cb)) = noescape(unsafe.Pointer(&cbFV))
	cb(frame)

	if raceenabled {
		racereleasemerge(unsafe.Pointer(&racecgosync))
	}

	// Do not unwind m->g0->sched.sp.
	// Our caller, cgocallback, will do that.
	restore = false
}

func unwindm(restore *bool) {
	if *restore {
		// Restore sp saved by cgocallback during
		// unwind of g's stack (see comment at top of file).
		mp := acquirem()
		sched := &mp.g0.sched
		switch GOARCH {
		default:
			throw("unwindm not implemented")
		case "386", "amd64", "arm", "ppc64", "ppc64le", "mips64", "mips64le", "s390x", "mips", "mipsle", "riscv64":
			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + sys.MinFrameSize))
		case "arm64":
			sched.sp = *(*uintptr)(unsafe.Pointer(sched.sp + 16))
		}

		// Do the accounting that cgocall will not have a chance to do
		// during an unwind.
		//
		// In the case where a Go call originates from C, ncgo is 0
		// and there is no matching cgocall to end.
		if mp.ncgo > 0 {
			mp.incgo = false
			mp.ncgo--
			osPreemptExtExit(mp)
		}

		releasem(mp)
	}

	// Undo the call to lockOSThread in cgocallbackg.
	// We must still stay on the same m.
	unlockOSThread()
}

// called from assembly
func badcgocallback() {
	throw("misaligned stack in cgocallback")
}

// called from (incomplete) assembly
func cgounimpl() {
	throw("cgo not implemented")
}

var racecgosync uint64 // represents possible synchronization in C code

// Pointer checking for cgo code.

// We want to detect all cases where a program that does not use
// unsafe makes a cgo call passing a Go pointer to memory that
// contains a Go pointer. Here a Go pointer is defined as a pointer
// to memory allocated by the Go runtime. Programs that use unsafe
// can evade this restriction easily, so we don't try to catch them.
// The cgo program will rewrite all possibly bad pointer arguments to
// call cgoCheckPointer, where we can catch cases of a Go pointer
// pointing to a Go pointer.

// Complicating matters, taking the address of a slice or array
// element permits the C program to access all elements of the slice
// or array. In that case we will see a pointer to a single element,
// but we need to check the entire data structure.

// The cgoCheckPointer call takes additional arguments indicating that
// it was called on an address expression. An additional argument of
// true means that it only needs to check a single element. An
// additional argument of a slice or array means that it needs to
// check the entire slice/array, but nothing else. Otherwise, the
// pointer could be anything, and we check the entire heap object,
// which is conservative but safe.

// When and if we implement a moving garbage collector,
// cgoCheckPointer will pin the pointer for the duration of the cgo
// call.  (This is necessary but not sufficient; the cgo program will
// also have to change to pin Go pointers that cannot point to Go
// pointers.)

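// For example (a hypothetical user-level sketch, not part of this package;
// the C function use and the Go type node are invented for illustration),
// with the default GODEBUG=cgocheck=1 the second call below panics with
// cgoCheckPointerFail because the passed Go memory itself contains a Go
// pointer, while the first call is allowed because its pointer field is nil:
//
//	package main
//
//	// static void use(void *p) {}
//	import "C"
//
//	import "unsafe"
//
//	type node struct {
//		next *node // a Go pointer stored inside the value
//		val  int
//	}
//
//	func main() {
//		ok := &node{val: 1}
//		C.use(unsafe.Pointer(ok)) // fine: *ok contains no non-nil Go pointer
//
//		bad := &node{next: new(node), val: 2}
//		C.use(unsafe.Pointer(bad)) // panics: cgo argument has Go pointer to Go pointer
//	}
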
// cgoCheckPointer checks if the argument contains a Go pointer that
// points to a Go pointer, and panics if it does.
func cgoCheckPointer(ptr interface{}, arg interface{}) {
	if debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&ptr)
	t := ep._type

	top := true
	if arg != nil && (t.kind&kindMask == kindPtr || t.kind&kindMask == kindUnsafePointer) {
		p := ep.data
		if t.kind&kindDirectIface == 0 {
			p = *(*unsafe.Pointer)(p)
		}
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		aep := efaceOf(&arg)
		switch aep._type.kind & kindMask {
		case kindBool:
			if t.kind&kindMask == kindUnsafePointer {
				// We don't know the type of the element.
				break
			}
			pt := (*ptrtype)(unsafe.Pointer(t))
			cgoCheckArg(pt.elem, p, true, false, cgoCheckPointerFail)
			return
		case kindSlice:
			// Check the slice rather than the pointer.
			ep = aep
			t = ep._type
		case kindArray:
			// Check the array rather than the pointer.
			// Pass top as false since we have a pointer
			// to the array.
			ep = aep
			t = ep._type
			top = false
		default:
			throw("can't happen")
		}
	}

	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, top, cgoCheckPointerFail)
}

const cgoCheckPointerFail = "cgo argument has Go pointer to Go pointer"
const cgoResultFail = "cgo result has Go pointer"

// cgoCheckArg is the real work of cgoCheckPointer. The argument p
// is either a pointer to the value (of type t), or the value itself,
// depending on indir. The top parameter is whether we are at the top
// level, where Go pointers are allowed.
func cgoCheckArg(t *_type, p unsafe.Pointer, indir, top bool, msg string) {
	if t.ptrdata == 0 || p == nil {
		// If the type has no pointers there is nothing to do.
		return
	}

	switch t.kind & kindMask {
	default:
		throw("can't happen")
	case kindArray:
		at := (*arraytype)(unsafe.Pointer(t))
		if !indir {
			if at.len != 1 {
				throw("can't happen")
			}
			cgoCheckArg(at.elem, p, at.elem.kind&kindDirectIface == 0, top, msg)
			return
		}
		for i := uintptr(0); i < at.len; i++ {
			cgoCheckArg(at.elem, p, true, top, msg)
			p = add(p, at.elem.size)
		}
	case kindChan, kindMap:
		// These types contain internal pointers that will
		// always be allocated in the Go heap. It's never OK
		// to pass them to C.
		panic(errorString(msg))
	case kindFunc:
		if indir {
			p = *(*unsafe.Pointer)(p)
		}
		if !cgoIsGoPointer(p) {
			return
		}
		panic(errorString(msg))
	case kindInterface:
		it := *(**_type)(p)
		if it == nil {
			return
		}
		// A type known at compile time is OK since it's
		// constant. A type not known at compile time will be
		// in the heap and will not be OK.
		if inheap(uintptr(unsafe.Pointer(it))) {
			panic(errorString(msg))
		}
		p = *(*unsafe.Pointer)(add(p, sys.PtrSize))
		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		cgoCheckArg(it, p, it.kind&kindDirectIface == 0, false, msg)
	case kindSlice:
		st := (*slicetype)(unsafe.Pointer(t))
		s := (*slice)(p)
		p = s.array
		if p == nil || !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
		if st.elem.ptrdata == 0 {
			return
		}
		for i := 0; i < s.cap; i++ {
			cgoCheckArg(st.elem, p, true, false, msg)
			p = add(p, st.elem.size)
		}
	case kindString:
		ss := (*stringStruct)(p)
		if !cgoIsGoPointer(ss.str) {
			return
		}
		if !top {
			panic(errorString(msg))
		}
	case kindStruct:
		st := (*structtype)(unsafe.Pointer(t))
		if !indir {
			if len(st.fields) != 1 {
				throw("can't happen")
			}
			cgoCheckArg(st.fields[0].typ, p, st.fields[0].typ.kind&kindDirectIface == 0, top, msg)
			return
		}
		for _, f := range st.fields {
			if f.typ.ptrdata == 0 {
				continue
			}
			cgoCheckArg(f.typ, add(p, f.offset()), true, top, msg)
		}
	case kindPtr, kindUnsafePointer:
		if indir {
			p = *(*unsafe.Pointer)(p)
			if p == nil {
				return
			}
		}

		if !cgoIsGoPointer(p) {
			return
		}
		if !top {
			panic(errorString(msg))
		}

		cgoCheckUnknownPointer(p, msg)
	}
}

// cgoCheckUnknownPointer is called for an arbitrary pointer into Go
// memory. It checks whether that Go memory contains any other
// pointer into Go memory. If it does, we panic.
// The return values are unused but useful to see in panic tracebacks.
func cgoCheckUnknownPointer(p unsafe.Pointer, msg string) (base, i uintptr) {
	if inheap(uintptr(p)) {
		b, span, _ := findObject(uintptr(p), 0, 0)
		base = b
		if base == 0 {
			return
		}
		hbits := heapBitsForAddr(base)
		n := span.elemsize
		for i = uintptr(0); i < n; i += sys.PtrSize {
			if !hbits.morePointers() {
				// No more possible pointers.
				break
			}
			if hbits.isPointer() && cgoIsGoPointer(*(*unsafe.Pointer)(unsafe.Pointer(base + i))) {
				panic(errorString(msg))
			}
			hbits = hbits.next()
		}

		return
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			// We have no way to know the size of the object.
			// We have to assume that it might contain a pointer.
			panic(errorString(msg))
		}
		// In the text or noptr sections, we know that the
		// pointer does not point to a Go pointer.
	}

	return
}

// cgoIsGoPointer reports whether the pointer is a Go pointer--a
// pointer to Go memory. We only care about Go memory that might
// contain pointers.
//go:nosplit
//go:nowritebarrierrec
func cgoIsGoPointer(p unsafe.Pointer) bool {
	if p == nil {
		return false
	}

	if inHeapOrStack(uintptr(p)) {
		return true
	}

	for _, datap := range activeModules() {
		if cgoInRange(p, datap.data, datap.edata) || cgoInRange(p, datap.bss, datap.ebss) {
			return true
		}
	}

	return false
}

// cgoInRange reports whether p is between start and end.
//go:nosplit
//go:nowritebarrierrec
func cgoInRange(p unsafe.Pointer, start, end uintptr) bool {
	return start <= uintptr(p) && uintptr(p) < end
}

// cgoCheckResult is called to check the result parameter of an
// exported Go function. It panics if the result is or contains a Go
// pointer.
func cgoCheckResult(val interface{}) {
	if debug.cgocheck == 0 {
		return
	}

	ep := efaceOf(&val)
	t := ep._type
	cgoCheckArg(t, ep.data, t.kind&kindDirectIface == 0, false, cgoResultFail)
}
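
// As with argument checking, the result check is visible to user code through
// the panic message cgoResultFail. A minimal, hypothetical sketch (not part of
// this package) of an exported function that would trip it when called from C,
// next to a version that is allowed:
//
//	package main
//
//	import "C"
//
//	import "unsafe"
//
//	//export Bad
//	func Bad() *C.char {
//		b := []byte("hello\x00")
//		return (*C.char)(unsafe.Pointer(&b[0])) // Go-allocated: "cgo result has Go pointer"
//	}
//
//	//export Good
//	func Good() *C.char {
//		return C.CString("hello") // C-allocated; the C caller must free it
//	}
//
//	func main() {}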