...

Text file src/runtime/proc.c

     1	// Copyright 2009 The Go Authors. All rights reserved.
     2	// Use of this source code is governed by a BSD-style
     3	// license that can be found in the LICENSE file.
     4	
     5	#include "runtime.h"
     6	#include "arch_GOARCH.h"
     7	#include "zaexperiment.h"
     8	#include "malloc.h"
     9	#include "stack.h"
    10	#include "race.h"
    11	#include "type.h"
    12	#include "mgc0.h"
    13	#include "textflag.h"
    14	
    15	// Goroutine scheduler
    16	// The scheduler's job is to distribute ready-to-run goroutines over worker threads.
    17	//
    18	// The main concepts are:
    19	// G - goroutine.
    20	// M - worker thread, or machine.
    21	// P - processor, a resource that is required to execute Go code.
    22	//     An M must have an associated P to execute Go code; however, it can be
    23	//     blocked or in a syscall without an associated P.
    24	//
    25	// Design doc at http://golang.org/s/go11sched.
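	//
	// In this file, an M acquires a P with acquirep before running Go code and
	// gives it up with releasep (or handoffp) when it blocks or enters a syscall;
	// runnable G's wait either in a P's local run queue (runqput/runqget) or in
	// the global run queue (globrunqput/globrunqget).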
    26	
    27	enum
    28	{
    29		// Number of goroutine ids to grab from runtime·sched.goidgen to local per-P cache at once.
    30	// 16 seems to provide enough amortization, but other than that it's a mostly arbitrary number.
    31		GoidCacheBatch = 16,
    32	};
    33	
    34	SchedT	runtime·sched;
    35	int32	runtime·gomaxprocs;
    36	uint32	runtime·needextram;
    37	bool	runtime·iscgo;
    38	M	runtime·m0;
    39	G	runtime·g0;	// idle goroutine for m0
    40	G*	runtime·lastg;
    41	M*	runtime·allm;
    42	M*	runtime·extram;
    43	P*	runtime·allp[MaxGomaxprocs+1];
    44	int8*	runtime·goos;
    45	int32	runtime·ncpu;
    46	int32	runtime·newprocs;
    47	
    48	Mutex runtime·allglock;	// the following vars are protected by this lock or by stoptheworld
    49	G**	runtime·allg;
    50	Slice	runtime·allgs;
    51	uintptr runtime·allglen;
    52	ForceGCState	runtime·forcegc;
    53	
    54	void runtime·mstart(void);
    55	static void runqput(P*, G*);
    56	static G* runqget(P*);
    57	static bool runqputslow(P*, G*, uint32, uint32);
    58	static G* runqsteal(P*, P*);
    59	static void mput(M*);
    60	static M* mget(void);
    61	static void mcommoninit(M*);
    62	static void schedule(void);
    63	static void procresize(int32);
    64	static void acquirep(P*);
    65	static P* releasep(void);
    66	static void newm(void(*)(void), P*);
    67	static void stopm(void);
    68	static void startm(P*, bool);
    69	static void handoffp(P*);
    70	static void wakep(void);
    71	static void stoplockedm(void);
    72	static void startlockedm(G*);
    73	static void sysmon(void);
    74	static uint32 retake(int64);
    75	static void incidlelocked(int32);
    76	static void checkdead(void);
    77	static void exitsyscall0(G*);
    78	void runtime·park_m(G*);
    79	static void goexit0(G*);
    80	static void gfput(P*, G*);
    81	static G* gfget(P*);
    82	static void gfpurge(P*);
    83	static void globrunqput(G*);
    84	static void globrunqputbatch(G*, G*, int32);
    85	static G* globrunqget(P*, int32);
    86	static P* pidleget(void);
    87	static void pidleput(P*);
    88	static void injectglist(G*);
    89	static bool preemptall(void);
    90	static bool preemptone(P*);
    91	static bool exitsyscallfast(void);
    92	static bool haveexperiment(int8*);
    93	void runtime·allgadd(G*);
    94	static void dropg(void);
    95	
    96	extern String runtime·buildVersion;
    97	
    98	// For cgo-using programs with external linking,
    99	// export "main" (defined in assembly) so that libc can handle basic
   100	// C runtime startup and call the Go program as if it were
   101	// the C main function.
   102	#pragma cgo_export_static main
   103	
   104	// Filled in by dynamic linker when Cgo is available.
   105	void (*_cgo_init)(void);
   106	void (*_cgo_malloc)(void);
   107	void (*_cgo_free)(void);
   108	
   109	// Copy for Go code.
   110	void* runtime·cgoMalloc;
   111	void* runtime·cgoFree;
   112	
   113	// The bootstrap sequence is:
   114	//
   115	//	call osinit
   116	//	call schedinit
   117	//	make & queue new G
   118	//	call runtime·mstart
   119	//
   120	// The new G calls runtime·main.
   121	void
   122	runtime·schedinit(void)
   123	{
   124		int32 n, procs;
   125		byte *p;
   126	
   127		// raceinit must be the first call to race detector.
   128		// In particular, it must be done before mallocinit below calls racemapshadow.
   129		if(raceenabled)
   130			g->racectx = runtime·raceinit();
   131	
   132		runtime·sched.maxmcount = 10000;
   133	
   134		runtime·tracebackinit();
   135		runtime·symtabinit();
   136		runtime·stackinit();
   137		runtime·mallocinit();
   138		mcommoninit(g->m);
   139		
   140		runtime·goargs();
   141		runtime·goenvs();
   142		runtime·parsedebugvars();
   143		runtime·gcinit();
   144	
   145		runtime·sched.lastpoll = runtime·nanotime();
   146		procs = 1;
   147		p = runtime·getenv("GOMAXPROCS");
   148		if(p != nil && (n = runtime·atoi(p)) > 0) {
   149			if(n > MaxGomaxprocs)
   150				n = MaxGomaxprocs;
   151			procs = n;
   152		}
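	// At this point procs is clamped to [1, MaxGomaxprocs]: for example,
	// GOMAXPROCS=4 gives procs = 4, while an unset, zero, or negative
	// value leaves the default of 1.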
   153		procresize(procs);
   154	
   155		if(runtime·buildVersion.str == nil) {
   156			// Condition should never trigger.  This code just serves
   157			// to ensure runtime·buildVersion is kept in the resulting binary.
   158			runtime·buildVersion.str = (uint8*)"unknown";
   159			runtime·buildVersion.len = 7;
   160		}
   161	
   162		runtime·cgoMalloc = _cgo_malloc;
   163		runtime·cgoFree = _cgo_free;
   164	}
   165	
   166	void
   167	runtime·newsysmon(void)
   168	{
   169		newm(sysmon, nil);
   170	}
   171	
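	// dumpgstatus prints the pointer, goid, and atomic status of both gp and the
	// current g; it is used in the throw paths below to aid debugging.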
   172	static void
   173	dumpgstatus(G* gp)
   174	{
   175		runtime·printf("runtime: gp: gp=%p, goid=%D, gp->atomicstatus=%x\n", gp, gp->goid, runtime·readgstatus(gp));
   176		runtime·printf("runtime:  g:  g=%p, goid=%D,  g->atomicstatus=%x\n", g, g->goid, runtime·readgstatus(g));
   177	}
   178	
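	// checkmcount throws ("thread exhaustion") once the number of created m's
	// exceeds sched.maxmcount. The caller must hold runtime·sched.lock.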
   179	static void
   180	checkmcount(void)
   181	{
   182		// sched lock is held
   183		if(runtime·sched.mcount > runtime·sched.maxmcount){
   184			runtime·printf("runtime: program exceeds %d-thread limit\n", runtime·sched.maxmcount);
   185			runtime·throw("thread exhaustion");
   186		}
   187	}
   188	
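	// mcommoninit performs initialization common to all m's: it records the
	// creation call stack, seeds fastrand, assigns an id under sched.lock, runs
	// the OS-specific mpreinit, and publishes mp on runtime·allm so that the
	// garbage collector and runtime·NumCgoCall can see it.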
   189	static void
   190	mcommoninit(M *mp)
   191	{
   192	// g0 stack won't make sense for user (and is not necessarily unwindable).
   193		if(g != g->m->g0)
   194			runtime·callers(1, mp->createstack, nelem(mp->createstack));
   195	
   196		mp->fastrand = 0x49f6428aUL + mp->id + runtime·cputicks();
   197	
   198		runtime·lock(&runtime·sched.lock);
   199		mp->id = runtime·sched.mcount++;
   200		checkmcount();
   201		runtime·mpreinit(mp);
   202		if(mp->gsignal)
   203			mp->gsignal->stackguard1 = mp->gsignal->stack.lo + StackGuard;
   204	
   205		// Add to runtime·allm so garbage collector doesn't free g->m
   206		// when it is just in a register or thread-local storage.
   207		mp->alllink = runtime·allm;
   208		// runtime·NumCgoCall() iterates over allm w/o schedlock,
   209		// so we need to publish it safely.
   210		runtime·atomicstorep(&runtime·allm, mp);
   211		runtime·unlock(&runtime·sched.lock);
   212	}
   213	
   214	// Mark gp ready to run.
   215	void
   216	runtime·ready(G *gp)
   217	{
   218		uint32 status;
   219	
   220		status = runtime·readgstatus(gp);
   221		// Mark runnable.
   222		g->m->locks++;  // disable preemption because it can be holding p in a local var
   223		if((status&~Gscan) != Gwaiting){
   224			dumpgstatus(gp);
   225			runtime·throw("bad g->status in ready");
   226		}
   227		// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq
   228		runtime·casgstatus(gp, Gwaiting, Grunnable);
   229		runqput(g->m->p, gp);
   230		if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0)  // TODO: fast atomic
   231			wakep();
   232		g->m->locks--;
   233		if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
   234			g->stackguard0 = StackPreempt;
   235	}
   236	
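	// ready_m readies the G stashed in g->m->ptrarg[0], clearing the slot first.
	// It exists so that ready can be invoked from contexts that pass their
	// argument through the M (for example via onM).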
   237	void
   238	runtime·ready_m(void)
   239	{
   240		G *gp;
   241	
   242		gp = g->m->ptrarg[0];
   243		g->m->ptrarg[0] = nil;
   244		runtime·ready(gp);
   245	}
   246	
   247	int32
   248	runtime·gcprocs(void)
   249	{
   250		int32 n;
   251	
   252		// Figure out how many CPUs to use during GC.
   253		// Limited by gomaxprocs, number of actual CPUs, and MaxGcproc.
   254		runtime·lock(&runtime·sched.lock);
   255		n = runtime·gomaxprocs;
   256		if(n > runtime·ncpu)
   257			n = runtime·ncpu;
   258		if(n > MaxGcproc)
   259			n = MaxGcproc;
   260		if(n > runtime·sched.nmidle+1) // one M is currently running
   261			n = runtime·sched.nmidle+1;
   262		runtime·unlock(&runtime·sched.lock);
   263		return n;
   264	}
   265	
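	// needaddgcproc reports whether the GC could use more helper m's than are
	// currently available, applying the same gomaxprocs/ncpu/MaxGcproc limits
	// as runtime·gcprocs.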
   266	static bool
   267	needaddgcproc(void)
   268	{
   269		int32 n;
   270	
   271		runtime·lock(&runtime·sched.lock);
   272		n = runtime·gomaxprocs;
   273		if(n > runtime·ncpu)
   274			n = runtime·ncpu;
   275		if(n > MaxGcproc)
   276			n = MaxGcproc;
   277		n -= runtime·sched.nmidle+1; // one M is currently running
   278		runtime·unlock(&runtime·sched.lock);
   279		return n > 0;
   280	}
   281	
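	// helpgc wakes nproc-1 idle m's to act as GC helpers (the calling m counts
	// as the first), assigning each a helpgc number and the mcache of a distinct P.
	// It throws if fewer idle m's are available than runtime·gcprocs reported.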
   282	void
   283	runtime·helpgc(int32 nproc)
   284	{
   285		M *mp;
   286		int32 n, pos;
   287	
   288		runtime·lock(&runtime·sched.lock);
   289		pos = 0;
   290		for(n = 1; n < nproc; n++) {  // one M is currently running
   291			if(runtime·allp[pos]->mcache == g->m->mcache)
   292				pos++;
   293			mp = mget();
   294			if(mp == nil)
   295				runtime·throw("runtime·gcprocs inconsistency");
   296			mp->helpgc = n;
   297			mp->mcache = runtime·allp[pos]->mcache;
   298			pos++;
   299			runtime·notewakeup(&mp->park);
   300		}
   301		runtime·unlock(&runtime·sched.lock);
   302	}
   303	
   304	// Similar to stoptheworld but best-effort and can be called several times.
   305	// There is no reverse operation; it is used during crashing.
   306	// This function must not lock any mutexes.
   307	void
   308	runtime·freezetheworld(void)
   309	{
   310		int32 i;
   311	
   312		if(runtime·gomaxprocs == 1)
   313			return;
   314		// stopwait and preemption requests can be lost
   315		// due to races with concurrently executing threads,
   316		// so try several times
   317		for(i = 0; i < 5; i++) {
   318			// this should tell the scheduler to not start any new goroutines
   319			runtime·sched.stopwait = 0x7fffffff;
   320			runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
   321			// this should stop running goroutines
   322			if(!preemptall())
   323				break;  // no running goroutines
   324			runtime·usleep(1000);
   325		}
   326		// to be sure
   327		runtime·usleep(1000);
   328		preemptall();
   329		runtime·usleep(1000);
   330	}
   331	
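	// isscanstatus reports whether status has the Gscan bit set; a bare Gscan
	// with no underlying status is invalid and throws.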
   332	static bool
   333	isscanstatus(uint32 status)
   334	{
   335		if(status == Gscan)
   336			runtime·throw("isscanstatus: Bad status Gscan");
   337		return (status&Gscan) == Gscan;
   338	}
   339	
   340	// All reads and writes of g's status go through readgstatus, casgstatus,
   341	// castogscanstatus, and casfromgscanstatus.
   342	#pragma textflag NOSPLIT
   343	uint32
   344	runtime·readgstatus(G *gp)
   345	{
   346		return runtime·atomicload(&gp->atomicstatus);
   347	}
   348	
   349	// The Gscan statuses act like locks, and this releases them.
   350	// If it proves to be a performance hit we should be able to make these
   351	// simple atomic stores, but for now we throw if we see an
   352	// inconsistent state.
   353	void
   354	runtime·casfromgscanstatus(G *gp, uint32 oldval, uint32 newval)
   355	{
   356		bool success = false;
   357	
   358		// Check that transition is valid.
   359		switch(oldval) {
   360		case Gscanrunnable:
   361		case Gscanwaiting:
   362		case Gscanrunning:
   363		case Gscansyscall:
   364			if(newval == (oldval&~Gscan))
   365				success = runtime·cas(&gp->atomicstatus, oldval, newval);
   366			break;
   367		case Gscanenqueue:
   368			if(newval == Gwaiting)
   369				success = runtime·cas(&gp->atomicstatus, oldval, newval);
   370			break;
   371		}	
   372		if(!success){
   373			runtime·printf("runtime: casfromgscanstatus failed gp=%p, oldval=%d, newval=%d\n",  
   374				gp, oldval, newval);
   375			dumpgstatus(gp);
   376			runtime·throw("casfromgscanstatus: gp->status is not in scan state");
   377		}
   378	}
   379	
   380	// This will return false if the gp is not in the expected status and the cas fails.
   381	// This acts like a lock acquire while casfromgscanstatus acts like a lock release.
   382	bool
   383	runtime·castogscanstatus(G *gp, uint32 oldval, uint32 newval)
   384	{
   385		switch(oldval) {
   386		case Grunnable:
   387		case Gwaiting:
   388		case Gsyscall:
   389			if(newval == (oldval|Gscan))
   390				return runtime·cas(&gp->atomicstatus, oldval, newval);
   391			break;
   392		case Grunning:
   393			if(newval == Gscanrunning || newval == Gscanenqueue)
   394				return runtime·cas(&gp->atomicstatus, oldval, newval);
   395			break;   
   396		}
   397	
   398		runtime·printf("runtime: castogscanstatus oldval=%d newval=%d\n", oldval, newval);
   399		runtime·throw("castogscanstatus");
   400		return false; // not reached
   401	}
   402	
   403	static void badcasgstatus(void);
   404	static void helpcasgstatus(void);
   405	static void badgstatusrunnable(void);
   406	
   407	// If asked to move to or from a Gscan status, this will throw. Use castogscanstatus
   408	// and casfromgscanstatus instead.
   409	// casgstatus will loop if g->atomicstatus is in a Gscan status until the routine that
   410	// put it in the Gscan state is finished.
   411	#pragma textflag NOSPLIT
   412	void
   413	runtime·casgstatus(G *gp, uint32 oldval, uint32 newval)
   414	{
   415		void (*fn)(void);
   416	
   417		if((oldval&Gscan) || (newval&Gscan) || oldval == newval) {
   418			g->m->scalararg[0] = oldval;
   419			g->m->scalararg[1] = newval;
   420			fn = badcasgstatus;
   421			runtime·onM(&fn);
   422		}
   423	
   424		// loop if gp->atomicstatus is in a scan state giving
   425		// GC time to finish and change the state to oldval.
   426		while(!runtime·cas(&gp->atomicstatus, oldval, newval)) {
   427			if(oldval == Gwaiting && gp->atomicstatus == Grunnable) {
   428				fn = badgstatusrunnable;
   429				runtime·onM(&fn);
   430			}
   431			// Help GC if needed. 
   432			if(gp->preemptscan && !gp->gcworkdone && (oldval == Grunning || oldval == Gsyscall)) {
   433				gp->preemptscan = false;
   434				g->m->ptrarg[0] = gp;
   435				fn = helpcasgstatus;
   436				runtime·onM(&fn);
   437			}
   438		}	
   439	}
   440	
   441	static void
   442	badgstatusrunnable(void)
   443	{
   444		runtime·throw("casgstatus: waiting for Gwaiting but is Grunnable");
   445	}
   446	
   447	// casgstatus(gp, oldstatus, Gcopystack), assuming oldstatus is Gwaiting or Grunnable.
   448	// Returns old status. Cannot call casgstatus directly, because we are racing with an
   449	// async wakeup that might come in from netpoll. If we see Gwaiting from the readgstatus,
   450	// it might have become Grunnable by the time we get to the cas. If we called casgstatus,
   451	// it would loop waiting for the status to go back to Gwaiting, which it never will.
   452	#pragma textflag NOSPLIT
   453	uint32
   454	runtime·casgcopystack(G *gp)
   455	{
   456		uint32 oldstatus;
   457	
   458		for(;;) {
   459			oldstatus = runtime·readgstatus(gp) & ~Gscan;
   460			if(oldstatus != Gwaiting && oldstatus != Grunnable)
   461				runtime·throw("copystack: bad status, not Gwaiting or Grunnable");
   462			if(runtime·cas(&gp->atomicstatus, oldstatus, Gcopystack))
   463				break;
   464		}
   465		return oldstatus;
   466	}
   467	
   468	static void
   469	badcasgstatus(void)
   470	{
   471		uint32 oldval, newval;
   472		
   473		oldval = g->m->scalararg[0];
   474		newval = g->m->scalararg[1];
   475		g->m->scalararg[0] = 0;
   476		g->m->scalararg[1] = 0;
   477	
   478		runtime·printf("casgstatus: oldval=%d, newval=%d\n", oldval, newval);
   479		runtime·throw("casgstatus: bad incoming values");
   480	}
   481	
   482	static void
   483	helpcasgstatus(void)
   484	{
   485		G *gp;
   486		
   487		gp = g->m->ptrarg[0];
   488		g->m->ptrarg[0] = 0;
   489		runtime·gcphasework(gp);
   490	}
   491	
   492	// stopg ensures that gp is stopped at a GC safe point where its stack can be scanned,
   493	// or, in the context of a moving collector, where its pointers can be flipped from
   494	// pointing to old objects to pointing to new objects.
   495	// If stopg returns true, the caller knows gp is at a GC safe point and will remain there until
   496	// the caller calls restartg.
   497	// If stopg returns false, the caller is not responsible for calling restartg. This can happen
   498	// if another thread, either gp itself or another GC thread, has taken responsibility
   499	// for the GC work related to this goroutine.
   500	bool
   501	runtime·stopg(G *gp)
   502	{
   503		uint32 s;
   504	
   505		for(;;) {
   506			if(gp->gcworkdone)
   507				return false;
   508	
   509			s = runtime·readgstatus(gp);
   510			switch(s) {
   511			default:
   512				dumpgstatus(gp);
   513				runtime·throw("stopg: gp->atomicstatus is not valid");
   514	
   515			case Gdead:
   516				return false;
   517	
   518			case Gcopystack:
   519				// Loop until a new stack is in place.
   520				break;
   521	
   522			case Grunnable:
   523			case Gsyscall:
   524			case Gwaiting:
   525				// Claim goroutine by setting scan bit.
   526				if(!runtime·castogscanstatus(gp, s, s|Gscan))
   527					break;
   528				// In scan state, do work.
   529				runtime·gcphasework(gp);
   530				return true;
   531	
   532			case Gscanrunnable:
   533			case Gscanwaiting:
   534			case Gscansyscall:
   535				// Goroutine already claimed by another GC helper.
   536				return false;
   537	
   538			case Grunning:
   539				// Claim goroutine, so we aren't racing with a status
   540				// transition away from Grunning.
   541				if(!runtime·castogscanstatus(gp, Grunning, Gscanrunning))
   542					break;
   543	
   544				// Mark gp for preemption.
   545				if(!gp->gcworkdone) {
   546					gp->preemptscan = true;
   547					gp->preempt = true;
   548					gp->stackguard0 = StackPreempt;
   549				}
   550	
   551				// Unclaim.
   552				runtime·casfromgscanstatus(gp, Gscanrunning, Grunning);
   553				return false;
   554			}
   555		}
   556		// Should not be here....
   557	}
   558	
   559	// The GC requests that this routine be moved from a scanmumble state to a mumble state.
   560	void 
   561	runtime·restartg(G *gp)
   562	{
   563		uint32 s;
   564	
   565		s = runtime·readgstatus(gp);
   566		switch(s) {
   567		default:
   568			dumpgstatus(gp); 
   569			runtime·throw("restartg: unexpected status");
   570	
   571		case Gdead:
   572			break;
   573	
   574		case Gscanrunnable:
   575		case Gscanwaiting:
   576		case Gscansyscall:
   577			runtime·casfromgscanstatus(gp, s, s&~Gscan);
   578			break;
   579	
   580		case Gscanenqueue:
   581			// Scan is now completed.
   582			// Goroutine now needs to be made runnable.
   583			// We put it on the global run queue; ready blocks on the global scheduler lock.
   584			runtime·casfromgscanstatus(gp, Gscanenqueue, Gwaiting);
   585			if(gp != g->m->curg)
   586				runtime·throw("processing Gscanenqueue on wrong m");
   587			dropg();
   588			runtime·ready(gp);
   589			break;
   590		}
   591	}
   592	
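	// stopscanstart stops gp for GC via stopg; if the claim succeeded, it checks
	// that gp was left in a scan state and then restarts it with restartg.
	// It must not be called while running on gp itself.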
   593	static void
   594	stopscanstart(G* gp)
   595	{
   596		if(g == gp)
   597			runtime·throw("GC not moved to G0");
   598		if(runtime·stopg(gp)) {
   599			if(!isscanstatus(runtime·readgstatus(gp))) {
   600				dumpgstatus(gp);
   601				runtime·throw("GC not in scan state");
   602			}
   603			runtime·restartg(gp);
   604		}
   605	}
   606	
   607	// Runs on g0 and does the actual work after putting the g back on the run queue.
   608	static void
   609	mquiesce(G *gpmaster)
   610	{
   611		G* gp;
   612		uint32 i;
   613		uint32 status;
   614		uint32 activeglen;
   615	
   616		activeglen = runtime·allglen;
   617		// enqueue the calling goroutine.
   618		runtime·restartg(gpmaster);
   619		for(i = 0; i < activeglen; i++) {
   620			gp = runtime·allg[i];
   621			if(runtime·readgstatus(gp) == Gdead) 
   622				gp->gcworkdone = true; // noop scan.
   623			else 
   624				gp->gcworkdone = false; 
   625			stopscanstart(gp); 
   626		}
   627	
   628	// Check that the G's gcwork (such as scanning) has been done. If not, do it now.
   629	// You can end up doing work here if the page trap on a Grunning goroutine has
   630	// not been sprung, or in some race situations. For example, a runnable goroutine
   631	// goes dead and is started up again with gp->gcworkdone set to false.
   632		for(i = 0; i < activeglen; i++) {
   633			gp = runtime·allg[i];
   634			while (!gp->gcworkdone) {
   635				status = runtime·readgstatus(gp);
   636				if(status == Gdead) {
   637					gp->gcworkdone = true; // scan is a noop
   638					break;
   639					//do nothing, scan not needed. 
   640				}
   641				if(status == Grunning && gp->stackguard0 == (uintptr)StackPreempt && runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) // nanosecond arg 
   642					runtime·noteclear(&runtime·sched.stopnote);
   643				else 
   644					stopscanstart(gp);
   645			}
   646		}
   647	
   648		for(i = 0; i < activeglen; i++) {
   649			gp = runtime·allg[i];
   650			status = runtime·readgstatus(gp);
   651			if(isscanstatus(status)) {
   652				runtime·printf("mstopandscang:bottom: post scan bad status gp=%p has status %x\n", gp, status);
   653				dumpgstatus(gp);
   654			}
   655			if(!gp->gcworkdone && status != Gdead) {
   656				runtime·printf("mstopandscang:bottom: post scan gp=%p->gcworkdone still false\n", gp);
   657				dumpgstatus(gp);
   658			}
   659		}
   660	
   661		schedule(); // Never returns.
   662	}
   663	
   664	// quiesce moves all the goroutines to a GC safepoint, which for now is at a preemption point.
   665	// If the global runtime·gcphase is GCmark, quiesce will ensure that all of the goroutines' stacks
   666	// have been scanned before it returns.
   667	void
   668	runtime·quiesce(G* mastergp)
   669	{
   670		void (*fn)(G*);
   671	
   672		runtime·castogscanstatus(mastergp, Grunning, Gscanenqueue);
   673		// Now move this to the g0 (aka m) stack.
   674		// g0 will potentially scan this thread and put mastergp on the runqueue 
   675		fn = mquiesce;
   676		runtime·mcall(&fn);
   677	}
   678	
   679	// This is used by the GC as well as the routines that do stack dumps. In the case
   680	// of GC all the goroutines can be reliably stopped. This is not always the case
   681	// when the system is in panic or being exited.
   682	void
   683	runtime·stoptheworld(void)
   684	{
   685		int32 i;
   686		uint32 s;
   687		P *p;
   688		bool wait;
   689	
   690		// If we hold a lock, then we won't be able to stop another M
   691		// that is blocked trying to acquire the lock.
   692		if(g->m->locks > 0)
   693			runtime·throw("stoptheworld: holding locks");
   694	
   695		runtime·lock(&runtime·sched.lock);
   696		runtime·sched.stopwait = runtime·gomaxprocs;
   697		runtime·atomicstore((uint32*)&runtime·sched.gcwaiting, 1);
   698		preemptall();
   699		// stop current P
   700		g->m->p->status = Pgcstop; // Pgcstop is only diagnostic.
   701		runtime·sched.stopwait--;
   702		// try to retake all P's in Psyscall status
   703		for(i = 0; i < runtime·gomaxprocs; i++) {
   704			p = runtime·allp[i];
   705			s = p->status;
   706			if(s == Psyscall && runtime·cas(&p->status, s, Pgcstop))
   707				runtime·sched.stopwait--;
   708		}
   709		// stop idle P's
   710		while(p = pidleget()) {
   711			p->status = Pgcstop;
   712			runtime·sched.stopwait--;
   713		}
   714		wait = runtime·sched.stopwait > 0;
   715		runtime·unlock(&runtime·sched.lock);
   716	
   717		// wait for remaining P's to stop voluntarily
   718		if(wait) {
   719			for(;;) {
   720				// wait for 100us, then try to re-preempt in case of any races
   721				if(runtime·notetsleep(&runtime·sched.stopnote, 100*1000)) {
   722					runtime·noteclear(&runtime·sched.stopnote);
   723					break;
   724				}
   725				preemptall();
   726			}
   727		}
   728		if(runtime·sched.stopwait)
   729			runtime·throw("stoptheworld: not stopped");
   730		for(i = 0; i < runtime·gomaxprocs; i++) {
   731			p = runtime·allp[i];
   732			if(p->status != Pgcstop)
   733				runtime·throw("stoptheworld: not stopped");
   734		}
   735	}
   736	
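	// mhelpgc is the start function for an m created purely as a spare GC helper
	// (see starttheworld below): setting helpgc = -1 makes mstart park the m via
	// stopm until the garbage collector claims it through helpgc above.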
   737	static void
   738	mhelpgc(void)
   739	{
   740		g->m->helpgc = -1;
   741	}
   742	
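	// starttheworld undoes stoptheworld: it applies any pending runtime·newprocs
	// change via procresize, injects goroutines readied by the network poller,
	// hands P's that have local work back to m's (creating new m's if needed),
	// and wakes sysmon if it was waiting.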
   743	void
   744	runtime·starttheworld(void)
   745	{
   746		P *p, *p1;
   747		M *mp;
   748		G *gp;
   749		bool add;
   750	
   751		g->m->locks++;  // disable preemption because it can be holding p in a local var
   752		gp = runtime·netpoll(false);  // non-blocking
   753		injectglist(gp);
   754		add = needaddgcproc();
   755		runtime·lock(&runtime·sched.lock);
   756		if(runtime·newprocs) {
   757			procresize(runtime·newprocs);
   758			runtime·newprocs = 0;
   759		} else
   760			procresize(runtime·gomaxprocs);
   761		runtime·sched.gcwaiting = 0;
   762	
   763		p1 = nil;
   764		while(p = pidleget()) {
   765			// procresize() puts p's with work at the beginning of the list.
   766			// Once we reach a p without a run queue, the rest don't have one either.
   767			if(p->runqhead == p->runqtail) {
   768				pidleput(p);
   769				break;
   770			}
   771			p->m = mget();
   772			p->link = p1;
   773			p1 = p;
   774		}
   775		if(runtime·sched.sysmonwait) {
   776			runtime·sched.sysmonwait = false;
   777			runtime·notewakeup(&runtime·sched.sysmonnote);
   778		}
   779		runtime·unlock(&runtime·sched.lock);
   780	
   781		while(p1) {
   782			p = p1;
   783			p1 = p1->link;
   784			if(p->m) {
   785				mp = p->m;
   786				p->m = nil;
   787				if(mp->nextp)
   788					runtime·throw("starttheworld: inconsistent mp->nextp");
   789				mp->nextp = p;
   790				runtime·notewakeup(&mp->park);
   791			} else {
   792				// Start M to run P.  Do not start another M below.
   793				newm(nil, p);
   794				add = false;
   795			}
   796		}
   797	
   798		if(add) {
   799			// If GC could have used another helper proc, start one now,
   800			// in the hope that it will be available next time.
   801			// It would have been even better to start it before the collection,
   802			// but doing so requires allocating memory, so it's tricky to
   803			// coordinate.  This lazy approach works out in practice:
   804			// we don't mind if the first couple gc rounds don't have quite
   805			// the maximum number of procs.
   806			newm(mhelpgc, nil);
   807		}
   808		g->m->locks--;
   809		if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
   810			g->stackguard0 = StackPreempt;
   811	}
   812	
   813	static void mstart(void);
   814	
   815	// Called to start an M.
   816	#pragma textflag NOSPLIT
   817	void
   818	runtime·mstart(void)
   819	{
   820		uintptr x, size;
   821		
   822		if(g->stack.lo == 0) {
   823			// Initialize stack bounds from system stack.
   824			// Cgo may have left stack size in stack.hi.
   825			size = g->stack.hi;
   826			if(size == 0)
   827				size = 8192;
   828			g->stack.hi = (uintptr)&x;
   829			g->stack.lo = g->stack.hi - size + 1024;
   830		}
   831		
   832		// Initialize stack guards so that we can start calling
   833		// both Go and C functions with stack growth prologues.
   834		g->stackguard0 = g->stack.lo + StackGuard;
   835		g->stackguard1 = g->stackguard0;
   836		mstart();
   837	}
   838	
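	// mstart is the part of M startup that runs on g0 with stack bounds already
	// set: it records the g0 scheduling context for mcall, runs minit and any
	// mstartfn, installs signal handlers on m0, acquires nextp (unless this m is
	// m0 or a GC helper), and enters the scheduler. It never returns.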
   839	static void
   840	mstart(void)
   841	{
   842		if(g != g->m->g0)
   843			runtime·throw("bad runtime·mstart");
   844	
   845		// Record top of stack for use by mcall.
   846		// Once we call schedule we're never coming back,
   847		// so other calls can reuse this stack space.
   848		runtime·gosave(&g->m->g0->sched);
   849		g->m->g0->sched.pc = (uintptr)-1;  // make sure it is never used
   850		runtime·asminit();
   851		runtime·minit();
   852	
   853		// Install signal handlers; after minit so that minit can
   854		// prepare the thread to be able to handle the signals.
   855		if(g->m == &runtime·m0)
   856			runtime·initsig();
   857		
   858		if(g->m->mstartfn)
   859			g->m->mstartfn();
   860	
   861		if(g->m->helpgc) {
   862			g->m->helpgc = 0;
   863			stopm();
   864		} else if(g->m != &runtime·m0) {
   865			acquirep(g->m->nextp);
   866			g->m->nextp = nil;
   867		}
   868		schedule();
   869	
   870		// TODO(brainman): This point is never reached, because scheduler
   871		// does not release os threads at the moment. But once this path
   872		// is enabled, we must remove our seh here.
   873	}
   874	
   875	// When running with cgo, we call _cgo_thread_start
   876	// to start threads for us so that we can play nicely with
   877	// foreign code.
   878	void (*_cgo_thread_start)(void*);
   879	
   880	typedef struct CgoThreadStart CgoThreadStart;
   881	struct CgoThreadStart
   882	{
   883		G *g;
   884		uintptr *tls;
   885		void (*fn)(void);
   886	};
   887	
   888	M *runtime·newM(void); // in proc.go
   889	
   890	// Allocate a new m unassociated with any thread.
   891	// Can use p for allocation context if needed.
   892	M*
   893	runtime·allocm(P *p)
   894	{
   895		M *mp;
   896	
   897		g->m->locks++;  // disable GC because it can be called from sysmon
   898		if(g->m->p == nil)
   899			acquirep(p);  // temporarily borrow p for mallocs in this function
   900		mp = runtime·newM();
   901		mcommoninit(mp);
   902	
   903	// In case of cgo or Solaris, pthread_create will make us a stack.
   904	// Windows and Plan 9 will lay out the sched stack on the OS stack.
   905		if(runtime·iscgo || Solaris || Windows || Plan9)
   906			mp->g0 = runtime·malg(-1);
   907		else
   908			mp->g0 = runtime·malg(8192);
   909		mp->g0->m = mp;
   910	
   911		if(p == g->m->p)
   912			releasep();
   913		g->m->locks--;
   914		if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
   915			g->stackguard0 = StackPreempt;
   916	
   917		return mp;
   918	}
   919	
   920	G *runtime·newG(void); // in proc.go
   921	
   922	static G*
   923	allocg(void)
   924	{
   925		return runtime·newG();
   926	}
   927	
   928	static M* lockextra(bool nilokay);
   929	static void unlockextra(M*);
   930	
   931	// needm is called when a cgo callback happens on a
   932	// thread without an m (a thread not created by Go).
   933	// In this case, needm is expected to find an m to use
   934	// and return with m, g initialized correctly.
   935	// Since m and g are not set now (likely nil, but see below)
   936	// needm is limited in what routines it can call. In particular
   937	// it can only call nosplit functions (textflag 7) and cannot
   938	// do any scheduling that requires an m.
   939	//
   940	// In order to avoid needing heavy lifting here, we adopt
   941	// the following strategy: there is a stack of available m's
   942	// that can be stolen. Using compare-and-swap
   943	// to pop from the stack has ABA races, so we simulate
   944	// a lock by doing an exchange (via casp) to steal the stack
   945	// head and replace the top pointer with MLOCKED (1).
   946	// This serves as a simple spin lock that we can use even
   947	// without an m. The thread that locks the stack in this way
   948	// unlocks the stack by storing a valid stack head pointer.
   949	//
   950	// In order to make sure that there is always an m structure
   951	// available to be stolen, we maintain the invariant that there
   952	// is always one more than needed. At the beginning of the
   953	// program (if cgo is in use) the list is seeded with a single m.
   954	// If needm finds that it has taken the last m off the list, its job
   955	// is - once it has installed its own m so that it can do things like
   956	// allocate memory - to create a spare m and put it on the list.
   957	//
   958	// Each of these extra m's also has a g0 and a curg that are
   959	// pressed into service as the scheduling stack and current
   960	// goroutine for the duration of the cgo callback.
   961	//
   962	// When the callback is done with the m, it calls dropm to
   963	// put the m back on the list.
   964	#pragma textflag NOSPLIT
   965	void
   966	runtime·needm(byte x)
   967	{
   968		M *mp;
   969	
   970		if(runtime·needextram) {
   971			// Can happen if C/C++ code calls Go from a global ctor.
   972	// Cannot throw, because the scheduler is not initialized yet.
   973			runtime·write(2, "fatal error: cgo callback before cgo call\n",
   974				sizeof("fatal error: cgo callback before cgo call\n")-1);
   975			runtime·exit(1);
   976		}
   977	
   978		// Lock extra list, take head, unlock popped list.
   979		// nilokay=false is safe here because of the invariant above,
   980		// that the extra list always contains or will soon contain
   981		// at least one m.
   982		mp = lockextra(false);
   983	
   984		// Set needextram when we've just emptied the list,
   985		// so that the eventual call into cgocallbackg will
   986		// allocate a new m for the extra list. We delay the
   987		// allocation until then so that it can be done
   988		// after exitsyscall makes sure it is okay to be
   989		// running at all (that is, there's no garbage collection
   990		// running right now).
   991		mp->needextram = mp->schedlink == nil;
   992		unlockextra(mp->schedlink);
   993	
   994		// Install g (= m->g0) and set the stack bounds
   995		// to match the current stack. We don't actually know
   996		// how big the stack is, like we don't know how big any
   997		// scheduling stack is, but we assume there's at least 32 kB,
   998		// which is more than enough for us.
   999		runtime·setg(mp->g0);
  1000		g->stack.hi = (uintptr)(&x + 1024);
  1001		g->stack.lo = (uintptr)(&x - 32*1024);
  1002		g->stackguard0 = g->stack.lo + StackGuard;
  1003	
  1004		// Initialize this thread to use the m.
  1005		runtime·asminit();
  1006		runtime·minit();
  1007	}
  1008	
  1009	// newextram allocates an m and puts it on the extra list.
  1010	// It is called with a working local m, so that it can do things
  1011	// like call schedlock and allocate.
  1012	void
  1013	runtime·newextram(void)
  1014	{
  1015		M *mp, *mnext;
  1016		G *gp;
  1017	
  1018		// Create extra goroutine locked to extra m.
  1019		// The goroutine is the context in which the cgo callback will run.
  1020		// The sched.pc will never be returned to, but setting it to
  1021		// runtime.goexit makes clear to the traceback routines where
  1022		// the goroutine stack ends.
  1023		mp = runtime·allocm(nil);
  1024		gp = runtime·malg(4096);
  1025		gp->sched.pc = (uintptr)runtime·goexit + PCQuantum;
  1026		gp->sched.sp = gp->stack.hi;
  1027		gp->sched.sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
  1028		gp->sched.lr = 0;
  1029		gp->sched.g = gp;
  1030		gp->syscallpc = gp->sched.pc;
  1031		gp->syscallsp = gp->sched.sp;
  1032		// malg returns status as Gidle, change to Gsyscall before adding to allg
  1033		// where GC will see it.
  1034		runtime·casgstatus(gp, Gidle, Gsyscall);
  1035		gp->m = mp;
  1036		mp->curg = gp;
  1037		mp->locked = LockInternal;
  1038		mp->lockedg = gp;
  1039		gp->lockedm = mp;
  1040		gp->goid = runtime·xadd64(&runtime·sched.goidgen, 1);
  1041		if(raceenabled)
  1042			gp->racectx = runtime·racegostart(runtime·newextram);
  1043		// put on allg for garbage collector
  1044		runtime·allgadd(gp);
  1045	
  1046		// Add m to the extra list.
  1047		mnext = lockextra(true);
  1048		mp->schedlink = mnext;
  1049		unlockextra(mp);
  1050	}
  1051	
  1052	// dropm is called when a cgo callback has called needm but is now
  1053	// done with the callback and returning back into the non-Go thread.
  1054	// It puts the current m back onto the extra list.
  1055	//
  1056	// The main expense here is the call to signalstack to release the
  1057	// m's signal stack, and then the call to needm on the next callback
  1058	// from this thread. It is tempting to try to save the m for next time,
  1059	// which would eliminate both these costs, but there might not be
  1060	// a next time: the current thread (which Go does not control) might exit.
  1061	// If we saved the m for that thread, there would be an m leak each time
  1062	// such a thread exited. Instead, we acquire and release an m on each
  1063	// call. These should typically not be scheduling operations, just a few
  1064	// atomics, so the cost should be small.
  1065	//
  1066	// TODO(rsc): An alternative would be to allocate a dummy pthread per-thread
  1067	// variable using pthread_key_create. Unlike the pthread keys we already use
  1068	// on OS X, this dummy key would never be read by Go code. It would exist
  1069	// only so that we could register a thread-exit-time destructor.
  1070	// That destructor would put the m back onto the extra list.
  1071	// This is purely a performance optimization. The current version,
  1072	// in which dropm happens on each cgo call, is still correct too.
  1073	// We may have to keep the current version on systems with cgo
  1074	// but without pthreads, like Windows.
  1075	void
  1076	runtime·dropm(void)
  1077	{
  1078		M *mp, *mnext;
  1079	
  1080		// Undo whatever initialization minit did during needm.
  1081		runtime·unminit();
  1082	
  1083		// Clear m and g, and return m to the extra list.
  1084	// After the call to setg we can only call nosplit functions.
  1085		mp = g->m;
  1086		runtime·setg(nil);
  1087	
  1088		mnext = lockextra(true);
  1089		mp->schedlink = mnext;
  1090		unlockextra(mp);
  1091	}
  1092	
  1093	#define MLOCKED ((M*)1)
  1094	
  1095	// lockextra locks the extra list and returns the list head.
  1096	// The caller must unlock the list by storing a new list head
  1097	// to runtime.extram. If nilokay is true, then lockextra will
  1098	// return a nil list head if that's what it finds. If nilokay is false,
  1099	// lockextra will keep waiting until the list head is no longer nil.
  1100	#pragma textflag NOSPLIT
  1101	static M*
  1102	lockextra(bool nilokay)
  1103	{
  1104		M *mp;
  1105		void (*yield)(void);
  1106	
  1107		for(;;) {
  1108			mp = runtime·atomicloadp(&runtime·extram);
  1109			if(mp == MLOCKED) {
  1110				yield = runtime·osyield;
  1111				yield();
  1112				continue;
  1113			}
  1114			if(mp == nil && !nilokay) {
  1115				runtime·usleep(1);
  1116				continue;
  1117			}
  1118			if(!runtime·casp(&runtime·extram, mp, MLOCKED)) {
  1119				yield = runtime·osyield;
  1120				yield();
  1121				continue;
  1122			}
  1123			break;
  1124		}
  1125		return mp;
  1126	}
  1127	
  1128	#pragma textflag NOSPLIT
  1129	static void
  1130	unlockextra(M *mp)
  1131	{
  1132		runtime·atomicstorep(&runtime·extram, mp);
  1133	}
  1134	
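	// The lock/unlock pairing used with the extra list, as in needm, newextram,
	// and dropm above:
	//
	//	mp = lockextra(false);        // spin until a non-nil head is popped; list is now locked
	//	unlockextra(mp->schedlink);   // publish the new head, unlocking the list
	//
	// and, to push:
	//
	//	mnext = lockextra(true);
	//	mp->schedlink = mnext;
	//	unlockextra(mp);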
  1135	
  1136	// Create a new m.  It will start off with a call to fn, or else the scheduler.
  1137	static void
  1138	newm(void(*fn)(void), P *p)
  1139	{
  1140		M *mp;
  1141	
  1142		mp = runtime·allocm(p);
  1143		mp->nextp = p;
  1144		mp->mstartfn = fn;
  1145	
  1146		if(runtime·iscgo) {
  1147			CgoThreadStart ts;
  1148	
  1149			if(_cgo_thread_start == nil)
  1150				runtime·throw("_cgo_thread_start missing");
  1151			ts.g = mp->g0;
  1152			ts.tls = mp->tls;
  1153			ts.fn = runtime·mstart;
  1154			runtime·asmcgocall(_cgo_thread_start, &ts);
  1155			return;
  1156		}
  1157		runtime·newosproc(mp, (byte*)mp->g0->stack.hi);
  1158	}
  1159	
  1160	// Stops execution of the current m until new work is available.
  1161	// Returns with acquired P.
  1162	static void
  1163	stopm(void)
  1164	{
  1165		if(g->m->locks)
  1166			runtime·throw("stopm holding locks");
  1167		if(g->m->p)
  1168			runtime·throw("stopm holding p");
  1169		if(g->m->spinning) {
  1170			g->m->spinning = false;
  1171			runtime·xadd(&runtime·sched.nmspinning, -1);
  1172		}
  1173	
  1174	retry:
  1175		runtime·lock(&runtime·sched.lock);
  1176		mput(g->m);
  1177		runtime·unlock(&runtime·sched.lock);
  1178		runtime·notesleep(&g->m->park);
  1179		runtime·noteclear(&g->m->park);
  1180		if(g->m->helpgc) {
  1181			runtime·gchelper();
  1182			g->m->helpgc = 0;
  1183			g->m->mcache = nil;
  1184			goto retry;
  1185		}
  1186		acquirep(g->m->nextp);
  1187		g->m->nextp = nil;
  1188	}
  1189	
  1190	static void
  1191	mspinning(void)
  1192	{
  1193		g->m->spinning = true;
  1194	}
  1195	
  1196	// Schedules some M to run the p (creates an M if necessary).
  1197	// If p==nil, tries to get an idle P; if there are no idle P's, does nothing.
  1198	static void
  1199	startm(P *p, bool spinning)
  1200	{
  1201		M *mp;
  1202		void (*fn)(void);
  1203	
  1204		runtime·lock(&runtime·sched.lock);
  1205		if(p == nil) {
  1206			p = pidleget();
  1207			if(p == nil) {
  1208				runtime·unlock(&runtime·sched.lock);
  1209				if(spinning)
  1210					runtime·xadd(&runtime·sched.nmspinning, -1);
  1211				return;
  1212			}
  1213		}
  1214		mp = mget();
  1215		runtime·unlock(&runtime·sched.lock);
  1216		if(mp == nil) {
  1217			fn = nil;
  1218			if(spinning)
  1219				fn = mspinning;
  1220			newm(fn, p);
  1221			return;
  1222		}
  1223		if(mp->spinning)
  1224			runtime·throw("startm: m is spinning");
  1225		if(mp->nextp)
  1226			runtime·throw("startm: m has p");
  1227		mp->spinning = spinning;
  1228		mp->nextp = p;
  1229		runtime·notewakeup(&mp->park);
  1230	}
  1231	
  1232	// Hands off P from syscall or locked M.
  1233	static void
  1234	handoffp(P *p)
  1235	{
  1236		// if it has local work, start it straight away
  1237		if(p->runqhead != p->runqtail || runtime·sched.runqsize) {
  1238			startm(p, false);
  1239			return;
  1240		}
  1241		// no local work, check that there are no spinning/idle M's,
  1242		// otherwise our help is not required
  1243		if(runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) == 0 &&  // TODO: fast atomic
  1244			runtime·cas(&runtime·sched.nmspinning, 0, 1)){
  1245			startm(p, true);
  1246			return;
  1247		}
  1248		runtime·lock(&runtime·sched.lock);
  1249		if(runtime·sched.gcwaiting) {
  1250			p->status = Pgcstop;
  1251			if(--runtime·sched.stopwait == 0)
  1252				runtime·notewakeup(&runtime·sched.stopnote);
  1253			runtime·unlock(&runtime·sched.lock);
  1254			return;
  1255		}
  1256		if(runtime·sched.runqsize) {
  1257			runtime·unlock(&runtime·sched.lock);
  1258			startm(p, false);
  1259			return;
  1260		}
  1261	// If this is the last running P and nobody is polling the network,
  1262	// we need to wake up another M to poll the network.
  1263		if(runtime·sched.npidle == runtime·gomaxprocs-1 && runtime·atomicload64(&runtime·sched.lastpoll) != 0) {
  1264			runtime·unlock(&runtime·sched.lock);
  1265			startm(p, false);
  1266			return;
  1267		}
  1268		pidleput(p);
  1269		runtime·unlock(&runtime·sched.lock);
  1270	}
  1271	
  1272	// Tries to add one more P to execute G's.
  1273	// Called when a G is made runnable (newproc, ready).
  1274	static void
  1275	wakep(void)
  1276	{
  1277		// be conservative about spinning threads
  1278		if(!runtime·cas(&runtime·sched.nmspinning, 0, 1))
  1279			return;
  1280		startm(nil, true);
  1281	}
  1282	
  1283	// Stops execution of the current m that is locked to a g until the g is runnable again.
  1284	// Returns with acquired P.
  1285	static void
  1286	stoplockedm(void)
  1287	{
  1288		P *p;
  1289		uint32 status;
  1290	
  1291		if(g->m->lockedg == nil || g->m->lockedg->lockedm != g->m)
  1292			runtime·throw("stoplockedm: inconsistent locking");
  1293		if(g->m->p) {
  1294			// Schedule another M to run this p.
  1295			p = releasep();
  1296			handoffp(p);
  1297		}
  1298		incidlelocked(1);
  1299		// Wait until another thread schedules lockedg again.
  1300		runtime·notesleep(&g->m->park);
  1301		runtime·noteclear(&g->m->park);
  1302		status = runtime·readgstatus(g->m->lockedg);
  1303		if((status&~Gscan) != Grunnable){
  1304		runtime·printf("runtime:stoplockedm: g is not Grunnable or Gscanrunnable\n");
  1305			dumpgstatus(g);
  1306			runtime·throw("stoplockedm: not runnable");
  1307		}
  1308		acquirep(g->m->nextp);
  1309		g->m->nextp = nil;
  1310	}
  1311	
  1312	// Schedules the locked m to run the locked gp.
  1313	static void
  1314	startlockedm(G *gp)
  1315	{
  1316		M *mp;
  1317		P *p;
  1318	
  1319		mp = gp->lockedm;
  1320		if(mp == g->m)
  1321			runtime·throw("startlockedm: locked to me");
  1322		if(mp->nextp)
  1323			runtime·throw("startlockedm: m has p");
  1324		// directly handoff current P to the locked m
  1325		incidlelocked(-1);
  1326		p = releasep();
  1327		mp->nextp = p;
  1328		runtime·notewakeup(&mp->park);
  1329		stopm();
  1330	}
  1331	
  1332	// Stops the current m for stoptheworld.
  1333	// Returns when the world is restarted.
  1334	static void
  1335	gcstopm(void)
  1336	{
  1337		P *p;
  1338	
  1339		if(!runtime·sched.gcwaiting)
  1340			runtime·throw("gcstopm: not waiting for gc");
  1341		if(g->m->spinning) {
  1342			g->m->spinning = false;
  1343			runtime·xadd(&runtime·sched.nmspinning, -1);
  1344		}
  1345		p = releasep();
  1346		runtime·lock(&runtime·sched.lock);
  1347		p->status = Pgcstop;
  1348		if(--runtime·sched.stopwait == 0)
  1349			runtime·notewakeup(&runtime·sched.stopnote);
  1350		runtime·unlock(&runtime·sched.lock);
  1351		stopm();
  1352	}
  1353	
  1354	// Schedules gp to run on the current M.
  1355	// Never returns.
  1356	static void
  1357	execute(G *gp)
  1358	{
  1359		int32 hz;
  1360		
  1361		runtime·casgstatus(gp, Grunnable, Grunning);
  1362		gp->waitsince = 0;
  1363		gp->preempt = false;
  1364		gp->stackguard0 = gp->stack.lo + StackGuard;
  1365		g->m->p->schedtick++;
  1366		g->m->curg = gp;
  1367		gp->m = g->m;
  1368	
  1369		// Check whether the profiler needs to be turned on or off.
  1370		hz = runtime·sched.profilehz;
  1371		if(g->m->profilehz != hz)
  1372			runtime·resetcpuprofiler(hz);
  1373	
  1374		runtime·gogo(&gp->sched);
  1375	}
  1376	
  1377	// Finds a runnable goroutine to execute.
  1378	// Tries to steal from other P's, get g from global queue, poll network.
  1379	static G*
  1380	findrunnable(void)
  1381	{
  1382		G *gp;
  1383		P *p;
  1384		int32 i;
  1385	
  1386	top:
  1387		if(runtime·sched.gcwaiting) {
  1388			gcstopm();
  1389			goto top;
  1390		}
  1391		if(runtime·fingwait && runtime·fingwake && (gp = runtime·wakefing()) != nil)
  1392			runtime·ready(gp);
  1393		// local runq
  1394		gp = runqget(g->m->p);
  1395		if(gp)
  1396			return gp;
  1397		// global runq
  1398		if(runtime·sched.runqsize) {
  1399			runtime·lock(&runtime·sched.lock);
  1400			gp = globrunqget(g->m->p, 0);
  1401			runtime·unlock(&runtime·sched.lock);
  1402			if(gp)
  1403				return gp;
  1404		}
  1405		// poll network
  1406		gp = runtime·netpoll(false);  // non-blocking
  1407		if(gp) {
  1408			injectglist(gp->schedlink);
  1409			runtime·casgstatus(gp, Gwaiting, Grunnable);
  1410			return gp;
  1411		}
  1412		// If number of spinning M's >= number of busy P's, block.
  1413		// This is necessary to prevent excessive CPU consumption
  1414		// when GOMAXPROCS>>1 but the program parallelism is low.
  1415		if(!g->m->spinning && 2 * runtime·atomicload(&runtime·sched.nmspinning) >= runtime·gomaxprocs - runtime·atomicload(&runtime·sched.npidle))  // TODO: fast atomic
  1416			goto stop;
  1417		if(!g->m->spinning) {
  1418			g->m->spinning = true;
  1419			runtime·xadd(&runtime·sched.nmspinning, 1);
  1420		}
  1421		// random steal from other P's
  1422		for(i = 0; i < 2*runtime·gomaxprocs; i++) {
  1423			if(runtime·sched.gcwaiting)
  1424				goto top;
  1425			p = runtime·allp[runtime·fastrand1()%runtime·gomaxprocs];
  1426			if(p == g->m->p)
  1427				gp = runqget(p);
  1428			else
  1429				gp = runqsteal(g->m->p, p);
  1430			if(gp)
  1431				return gp;
  1432		}
  1433	stop:
  1434		// return P and block
  1435		runtime·lock(&runtime·sched.lock);
  1436		if(runtime·sched.gcwaiting) {
  1437			runtime·unlock(&runtime·sched.lock);
  1438			goto top;
  1439		}
  1440		if(runtime·sched.runqsize) {
  1441			gp = globrunqget(g->m->p, 0);
  1442			runtime·unlock(&runtime·sched.lock);
  1443			return gp;
  1444		}
  1445		p = releasep();
  1446		pidleput(p);
  1447		runtime·unlock(&runtime·sched.lock);
  1448		if(g->m->spinning) {
  1449			g->m->spinning = false;
  1450			runtime·xadd(&runtime·sched.nmspinning, -1);
  1451		}
  1452		// check all runqueues once again
  1453		for(i = 0; i < runtime·gomaxprocs; i++) {
  1454			p = runtime·allp[i];
  1455			if(p && p->runqhead != p->runqtail) {
  1456				runtime·lock(&runtime·sched.lock);
  1457				p = pidleget();
  1458				runtime·unlock(&runtime·sched.lock);
  1459				if(p) {
  1460					acquirep(p);
  1461					goto top;
  1462				}
  1463				break;
  1464			}
  1465		}
  1466		// poll network
  1467		if(runtime·xchg64(&runtime·sched.lastpoll, 0) != 0) {
  1468			if(g->m->p)
  1469				runtime·throw("findrunnable: netpoll with p");
  1470			if(g->m->spinning)
  1471				runtime·throw("findrunnable: netpoll with spinning");
  1472			gp = runtime·netpoll(true);  // block until new work is available
  1473			runtime·atomicstore64(&runtime·sched.lastpoll, runtime·nanotime());
  1474			if(gp) {
  1475				runtime·lock(&runtime·sched.lock);
  1476				p = pidleget();
  1477				runtime·unlock(&runtime·sched.lock);
  1478				if(p) {
  1479					acquirep(p);
  1480					injectglist(gp->schedlink);
  1481					runtime·casgstatus(gp, Gwaiting, Grunnable);
  1482					return gp;
  1483				}
  1484				injectglist(gp);
  1485			}
  1486		}
  1487		stopm();
  1488		goto top;
  1489	}
  1490	
  1491	static void
  1492	resetspinning(void)
  1493	{
  1494		int32 nmspinning;
  1495	
  1496		if(g->m->spinning) {
  1497			g->m->spinning = false;
  1498			nmspinning = runtime·xadd(&runtime·sched.nmspinning, -1);
  1499			if(nmspinning < 0)
  1500				runtime·throw("findrunnable: negative nmspinning");
  1501		} else
  1502			nmspinning = runtime·atomicload(&runtime·sched.nmspinning);
  1503	
  1504	// M wakeup policy is deliberately somewhat conservative (see nmspinning handling),
  1505	// so see if we need to wake up another P here.
  1506		if (nmspinning == 0 && runtime·atomicload(&runtime·sched.npidle) > 0)
  1507			wakep();
  1508	}
  1509	
  1510	// Injects the list of runnable G's into the scheduler.
  1511	// Can run concurrently with GC.
  1512	static void
  1513	injectglist(G *glist)
  1514	{
  1515		int32 n;
  1516		G *gp;
  1517	
  1518		if(glist == nil)
  1519			return;
  1520		runtime·lock(&runtime·sched.lock);
  1521		for(n = 0; glist; n++) {
  1522			gp = glist;
  1523			glist = gp->schedlink;
  1524			runtime·casgstatus(gp, Gwaiting, Grunnable); 
  1525			globrunqput(gp);
  1526		}
  1527		runtime·unlock(&runtime·sched.lock);
  1528	
  1529		for(; n && runtime·sched.npidle; n--)
  1530			startm(nil, false);
  1531	}
  1532	
  1533	// One round of scheduler: find a runnable goroutine and execute it.
  1534	// Never returns.
  1535	static void
  1536	schedule(void)
  1537	{
  1538		G *gp;
  1539		uint32 tick;
  1540	
  1541		if(g->m->locks)
  1542			runtime·throw("schedule: holding locks");
  1543	
  1544		if(g->m->lockedg) {
  1545			stoplockedm();
  1546			execute(g->m->lockedg);  // Never returns.
  1547		}
  1548	
  1549	top:
  1550		if(runtime·sched.gcwaiting) {
  1551			gcstopm();
  1552			goto top;
  1553		}
  1554	
  1555		gp = nil;
  1556		// Check the global runnable queue once in a while to ensure fairness.
  1557		// Otherwise two goroutines can completely occupy the local runqueue
  1558		// by constantly respawning each other.
  1559		tick = g->m->p->schedtick;
  1560	// This is a fancy way to say tick%61==0;
  1561	// it uses 2 MUL instructions instead of a single DIV and so is faster on modern processors.
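	// (0x4325c53f is 2^36/61 rounded up, i.e. 1126548799, so for a 32-bit tick
	// ((uint64)tick*0x4325c53f)>>36 equals tick/61 exactly; e.g. tick==122 gives
	// 122 - 2*61 == 0 and the global queue is checked.)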
  1562		if(tick - (((uint64)tick*0x4325c53fu)>>36)*61 == 0 && runtime·sched.runqsize > 0) {
  1563			runtime·lock(&runtime·sched.lock);
  1564			gp = globrunqget(g->m->p, 1);
  1565			runtime·unlock(&runtime·sched.lock);
  1566			if(gp)
  1567				resetspinning();
  1568		}
  1569		if(gp == nil) {
  1570			gp = runqget(g->m->p);
  1571			if(gp && g->m->spinning)
  1572				runtime·throw("schedule: spinning with local work");
  1573		}
  1574		if(gp == nil) {
  1575			gp = findrunnable();  // blocks until work is available
  1576			resetspinning();
  1577		}
  1578	
  1579		if(gp->lockedm) {
  1580			// Hands off own p to the locked m,
  1581			// then blocks waiting for a new p.
  1582			startlockedm(gp);
  1583			goto top;
  1584		}
  1585	
  1586		execute(gp);
  1587	}
  1588	
  1589	// dropg removes the association between m and the current goroutine m->curg (gp for short).
  1590	// Typically a caller sets gp's status away from Grunning and then
  1591	// immediately calls dropg to finish the job. The caller is also responsible
  1592	// for arranging that gp will be restarted using runtime·ready at an
  1593	// appropriate time. After calling dropg and arranging for gp to be
  1594	// readied later, the caller can do other work but eventually should
  1595	// call schedule to restart the scheduling of goroutines on this m.
  1596	static void
  1597	dropg(void)
  1598	{
  1599		if(g->m->lockedg == nil) {
  1600			g->m->curg->m = nil;
  1601			g->m->curg = nil;
  1602		}
  1603	}
  1604	
  1605	// Puts the current goroutine into a waiting state and calls unlockf.
  1606	// If unlockf returns false, the goroutine is resumed.
  1607	void
  1608	runtime·park(bool(*unlockf)(G*, void*), void *lock, String reason)
  1609	{
  1610		void (*fn)(G*);
  1611	
  1612		g->m->waitlock = lock;
  1613		g->m->waitunlockf = unlockf;
  1614		g->waitreason = reason;
  1615		fn = runtime·park_m;
  1616		runtime·mcall(&fn);
  1617	}
  1618	
  1619	bool
  1620	runtime·parkunlock_c(G *gp, void *lock)
  1621	{
  1622		USED(gp);
  1623		runtime·unlock(lock);
  1624		return true;
  1625	}
  1626	
  1627	// Puts the current goroutine into a waiting state and unlocks the lock.
  1628	// The goroutine can be made runnable again by calling runtime·ready(gp).
  1629	void
  1630	runtime·parkunlock(Mutex *lock, String reason)
  1631	{
  1632		runtime·park(runtime·parkunlock_c, lock, reason);
  1633	}
  1634	
  1635	// runtime·park continuation on g0.
  1636	void
  1637	runtime·park_m(G *gp)
  1638	{
  1639		bool ok;
  1640	
  1641		runtime·casgstatus(gp, Grunning, Gwaiting);
  1642		dropg();
  1643	
  1644		if(g->m->waitunlockf) {
  1645			ok = g->m->waitunlockf(gp, g->m->waitlock);
  1646			g->m->waitunlockf = nil;
  1647			g->m->waitlock = nil;
  1648			if(!ok) {
  1649				runtime·casgstatus(gp, Gwaiting, Grunnable); 
  1650				execute(gp);  // Schedule it back, never returns.
  1651			}
  1652		}
  1653	
  1654		schedule();
  1655	}
  1656	
  1657	// Gosched continuation on g0.
  1658	void
  1659	runtime·gosched_m(G *gp)
  1660	{
  1661		uint32 status;
  1662	
  1663		status = runtime·readgstatus(gp);
  1664		if((status&~Gscan) != Grunning){
  1665			dumpgstatus(gp);
  1666			runtime·throw("bad g status");
  1667		}
  1668		runtime·casgstatus(gp, Grunning, Grunnable);
  1669		dropg();
  1670		runtime·lock(&runtime·sched.lock);
  1671		globrunqput(gp);
  1672		runtime·unlock(&runtime·sched.lock);
  1673	
  1674		schedule();
  1675	}
  1676	
  1677	// Finishes execution of the current goroutine.
  1678	// Must be NOSPLIT because it is called from Go.
  1679	#pragma textflag NOSPLIT
  1680	void
  1681	runtime·goexit1(void)
  1682	{
  1683		void (*fn)(G*);
  1684	
  1685		if(raceenabled)
  1686			runtime·racegoend();
  1687		fn = goexit0;
  1688		runtime·mcall(&fn);
  1689	}
  1690	
  1691	// runtime·goexit continuation on g0.
  1692	static void
  1693	goexit0(G *gp)
  1694	{
  1695		runtime·casgstatus(gp, Grunning, Gdead);
  1696		gp->m = nil;
  1697		gp->lockedm = nil;
  1698		g->m->lockedg = nil;
  1699		gp->paniconfault = 0;
  1700		gp->defer = nil; // should be nil already, but just in case.
  1701		gp->panic = nil; // non-nil for Goexit during panic. points at stack-allocated data.
  1702		gp->writebuf.array = nil;
  1703		gp->writebuf.len = 0;
  1704		gp->writebuf.cap = 0;
  1705		gp->waitreason.str = nil;
  1706		gp->waitreason.len = 0;
  1707		gp->param = nil;
  1708	
  1709		dropg();
  1710	
  1711		if(g->m->locked & ~LockExternal) {
  1712			runtime·printf("invalid m->locked = %d\n", g->m->locked);
  1713			runtime·throw("internal lockOSThread error");
  1714		}	
  1715		g->m->locked = 0;
  1716		gfput(g->m->p, gp);
  1717		schedule();
  1718	}
  1719	
  1720	#pragma textflag NOSPLIT
  1721	static void
  1722	save(uintptr pc, uintptr sp)
  1723	{
  1724		g->sched.pc = pc;
  1725		g->sched.sp = sp;
  1726		g->sched.lr = 0;
  1727		g->sched.ret = 0;
  1728		g->sched.ctxt = 0;
  1729		g->sched.g = g;
  1730	}
  1731	
  1732	static void entersyscall_bad(void);
  1733	static void entersyscall_sysmon(void);
  1734	static void entersyscall_gcwait(void);
  1735	
  1736	// The goroutine g is about to enter a system call.
  1737	// Record that it's not using the cpu anymore.
  1738	// This is called only from the go syscall library and cgocall,
  1739	// not from the low-level system calls used by the runtime.
  1740	//
  1741	// Entersyscall cannot split the stack: the runtime·gosave must
  1742	// make g->sched refer to the caller's stack segment, because
  1743	// entersyscall is going to return immediately after.
  1744	//
  1745	// Nothing entersyscall calls can split the stack either.
  1746	// We cannot safely move the stack during an active call to syscall,
  1747	// because we do not know which of the uintptr arguments are
  1748	// really pointers (back into the stack).
  1749	// In practice, this means that we make the fast path run through
  1750	// entersyscall doing no-split things, and the slow path has to use onM
  1751	// to run bigger things on the m stack.
  1752	//
  1753	// reentersyscall is the entry point used by cgo callbacks, where explicitly
  1754	// saved SP and PC are restored. This is needed when exitsyscall will be called
  1755	// from a function further up in the call stack than the parent, as g->syscallsp
  1756	// must always point to a valid stack frame. entersyscall below is the normal
  1757	// entry point for syscalls, which obtains the SP and PC from the caller.
  1758	#pragma textflag NOSPLIT
  1759	void
  1760	runtime·reentersyscall(uintptr pc, uintptr sp)
  1761	{
  1762		void (*fn)(void);
  1763	
  1764		// Disable preemption: during this function g is in Gsyscall status
  1765		// but can have an inconsistent g->sched, and we must not let the GC observe it.
  1766		g->m->locks++;
  1767		
  1768		// Entersyscall must not call any function that might split/grow the stack.
  1769		// (See details in comment above.)
  1770		// Catch calls that might, by replacing the stack guard with something that
  1771		// will trip any stack check and leaving a flag to tell newstack to die.
  1772		g->stackguard0 = StackPreempt;
  1773		g->throwsplit = 1;
  1774	
  1775		// Leave SP around for GC and traceback.
  1776		save(pc, sp);
  1777		g->syscallsp = sp;
  1778		g->syscallpc = pc;
  1779		runtime·casgstatus(g, Grunning, Gsyscall);
  1780		if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
  1781			fn = entersyscall_bad;
  1782			runtime·onM(&fn);
  1783		}
  1784	
  1785		if(runtime·atomicload(&runtime·sched.sysmonwait)) {  // TODO: fast atomic
  1786			fn = entersyscall_sysmon;
  1787			runtime·onM(&fn);
  1788			save(pc, sp);
  1789		}
  1790	
  1791		g->m->mcache = nil;
  1792		g->m->p->m = nil;
  1793		runtime·atomicstore(&g->m->p->status, Psyscall);
  1794		if(runtime·sched.gcwaiting) {
  1795			fn = entersyscall_gcwait;
  1796			runtime·onM(&fn);
  1797			save(pc, sp);
  1798		}
  1799	
  1800		// Goroutines must not split stacks in Gsyscall status (it would corrupt g->sched).
  1801		// We set stackguard to StackPreempt so that the first split stack check calls morestack.
  1802		// Morestack detects this case and throws.
  1803		g->stackguard0 = StackPreempt;
  1804		g->m->locks--;
  1805	}
  1806	
  1807	// Standard syscall entry used by the go syscall library and normal cgo calls.
  1808	#pragma textflag NOSPLIT
  1809	void
  1810	·entersyscall(int32 dummy)
  1811	{
  1812		runtime·reentersyscall((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
  1813	}
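
      	// Conceptual call sequence (a sketch, not the actual syscall package code)
      	// for a potentially blocking system call made from Go:
      	//
      	//	runtime·entersyscall()    // ·entersyscall above: P enters Psyscall
      	//	raw kernel call           // the thread may block; no Go code runs here
      	//	runtime·exitsyscall()     // ·exitsyscall below: re-acquire a P or reschedule
      	//
      	// While the thread is blocked, sysmon's retake (below) may CAS the P from
      	// Psyscall to Pidle and hand it to another M, so other goroutines keep
      	// running; on return, exitsyscallfast tries to take the same or an idle P,
      	// and otherwise exitsyscall0 queues the goroutine and stops this M.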
  1814	
  1815	static void
  1816	entersyscall_bad(void)
  1817	{
  1818		G *gp;
  1819		
  1820		gp = g->m->curg;
  1821		runtime·printf("entersyscall inconsistent %p [%p,%p]\n",
  1822			gp->syscallsp, gp->stack.lo, gp->stack.hi);
  1823		runtime·throw("entersyscall");
  1824	}
  1825	
  1826	static void
  1827	entersyscall_sysmon(void)
  1828	{
  1829		runtime·lock(&runtime·sched.lock);
  1830		if(runtime·atomicload(&runtime·sched.sysmonwait)) {
  1831			runtime·atomicstore(&runtime·sched.sysmonwait, 0);
  1832			runtime·notewakeup(&runtime·sched.sysmonnote);
  1833		}
  1834		runtime·unlock(&runtime·sched.lock);
  1835	}
  1836	
  1837	static void
  1838	entersyscall_gcwait(void)
  1839	{
  1840		runtime·lock(&runtime·sched.lock);
  1841		if (runtime·sched.stopwait > 0 && runtime·cas(&g->m->p->status, Psyscall, Pgcstop)) {
  1842			if(--runtime·sched.stopwait == 0)
  1843				runtime·notewakeup(&runtime·sched.stopnote);
  1844		}
  1845		runtime·unlock(&runtime·sched.lock);
  1846	}
  1847	
  1848	static void entersyscallblock_handoff(void);
  1849	
  1850	// The same as runtime·entersyscall(), but with a hint that the syscall is blocking.
  1851	#pragma textflag NOSPLIT
  1852	void
  1853	·entersyscallblock(int32 dummy)
  1854	{
  1855		void (*fn)(void);
  1856	
  1857		g->m->locks++;  // see comment in entersyscall
  1858		g->throwsplit = 1;
  1859		g->stackguard0 = StackPreempt;  // see comment in entersyscall
  1860	
  1861		// Leave SP around for GC and traceback.
  1862		save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
  1863		g->syscallsp = g->sched.sp;
  1864		g->syscallpc = g->sched.pc;
  1865		runtime·casgstatus(g, Grunning, Gsyscall);
  1866		if(g->syscallsp < g->stack.lo || g->stack.hi < g->syscallsp) {
  1867			fn = entersyscall_bad;
  1868			runtime·onM(&fn);
  1869		}
  1870		
  1871		fn = entersyscallblock_handoff;
  1872		runtime·onM(&fn);
  1873	
  1874		// Resave for traceback during blocked call.
  1875		save((uintptr)runtime·getcallerpc(&dummy), runtime·getcallersp(&dummy));
  1876	
  1877		g->m->locks--;
  1878	}
  1879	
  1880	static void
  1881	entersyscallblock_handoff(void)
  1882	{
  1883		handoffp(releasep());
  1884	}
  1885	
  1886	// The goroutine g exited its system call.
  1887	// Arrange for it to run on a cpu again.
  1888	// This is called only from the go syscall library, not
  1889	// from the low-level system calls used by the runtime.
  1890	#pragma textflag NOSPLIT
  1891	void
  1892	·exitsyscall(int32 dummy)
  1893	{
  1894		void (*fn)(G*);
  1895	
  1896		g->m->locks++;  // see comment in entersyscall
  1897	
  1898		if(runtime·getcallersp(&dummy) > g->syscallsp)
  1899			runtime·throw("exitsyscall: syscall frame is no longer valid");
  1900	
  1901		g->waitsince = 0;
  1902		if(exitsyscallfast()) {
  1903			// There's a cpu for us, so we can run.
  1904			g->m->p->syscalltick++;
  1905			// We need to cas the status and scan before resuming...
  1906			runtime·casgstatus(g, Gsyscall, Grunning);
  1907	
  1908			// Garbage collector isn't running (since we are),
  1909			// so okay to clear syscallsp.
  1910			g->syscallsp = (uintptr)nil;
  1911			g->m->locks--;
  1912			if(g->preempt) {
  1913				// restore the preemption request in case we've cleared it in newstack
  1914				g->stackguard0 = StackPreempt;
  1915			} else {
  1916				// otherwise restore the real stackguard, we've spoiled it in entersyscall/entersyscallblock
  1917				g->stackguard0 = g->stack.lo + StackGuard;
  1918			}
  1919			g->throwsplit = 0;
  1920			return;
  1921		}
  1922	
  1923		g->m->locks--;
  1924	
  1925		// Call the scheduler.
  1926		fn = exitsyscall0;
  1927		runtime·mcall(&fn);
  1928	
  1929		// Scheduler returned, so we're allowed to run now.
  1930		// Delete the syscallsp information that we left for
  1931		// the garbage collector during the system call.
  1932		// Must wait until now because until gosched returns
  1933		// we don't know for sure that the garbage collector
  1934		// is not running.
  1935		g->syscallsp = (uintptr)nil;
  1936		g->m->p->syscalltick++;
  1937		g->throwsplit = 0;
  1938	}
  1939	
  1940	static void exitsyscallfast_pidle(void);
  1941	
  1942	#pragma textflag NOSPLIT
  1943	static bool
  1944	exitsyscallfast(void)
  1945	{
  1946		void (*fn)(void);
  1947	
  1948		// Freezetheworld sets stopwait but does not retake P's.
  1949		if(runtime·sched.stopwait) {
  1950			g->m->p = nil;
  1951			return false;
  1952		}
  1953	
  1954		// Try to re-acquire the last P.
  1955		if(g->m->p && g->m->p->status == Psyscall && runtime·cas(&g->m->p->status, Psyscall, Prunning)) {
  1956			// There's a cpu for us, so we can run.
  1957			g->m->mcache = g->m->p->mcache;
  1958			g->m->p->m = g->m;
  1959			return true;
  1960		}
  1961		// Try to get any other idle P.
  1962		g->m->p = nil;
  1963		if(runtime·sched.pidle) {
  1964			fn = exitsyscallfast_pidle;
  1965			runtime·onM(&fn);
  1966			if(g->m->scalararg[0]) {
  1967				g->m->scalararg[0] = 0;
  1968				return true;
  1969			}
  1970		}
  1971		return false;
  1972	}
  1973	
  1974	static void
  1975	exitsyscallfast_pidle(void)
  1976	{
  1977		P *p;
  1978	
  1979		runtime·lock(&runtime·sched.lock);
  1980		p = pidleget();
  1981		if(p && runtime·atomicload(&runtime·sched.sysmonwait)) {
  1982			runtime·atomicstore(&runtime·sched.sysmonwait, 0);
  1983			runtime·notewakeup(&runtime·sched.sysmonnote);
  1984		}
  1985		runtime·unlock(&runtime·sched.lock);
  1986		if(p) {
  1987			acquirep(p);
  1988			g->m->scalararg[0] = 1;
  1989		} else
  1990			g->m->scalararg[0] = 0;
  1991	}
  1992	
  1993	// runtime·exitsyscall slow path on g0.
  1994	// Failed to acquire P, enqueue gp as runnable.
  1995	static void
  1996	exitsyscall0(G *gp)
  1997	{
  1998		P *p;
  1999	
  2000		runtime·casgstatus(gp, Gsyscall, Grunnable);
  2001		dropg();
  2002		runtime·lock(&runtime·sched.lock);
  2003		p = pidleget();
  2004		if(p == nil)
  2005			globrunqput(gp);
  2006		else if(runtime·atomicload(&runtime·sched.sysmonwait)) {
  2007			runtime·atomicstore(&runtime·sched.sysmonwait, 0);
  2008			runtime·notewakeup(&runtime·sched.sysmonnote);
  2009		}
  2010		runtime·unlock(&runtime·sched.lock);
  2011		if(p) {
  2012			acquirep(p);
  2013			execute(gp);  // Never returns.
  2014		}
  2015		if(g->m->lockedg) {
  2016			// Wait until another thread schedules gp, and hence this m, again.
  2017			stoplockedm();
  2018			execute(gp);  // Never returns.
  2019		}
  2020		stopm();
  2021		schedule();  // Never returns.
  2022	}
  2023	
  2024	static void
  2025	beforefork(void)
  2026	{
  2027		G *gp;
  2028		
  2029		gp = g->m->curg;
  2030		// Fork can hang if preempted with signals frequently enough (see issue 5517).
  2031		// Ensure that we stay on the same M where we disable profiling.
  2032		gp->m->locks++;
  2033		if(gp->m->profilehz != 0)
  2034			runtime·resetcpuprofiler(0);
  2035	
  2036		// This function is called before fork in syscall package.
  2037		// Code between fork and exec must not allocate memory nor even try to grow stack.
  2038		// Here we spoil g->stackguard to reliably detect any attempts to grow stack.
  2039		// runtime_AfterFork will undo this in parent process, but not in child.
  2040		gp->stackguard0 = StackFork;
  2041	}
  2042	
  2043	// Called from syscall package before fork.
  2044	#pragma textflag NOSPLIT
  2045	void
  2046	syscall·runtime_BeforeFork(void)
  2047	{
  2048		void (*fn)(void);
  2049		
  2050		fn = beforefork;
  2051		runtime·onM(&fn);
  2052	}
  2053	
  2054	static void
  2055	afterfork(void)
  2056	{
  2057		int32 hz;
  2058		G *gp;
  2059		
  2060		gp = g->m->curg;
  2061		// See the comment in runtime_BeforeFork.
  2062		gp->stackguard0 = gp->stack.lo + StackGuard;
  2063	
  2064		hz = runtime·sched.profilehz;
  2065		if(hz != 0)
  2066			runtime·resetcpuprofiler(hz);
  2067		gp->m->locks--;
  2068	}
  2069	
  2070	// Called from syscall package after fork in parent.
  2071	#pragma textflag NOSPLIT
  2072	void
  2073	syscall·runtime_AfterFork(void)
  2074	{
  2075		void (*fn)(void);
  2076		
  2077		fn = afterfork;
  2078		runtime·onM(&fn);
  2079	}
  2080	
  2081	// Hook used by runtime·malg to call runtime·stackalloc on the
  2082	// scheduler stack.  This exists because runtime·stackalloc insists
  2083	// on being called on the scheduler stack, to avoid trying to grow
  2084	// the stack while allocating a new stack segment.
  2085	static void
  2086	mstackalloc(G *gp)
  2087	{
  2088		G *newg;
  2089		uintptr size;
  2090	
  2091		newg = g->m->ptrarg[0];
  2092		size = g->m->scalararg[0];
  2093	
  2094		newg->stack = runtime·stackalloc(size);
  2095	
  2096		runtime·gogo(&gp->sched);
  2097	}
  2098	
  2099	// Allocate a new g, with a stack big enough for stacksize bytes.
  2100	G*
  2101	runtime·malg(int32 stacksize)
  2102	{
  2103		G *newg;
  2104		void (*fn)(G*);
  2105	
  2106		newg = allocg();
  2107		if(stacksize >= 0) {
  2108			stacksize = runtime·round2(StackSystem + stacksize);
  2109			if(g == g->m->g0) {
  2110				// running on scheduler stack already.
  2111				newg->stack = runtime·stackalloc(stacksize);
  2112			} else {
  2113				// have to call stackalloc on scheduler stack.
  2114				g->m->scalararg[0] = stacksize;
  2115				g->m->ptrarg[0] = newg;
  2116				fn = mstackalloc;
  2117				runtime·mcall(&fn);
  2118				g->m->ptrarg[0] = nil;
  2119			}
  2120			newg->stackguard0 = newg->stack.lo + StackGuard;
  2121			newg->stackguard1 = ~(uintptr)0;
  2122		}
  2123		return newg;
  2124	}
  2125	
  2126	static void
  2127	newproc_m(void)
  2128	{
  2129		byte *argp;
  2130		void *callerpc;
  2131		FuncVal *fn;
  2132		int32 siz;
  2133	
  2134		siz = g->m->scalararg[0];
  2135		callerpc = (void*)g->m->scalararg[1];	
  2136		argp = g->m->ptrarg[0];
  2137		fn = (FuncVal*)g->m->ptrarg[1];
  2138	
  2139		runtime·newproc1(fn, argp, siz, 0, callerpc);
  2140		g->m->ptrarg[0] = nil;
  2141		g->m->ptrarg[1] = nil;
  2142	}
  2143	
  2144	// Create a new g running fn with siz bytes of arguments.
  2145	// Put it on the queue of g's waiting to run.
  2146	// The compiler turns a go statement into a call to this.
  2147	// Cannot split the stack because it assumes that the arguments
  2148	// are available sequentially after &fn; they would not be
  2149	// copied if a stack split occurred.
  2150	#pragma textflag NOSPLIT
  2151	void
  2152	runtime·newproc(int32 siz, FuncVal* fn, ...)
  2153	{
  2154		byte *argp;
  2155		void (*mfn)(void);
  2156	
  2157		if(thechar == '5')
  2158			argp = (byte*)(&fn+2);  // skip caller's saved LR
  2159		else
  2160			argp = (byte*)(&fn+1);
  2161	
  2162		g->m->locks++;
  2163		g->m->scalararg[0] = siz;
  2164		g->m->scalararg[1] = (uintptr)runtime·getcallerpc(&siz);
  2165		g->m->ptrarg[0] = argp;
  2166		g->m->ptrarg[1] = fn;
  2167		mfn = newproc_m;
  2168		runtime·onM(&mfn);
  2169		g->m->locks--;
  2170	}
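
      	// Sketch of the lowering the compiler performs for a go statement
      	// (illustrative; not the literal generated code). For
      	//
      	//	go f(x, y)
      	//
      	// the arguments are laid out as for an ordinary call to f and the compiler
      	// emits, roughly,
      	//
      	//	runtime·newproc(siz, f, x, y);
      	//
      	// where siz is the byte size of x and y; newproc1 then copies those bytes
      	// onto the new goroutine's stack before it first runs.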
  2171	
  2172	void runtime·main(void);
  2173	
  2174	// Create a new g running fn with narg bytes of arguments starting
  2175	// at argp and returning nret bytes of results.  callerpc is the
  2176	// address of the go statement that created this.  The new g is put
  2177	// on the queue of g's waiting to run.
  2178	G*
  2179	runtime·newproc1(FuncVal *fn, byte *argp, int32 narg, int32 nret, void *callerpc)
  2180	{
  2181		byte *sp;
  2182		G *newg;
  2183		P *p;
  2184		int32 siz;
  2185	
  2186		if(fn == nil) {
  2187			g->m->throwing = -1;  // do not dump full stacks
  2188			runtime·throw("go of nil func value");
  2189		}
  2190		g->m->locks++;  // disable preemption because it can be holding p in a local var
  2191		siz = narg + nret;
  2192		siz = (siz+7) & ~7;
  2193	
  2194		// We could allocate a larger initial stack if necessary.
  2195		// Not worth it: this is almost always an error.
  2196		// 4*sizeof(uintreg): extra space added below
  2197		// sizeof(uintreg): caller's LR (arm) or return address (x86, in gostartcall).
  2198		if(siz >= StackMin - 4*sizeof(uintreg) - sizeof(uintreg))
  2199			runtime·throw("runtime.newproc: function arguments too large for new goroutine");
  2200	
  2201		p = g->m->p;
  2202		if((newg = gfget(p)) == nil) {
  2203			newg = runtime·malg(StackMin);
  2204			runtime·casgstatus(newg, Gidle, Gdead);
  2205			runtime·allgadd(newg); // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack.
  2206		}
  2207		if(newg->stack.hi == 0)
  2208			runtime·throw("newproc1: newg missing stack");
  2209	
  2210		if(runtime·readgstatus(newg) != Gdead) 
  2211			runtime·throw("newproc1: new g is not Gdead");
  2212	
  2213		sp = (byte*)newg->stack.hi;
  2214		sp -= 4*sizeof(uintreg); // extra space in case of reads slightly beyond frame
  2215		sp -= siz;
  2216		runtime·memmove(sp, argp, narg);
  2217		if(thechar == '5') {
  2218			// caller's LR
  2219			sp -= sizeof(void*);
  2220			*(void**)sp = nil;
  2221		}
  2222	
  2223		runtime·memclr((byte*)&newg->sched, sizeof newg->sched);
  2224		newg->sched.sp = (uintptr)sp;
  2225		newg->sched.pc = (uintptr)runtime·goexit + PCQuantum; // +PCQuantum so that previous instruction is in same function
  2226		newg->sched.g = newg;
  2227		runtime·gostartcallfn(&newg->sched, fn);
  2228		newg->gopc = (uintptr)callerpc;
  2229		runtime·casgstatus(newg, Gdead, Grunnable);
  2230	
  2231		if(p->goidcache == p->goidcacheend) {
  2232			// Sched.goidgen is the last allocated id,
  2233			// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch].
  2234			// At startup sched.goidgen=0, so main goroutine receives goid=1.
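      			// For example, with GoidCacheBatch=16 the first refill does
      			// xadd64(&goidgen, 16) -> 16, so goidcache becomes 1 and
      			// goidcacheend becomes 17; this P hands out goids 1..16
      			// before refilling again.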
  2235			p->goidcache = runtime·xadd64(&runtime·sched.goidgen, GoidCacheBatch);
  2236			p->goidcache -= GoidCacheBatch - 1;
  2237			p->goidcacheend = p->goidcache + GoidCacheBatch;
  2238		}
  2239		newg->goid = p->goidcache++;
  2240		if(raceenabled)
  2241			newg->racectx = runtime·racegostart((void*)callerpc);
  2242		runqput(p, newg);
  2243	
  2244		if(runtime·atomicload(&runtime·sched.npidle) != 0 && runtime·atomicload(&runtime·sched.nmspinning) == 0 && fn->fn != runtime·main)  // TODO: fast atomic
  2245			wakep();
  2246		g->m->locks--;
  2247		if(g->m->locks == 0 && g->preempt)  // restore the preemption request in case we've cleared it in newstack
  2248			g->stackguard0 = StackPreempt;
  2249		return newg;
  2250	}
  2251	
  2252	// Put on gfree list.
  2253	// If local list is too long, transfer a batch to the global list.
  2254	static void
  2255	gfput(P *p, G *gp)
  2256	{
  2257		uintptr stksize;
  2258	
  2259		if(runtime·readgstatus(gp) != Gdead) 
  2260			runtime·throw("gfput: bad status (not Gdead)");
  2261	
  2262		stksize = gp->stack.hi - gp->stack.lo;
  2263		
  2264		if(stksize != FixedStack) {
  2265			// non-standard stack size - free it.
  2266			runtime·stackfree(gp->stack);
  2267			gp->stack.lo = 0;
  2268			gp->stack.hi = 0;
  2269			gp->stackguard0 = 0;
  2270		}
  2271		gp->schedlink = p->gfree;
  2272		p->gfree = gp;
  2273		p->gfreecnt++;
  2274		if(p->gfreecnt >= 64) {
  2275			runtime·lock(&runtime·sched.gflock);
  2276			while(p->gfreecnt >= 32) {
  2277				p->gfreecnt--;
  2278				gp = p->gfree;
  2279				p->gfree = gp->schedlink;
  2280				gp->schedlink = runtime·sched.gfree;
  2281				runtime·sched.gfree = gp;
  2282				runtime·sched.ngfree++;
  2283			}
  2284			runtime·unlock(&runtime·sched.gflock);
  2285		}
  2286	}
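
      	// Note on the thresholds above: a P keeps up to 64 free G's locally and,
      	// once over that limit, moves them to the global list until only 32 remain,
      	// so steady put/get traffic near the limit does not take runtime·sched.gflock
      	// on every call.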
  2287	
  2288	// Get from gfree list.
  2289	// If local list is empty, grab a batch from global list.
  2290	static G*
  2291	gfget(P *p)
  2292	{
  2293		G *gp;
  2294		void (*fn)(G*);
  2295	
  2296	retry:
  2297		gp = p->gfree;
  2298		if(gp == nil && runtime·sched.gfree) {
  2299			runtime·lock(&runtime·sched.gflock);
  2300			while(p->gfreecnt < 32 && runtime·sched.gfree != nil) {
  2301				p->gfreecnt++;
  2302				gp = runtime·sched.gfree;
  2303				runtime·sched.gfree = gp->schedlink;
  2304				runtime·sched.ngfree--;
  2305				gp->schedlink = p->gfree;
  2306				p->gfree = gp;
  2307			}
  2308			runtime·unlock(&runtime·sched.gflock);
  2309			goto retry;
  2310		}
  2311		if(gp) {
  2312			p->gfree = gp->schedlink;
  2313			p->gfreecnt--;
  2314	
  2315			if(gp->stack.lo == 0) {
  2316				// Stack was deallocated in gfput.  Allocate a new one.
  2317				if(g == g->m->g0) {
  2318					gp->stack = runtime·stackalloc(FixedStack);
  2319				} else {
  2320					g->m->scalararg[0] = FixedStack;
  2321					g->m->ptrarg[0] = gp;
  2322					fn = mstackalloc;
  2323					runtime·mcall(&fn);
  2324					g->m->ptrarg[0] = nil;
  2325				}
  2326				gp->stackguard0 = gp->stack.lo + StackGuard;
  2327			} else {
  2328				if(raceenabled)
  2329					runtime·racemalloc((void*)gp->stack.lo, gp->stack.hi - gp->stack.lo);
  2330			}
  2331		}
  2332		return gp;
  2333	}
  2334	
  2335	// Purge all cached G's from gfree list to the global list.
  2336	static void
  2337	gfpurge(P *p)
  2338	{
  2339		G *gp;
  2340	
  2341		runtime·lock(&runtime·sched.gflock);
  2342		while(p->gfreecnt != 0) {
  2343			p->gfreecnt--;
  2344			gp = p->gfree;
  2345			p->gfree = gp->schedlink;
  2346			gp->schedlink = runtime·sched.gfree;
  2347			runtime·sched.gfree = gp;
  2348			runtime·sched.ngfree++;
  2349		}
  2350		runtime·unlock(&runtime·sched.gflock);
  2351	}
  2352	
  2353	#pragma textflag NOSPLIT
  2354	void
  2355	runtime·Breakpoint(void)
  2356	{
  2357		runtime·breakpoint();
  2358	}
  2359	
  2360	// lockOSThread is called by runtime.LockOSThread and runtime.lockOSThread below
  2361	// after they modify m->locked. Do not allow preemption during this call,
  2362	// or else the m might be different in this function than in the caller.
  2363	#pragma textflag NOSPLIT
  2364	static void
  2365	lockOSThread(void)
  2366	{
  2367		g->m->lockedg = g;
  2368		g->lockedm = g->m;
  2369	}
  2370	
  2371	#pragma textflag NOSPLIT
  2372	void
  2373	runtime·LockOSThread(void)
  2374	{
  2375		g->m->locked |= LockExternal;
  2376		lockOSThread();
  2377	}
  2378	
  2379	#pragma textflag NOSPLIT
  2380	void
  2381	runtime·lockOSThread(void)
  2382	{
  2383		g->m->locked += LockInternal;
  2384		lockOSThread();
  2385	}
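
      	// Accounting note for the two variants above: LockExternal is a single bit
      	// recording a user-level runtime.LockOSThread, while LockInternal counts
      	// nested runtime-internal lockOSThread calls; goexit0 treats any LockInternal
      	// count still set at goroutine exit as an internal error.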
  2386	
  2387	
  2388	// unlockOSThread is called by runtime.UnlockOSThread and runtime.unlockOSThread below
  2389	// after they update m->locked. Do not allow preemption during this call,
  2390	// or else the m might be different in this function than in the caller.
  2391	#pragma textflag NOSPLIT
  2392	static void
  2393	unlockOSThread(void)
  2394	{
  2395		if(g->m->locked != 0)
  2396			return;
  2397		g->m->lockedg = nil;
  2398		g->lockedm = nil;
  2399	}
  2400	
  2401	#pragma textflag NOSPLIT
  2402	void
  2403	runtime·UnlockOSThread(void)
  2404	{
  2405		g->m->locked &= ~LockExternal;
  2406		unlockOSThread();
  2407	}
  2408	
  2409	static void badunlockOSThread(void);
  2410	
  2411	#pragma textflag NOSPLIT
  2412	void
  2413	runtime·unlockOSThread(void)
  2414	{
  2415		void (*fn)(void);
  2416	
  2417		if(g->m->locked < LockInternal) {
  2418			fn = badunlockOSThread;
  2419			runtime·onM(&fn);
  2420		}
  2421		g->m->locked -= LockInternal;
  2422		unlockOSThread();
  2423	}
  2424	
  2425	static void
  2426	badunlockOSThread(void)
  2427	{
  2428		runtime·throw("runtime: internal error: misuse of lockOSThread/unlockOSThread");
  2429	}
  2430	
  2431	#pragma textflag NOSPLIT
  2432	int32
  2433	runtime·gcount(void)
  2434	{
  2435		P *p, **pp;
  2436		int32 n;
  2437	
  2438		n = runtime·allglen - runtime·sched.ngfree;
  2439		for(pp=runtime·allp; p=*pp; pp++)
  2440			n -= p->gfreecnt;
  2441		// All these variables can be changed concurrently, so the result can be inconsistent.
  2442		// But at least the current goroutine is running.
  2443		if(n < 1)
  2444			n = 1;
  2445		return n;
  2446	}
  2447	
  2448	int32
  2449	runtime·mcount(void)
  2450	{
  2451		return runtime·sched.mcount;
  2452	}
  2453	
  2454	static struct ProfState {
  2455		uint32 lock;
  2456		int32 hz;
  2457	} prof;
  2458	
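      	// These functions are never called; only their addresses are used, as
      	// distinct fake PCs for profile samples that cannot be attributed to Go
      	// code (see the "System", "ExternalCode" and "GC" buckets in sigprof below).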
  2459	static void System(void) { System(); }
  2460	static void ExternalCode(void) { ExternalCode(); }
  2461	static void GC(void) { GC(); }
  2462	
  2463	extern void runtime·cpuproftick(uintptr*, int32);
  2464	extern byte runtime·etext[];
  2465	
  2466	// Called if we receive a SIGPROF signal.
  2467	void
  2468	runtime·sigprof(uint8 *pc, uint8 *sp, uint8 *lr, G *gp, M *mp)
  2469	{
  2470		int32 n;
  2471		bool traceback;
  2472		// Do not use the global m in this function; use mp instead.
  2473		// On windows one m sends reports about all the g's, so the global m would be the wrong one.
  2474		byte m;
  2475		uintptr stk[100];
  2476	
  2477		m = 0;
  2478		USED(m);
  2479	
  2480		if(prof.hz == 0)
  2481			return;
  2482	
  2483		// Profiling runs concurrently with GC, so it must not allocate.
  2484		mp->mallocing++;
  2485	
  2486		// Define that a "user g" is a user-created goroutine, and a "system g"
  2487		// is one that is m->g0 or m->gsignal. We've only made sure that we
  2488		// can unwind user g's, so exclude the system g's.
  2489		//
  2490		// It is not quite as easy as testing gp == m->curg (the current user g)
  2491		// because we might be interrupted for profiling halfway through a
  2492		// goroutine switch. The switch involves updating three (or four) values:
  2493		// g, PC, SP, and (on arm) LR. The PC must be the last to be updated,
  2494		// because once it gets updated the new g is running.
  2495		//
  2496		// When switching from a user g to a system g, LR is not considered live,
  2497		// so the update only affects g, SP, and PC. Since PC must be last,
  2498		// the possible partial transitions in ordinary execution are (1) g alone is updated,
  2499		// (2) both g and SP are updated, and (3) SP alone is updated.
  2500		// If g is updated, we'll see a system g and not look closer.
  2501		// If SP alone is updated, we can detect the partial transition by checking
  2502		// whether the SP is within g's stack bounds. (We could also require that SP
  2503		// be changed only after g, but the stack bounds check is needed by other
  2504		// cases, so there is no need to impose an additional requirement.)
  2505		//
  2506		// There is one exceptional transition to a system g, not in ordinary execution.
  2507		// When a signal arrives, the operating system starts the signal handler running
  2508		// with an updated PC and SP. The g is updated last, at the beginning of the
  2509		// handler. There are two reasons this is okay. First, until g is updated the
  2510		// g and SP do not match, so the stack bounds check detects the partial transition.
  2511		// Second, signal handlers currently run with signals disabled, so a profiling
  2512		// signal cannot arrive during the handler.
  2513		//
  2514		// When switching from a system g to a user g, there are three possibilities.
  2515		//
  2516		// First, it may be that the g switch has no PC update, because the SP
  2517		// either corresponds to a user g throughout (as in runtime.asmcgocall)
  2518		// or because it has been arranged to look like a user g frame
  2519		// (as in runtime.cgocallback_gofunc). In this case, since the entire
  2520		// transition is a g+SP update, a partial transition updating just one of 
  2521		// those will be detected by the stack bounds check.
  2522		//
  2523		// Second, when returning from a signal handler, the PC and SP updates
  2524		// are performed by the operating system in an atomic update, so the g
  2525		// update must be done before them. The stack bounds check detects
  2526		// the partial transition here, and (again) signal handlers run with signals
  2527		// disabled, so a profiling signal cannot arrive then anyway.
  2528		//
  2529		// Third, the common case: it may be that the switch updates g, SP, and PC
  2530		// separately, as in runtime.gogo.
  2531		//
  2532		// Because runtime.gogo is the only instance, we check whether the PC lies
  2533		// within that function, and if so, do not ask for a traceback. This approach
  2534		// requires knowing the size of the runtime.gogo function, which we
  2535		// record in arch_*.h and check in runtime_test.go.
  2536		//
  2537		// There is another apparently viable approach, recorded here in case
  2538		// the "PC within runtime.gogo" check turns out not to be usable.
  2539		// It would be possible to delay the update of either g or SP until immediately
  2540		// before the PC update instruction. Then, because of the stack bounds check,
  2541		// the only problematic interrupt point is just before that PC update instruction,
  2542		// and the sigprof handler can detect that instruction and simulate stepping past
  2543		// it in order to reach a consistent state. On ARM, the update of g must be made
  2544		// in two places (in R10 and also in a TLS slot), so the delayed update would
  2545		// need to be the SP update. The sigprof handler must read the instruction at
  2546		// the current PC and if it was the known instruction (for example, JMP BX or 
  2547		// MOV R2, PC), use that other register in place of the PC value.
  2548		// The biggest drawback to this solution is that it requires that we can tell
  2549		// whether it's safe to read from the memory pointed at by PC.
  2550		// In a correct program, we can test PC == nil and otherwise read,
  2551		// but if a profiling signal happens at the instant that a program executes
  2552		// a bad jump (before the program manages to handle the resulting fault)
  2553		// the profiling handler could fault trying to read nonexistent memory.
  2554		//
  2555		// To recap, there are no constraints on the assembly being used for the
  2556		// transition. We simply require that g and SP match and that the PC is not
  2557		// in runtime.gogo.
  2558		traceback = true;
  2559		if(gp == nil || gp != mp->curg ||
  2560		   (uintptr)sp < gp->stack.lo || gp->stack.hi < (uintptr)sp ||
  2561		   ((uint8*)runtime·gogo <= pc && pc < (uint8*)runtime·gogo + RuntimeGogoBytes))
  2562			traceback = false;
  2563	
  2564		n = 0;
  2565		if(traceback)
  2566			n = runtime·gentraceback((uintptr)pc, (uintptr)sp, (uintptr)lr, gp, 0, stk, nelem(stk), nil, nil, TraceTrap);
  2567		if(!traceback || n <= 0) {
  2568			// Normal traceback is impossible or has failed.
  2569			// See if it falls into several common cases.
  2570			n = 0;
  2571			if(mp->ncgo > 0 && mp->curg != nil &&
  2572				mp->curg->syscallpc != 0 && mp->curg->syscallsp != 0) {
  2573				// Cgo, we can't unwind and symbolize arbitrary C code,
  2574				// so instead collect Go stack that leads to the cgo call.
  2575				// This is especially important on windows, since all syscalls are cgo calls.
  2576				n = runtime·gentraceback(mp->curg->syscallpc, mp->curg->syscallsp, 0, mp->curg, 0, stk, nelem(stk), nil, nil, 0);
  2577			}
  2578	#ifdef GOOS_windows
  2579			if(n == 0 && mp->libcallg != nil && mp->libcallpc != 0 && mp->libcallsp != 0) {
  2580				// Libcall, i.e. runtime syscall on windows.
  2581				// Collect Go stack that leads to the call.
  2582				n = runtime·gentraceback(mp->libcallpc, mp->libcallsp, 0, mp->libcallg, 0, stk, nelem(stk), nil, nil, 0);
  2583			}
  2584	#endif
  2585			if(n == 0) {
  2586				// If all of the above has failed, account it against abstract "System" or "GC".
  2587				n = 2;
  2588				// "ExternalCode" is better than "etext".
  2589				if((uintptr)pc > (uintptr)runtime·etext)
  2590					pc = (byte*)ExternalCode + PCQuantum;
  2591				stk[0] = (uintptr)pc;
  2592				if(mp->gcing || mp->helpgc)
  2593					stk[1] = (uintptr)GC + PCQuantum;
  2594				else
  2595					stk[1] = (uintptr)System + PCQuantum;
  2596			}
  2597		}
  2598	
  2599		if(prof.hz != 0) {
  2600			// Simple cas-lock to coordinate with setcpuprofilerate.
  2601			while(!runtime·cas(&prof.lock, 0, 1))
  2602				runtime·osyield();
  2603			if(prof.hz != 0)
  2604				runtime·cpuproftick(stk, n);
  2605			runtime·atomicstore(&prof.lock, 0);
  2606		}
  2607		mp->mallocing--;
  2608	}
  2609	
  2610	// Arrange to call fn with a traceback hz times a second.
  2611	void
  2612	runtime·setcpuprofilerate_m(void)
  2613	{
  2614		int32 hz;
  2615		
  2616		hz = g->m->scalararg[0];
  2617		g->m->scalararg[0] = 0;
  2618	
  2619		// Force sane arguments.
  2620		if(hz < 0)
  2621			hz = 0;
  2622	
  2623		// Disable preemption, otherwise we can be rescheduled to another thread
  2624		// that has profiling enabled.
  2625		g->m->locks++;
  2626	
  2627		// Stop profiler on this thread so that it is safe to lock prof.
  2628		// if a profiling signal came in while we had prof locked,
  2629		// it would deadlock.
  2630		runtime·resetcpuprofiler(0);
  2631	
  2632		while(!runtime·cas(&prof.lock, 0, 1))
  2633			runtime·osyield();
  2634		prof.hz = hz;
  2635		runtime·atomicstore(&prof.lock, 0);
  2636	
  2637		runtime·lock(&runtime·sched.lock);
  2638		runtime·sched.profilehz = hz;
  2639		runtime·unlock(&runtime·sched.lock);
  2640	
  2641		if(hz != 0)
  2642			runtime·resetcpuprofiler(hz);
  2643	
  2644		g->m->locks--;
  2645	}
  2646	
  2647	P *runtime·newP(void);
  2648	
  2649	// Change number of processors.  The world is stopped, sched is locked.
  2650	static void
  2651	procresize(int32 new)
  2652	{
  2653		int32 i, old;
  2654		bool empty;
  2655		G *gp;
  2656		P *p;
  2657	
  2658		old = runtime·gomaxprocs;
  2659		if(old < 0 || old > MaxGomaxprocs || new <= 0 || new >MaxGomaxprocs)
  2660			runtime·throw("procresize: invalid arg");
  2661		// initialize new P's
  2662		for(i = 0; i < new; i++) {
  2663			p = runtime·allp[i];
  2664			if(p == nil) {
  2665				p = runtime·newP();
  2666				p->id = i;
  2667				p->status = Pgcstop;
  2668				runtime·atomicstorep(&runtime·allp[i], p);
  2669			}
  2670			if(p->mcache == nil) {
  2671				if(old==0 && i==0)
  2672					p->mcache = g->m->mcache;  // bootstrap
  2673				else
  2674					p->mcache = runtime·allocmcache();
  2675			}
  2676		}
  2677	
  2678		// redistribute runnable G's evenly
  2679		// collect all runnable goroutines in global queue preserving FIFO order
  2680		// FIFO order is required to ensure fairness even during frequent GCs
  2681		// see http://golang.org/issue/7126
  2682		empty = false;
  2683		while(!empty) {
  2684			empty = true;
  2685			for(i = 0; i < old; i++) {
  2686				p = runtime·allp[i];
  2687				if(p->runqhead == p->runqtail)
  2688					continue;
  2689				empty = false;
  2690				// pop from tail of local queue
  2691				p->runqtail--;
  2692				gp = p->runq[p->runqtail%nelem(p->runq)];
  2693				// push onto head of global queue
  2694				gp->schedlink = runtime·sched.runqhead;
  2695				runtime·sched.runqhead = gp;
  2696				if(runtime·sched.runqtail == nil)
  2697					runtime·sched.runqtail = gp;
  2698				runtime·sched.runqsize++;
  2699			}
  2700		}
  2701		// fill local queues with at most nelem(p->runq)/2 goroutines
  2702		// start at 1 because current M already executes some G and will acquire allp[0] below,
  2703		// so if we have a spare G we want to put it into allp[1].
  2704		for(i = 1; i < new * nelem(p->runq)/2 && runtime·sched.runqsize > 0; i++) {
  2705			gp = runtime·sched.runqhead;
  2706			runtime·sched.runqhead = gp->schedlink;
  2707			if(runtime·sched.runqhead == nil)
  2708				runtime·sched.runqtail = nil;
  2709			runtime·sched.runqsize--;
  2710			runqput(runtime·allp[i%new], gp);
  2711		}
  2712	
  2713		// free unused P's
  2714		for(i = new; i < old; i++) {
  2715			p = runtime·allp[i];
  2716			runtime·freemcache(p->mcache);
  2717			p->mcache = nil;
  2718			gfpurge(p);
  2719			p->status = Pdead;
  2720			// can't free P itself because it can be referenced by an M in syscall
  2721		}
  2722	
  2723		if(g->m->p)
  2724			g->m->p->m = nil;
  2725		g->m->p = nil;
  2726		g->m->mcache = nil;
  2727		p = runtime·allp[0];
  2728		p->m = nil;
  2729		p->status = Pidle;
  2730		acquirep(p);
  2731		for(i = new-1; i > 0; i--) {
  2732			p = runtime·allp[i];
  2733			p->status = Pidle;
  2734			pidleput(p);
  2735		}
  2736		runtime·atomicstore((uint32*)&runtime·gomaxprocs, new);
  2737	}
  2738	
  2739	// Associate p and the current m.
  2740	static void
  2741	acquirep(P *p)
  2742	{
  2743		if(g->m->p || g->m->mcache)
  2744			runtime·throw("acquirep: already in go");
  2745		if(p->m || p->status != Pidle) {
  2746			runtime·printf("acquirep: p->m=%p(%d) p->status=%d\n", p->m, p->m ? p->m->id : 0, p->status);
  2747			runtime·throw("acquirep: invalid p state");
  2748		}
  2749		g->m->mcache = p->mcache;
  2750		g->m->p = p;
  2751		p->m = g->m;
  2752		p->status = Prunning;
  2753	}
  2754	
  2755	// Disassociate p and the current m.
  2756	static P*
  2757	releasep(void)
  2758	{
  2759		P *p;
  2760	
  2761		if(g->m->p == nil || g->m->mcache == nil)
  2762			runtime·throw("releasep: invalid arg");
  2763		p = g->m->p;
  2764		if(p->m != g->m || p->mcache != g->m->mcache || p->status != Prunning) {
  2765			runtime·printf("releasep: m=%p m->p=%p p->m=%p m->mcache=%p p->mcache=%p p->status=%d\n",
  2766				g->m, g->m->p, p->m, g->m->mcache, p->mcache, p->status);
  2767			runtime·throw("releasep: invalid p state");
  2768		}
  2769		g->m->p = nil;
  2770		g->m->mcache = nil;
  2771		p->m = nil;
  2772		p->status = Pidle;
  2773		return p;
  2774	}
  2775	
  2776	static void
  2777	incidlelocked(int32 v)
  2778	{
  2779		runtime·lock(&runtime·sched.lock);
  2780		runtime·sched.nmidlelocked += v;
  2781		if(v > 0)
  2782			checkdead();
  2783		runtime·unlock(&runtime·sched.lock);
  2784	}
  2785	
  2786	// Check for deadlock situation.
  2787	// The check is based on the number of running M's; if it is 0, we have a deadlock.
  2788	static void
  2789	checkdead(void)
  2790	{
  2791		G *gp;
  2792		P *p;
  2793		M *mp;
  2794		int32 run, grunning, s;
  2795		uintptr i;
  2796	
  2797		// -1 for sysmon
  2798		run = runtime·sched.mcount - runtime·sched.nmidle - runtime·sched.nmidlelocked - 1;
  2799		if(run > 0)
  2800			return;
  2801		// If we are dying because of a signal caught on an already idle thread,
  2802		// freezetheworld will cause all running threads to block.
  2803		// The runtime will then essentially be in a deadlock state,
  2804		// except that there is a thread that will call runtime·exit soon.
  2805		if(runtime·panicking > 0)
  2806			return;
  2807		if(run < 0) {
  2808			runtime·printf("runtime: checkdead: nmidle=%d nmidlelocked=%d mcount=%d\n",
  2809				runtime·sched.nmidle, runtime·sched.nmidlelocked, runtime·sched.mcount);
  2810			runtime·throw("checkdead: inconsistent counts");
  2811		}
  2812		grunning = 0;
  2813		runtime·lock(&runtime·allglock);
  2814		for(i = 0; i < runtime·allglen; i++) {
  2815			gp = runtime·allg[i];
  2816			if(gp->issystem)
  2817				continue;
  2818			s = runtime·readgstatus(gp);
  2819			switch(s&~Gscan) {
  2820			case Gwaiting:
  2821				grunning++;
  2822				break;
  2823			case Grunnable:
  2824			case Grunning:
  2825			case Gsyscall:
  2826				runtime·unlock(&runtime·allglock);
  2827				runtime·printf("runtime: checkdead: find g %D in status %d\n", gp->goid, s);
  2828				runtime·throw("checkdead: runnable g");
  2829				break;
  2830			}
  2831		}
  2832		runtime·unlock(&runtime·allglock);
  2833		if(grunning == 0)  // possible if main goroutine calls runtime·Goexit()
  2834			runtime·throw("no goroutines (main called runtime.Goexit) - deadlock!");
  2835	
  2836		// Maybe jump time forward for playground.
  2837		if((gp = runtime·timejump()) != nil) {
  2838			runtime·casgstatus(gp, Gwaiting, Grunnable);
  2839			globrunqput(gp);
  2840			p = pidleget();
  2841			if(p == nil)
  2842				runtime·throw("checkdead: no p for timer");
  2843			mp = mget();
  2844			if(mp == nil)
  2845				newm(nil, p);
  2846			else {
  2847				mp->nextp = p;
  2848				runtime·notewakeup(&mp->park);
  2849			}
  2850			return;
  2851		}
  2852	
  2853		g->m->throwing = -1;  // do not dump full stacks
  2854		runtime·throw("all goroutines are asleep - deadlock!");
  2855	}
  2856	
  2857	static void
  2858	sysmon(void)
  2859	{
  2860		uint32 idle, delay, nscavenge;
  2861		int64 now, unixnow, lastpoll, lasttrace, lastgc;
  2862		int64 forcegcperiod, scavengelimit, lastscavenge, maxsleep;
  2863		G *gp;
  2864	
  2865		// If we go two minutes without a garbage collection, force one to run.
  2866		forcegcperiod = 2*60*1e9;
  2867		// If a heap span goes unused for 5 minutes after a garbage collection,
  2868		// we hand it back to the operating system.
  2869		scavengelimit = 5*60*1e9;
  2870		if(runtime·debug.scavenge > 0) {
  2871			// Scavenge-a-lot for testing.
  2872			forcegcperiod = 10*1e6;
  2873			scavengelimit = 20*1e6;
  2874		}
  2875		lastscavenge = runtime·nanotime();
  2876		nscavenge = 0;
  2877		// Make wake-up period small enough for the sampling to be correct.
  2878		maxsleep = forcegcperiod/2;
  2879		if(scavengelimit < forcegcperiod)
  2880			maxsleep = scavengelimit/2;
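      		// With the defaults above, scavengelimit (5 min) is not less than
      		// forcegcperiod (2 min), so maxsleep is forcegcperiod/2 = 1 minute,
      		// the longest sysmon will sleep on sysmonnote while the scheduler is idle.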
  2881	
  2882		lasttrace = 0;
  2883		idle = 0;  // how many cycles in succession we have not woken anybody up
  2884		delay = 0;
  2885		for(;;) {
  2886			if(idle == 0)  // start with 20us sleep...
  2887				delay = 20;
  2888			else if(idle > 50)  // start doubling the sleep after 1ms...
  2889				delay *= 2;
  2890			if(delay > 10*1000)  // up to 10ms
  2891				delay = 10*1000;
  2892			runtime·usleep(delay);
  2893			if(runtime·debug.schedtrace <= 0 &&
  2894				(runtime·sched.gcwaiting || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs)) {  // TODO: fast atomic
  2895				runtime·lock(&runtime·sched.lock);
  2896				if(runtime·atomicload(&runtime·sched.gcwaiting) || runtime·atomicload(&runtime·sched.npidle) == runtime·gomaxprocs) {
  2897					runtime·atomicstore(&runtime·sched.sysmonwait, 1);
  2898					runtime·unlock(&runtime·sched.lock);
  2899					runtime·notetsleep(&runtime·sched.sysmonnote, maxsleep);
  2900					runtime·lock(&runtime·sched.lock);
  2901					runtime·atomicstore(&runtime·sched.sysmonwait, 0);
  2902					runtime·noteclear(&runtime·sched.sysmonnote);
  2903					idle = 0;
  2904					delay = 20;
  2905				}
  2906				runtime·unlock(&runtime·sched.lock);
  2907			}
  2908			// poll network if not polled for more than 10ms
  2909			lastpoll = runtime·atomicload64(&runtime·sched.lastpoll);
  2910			now = runtime·nanotime();
  2911			unixnow = runtime·unixnanotime();
  2912			if(lastpoll != 0 && lastpoll + 10*1000*1000 < now) {
  2913				runtime·cas64(&runtime·sched.lastpoll, lastpoll, now);
  2914				gp = runtime·netpoll(false);  // non-blocking
  2915				if(gp) {
  2916					// Need to decrement number of idle locked M's
  2917					// (pretending that one more is running) before injectglist.
  2918					// Otherwise it can lead to the following situation:
  2919					// injectglist grabs all P's but before it starts M's to run the P's,
  2920					// another M returns from syscall, finishes running its G,
  2921					// observes that there is no work to do and no other running M's
  2922					// and reports deadlock.
  2923					incidlelocked(-1);
  2924					injectglist(gp);
  2925					incidlelocked(1);
  2926				}
  2927			}
  2928			// retake P's blocked in syscalls
  2929			// and preempt long running G's
  2930			if(retake(now))
  2931				idle = 0;
  2932			else
  2933				idle++;
  2934	
  2935			// check if we need to force a GC
  2936			lastgc = runtime·atomicload64(&mstats.last_gc);
  2937			if(lastgc != 0 && unixnow - lastgc > forcegcperiod && runtime·atomicload(&runtime·forcegc.idle)) {
  2938				runtime·lock(&runtime·forcegc.lock);
  2939				runtime·forcegc.idle = 0;
  2940				runtime·forcegc.g->schedlink = nil;
  2941				injectglist(runtime·forcegc.g);
  2942				runtime·unlock(&runtime·forcegc.lock);
  2943			}
  2944	
  2945			// scavenge heap once in a while
  2946			if(lastscavenge + scavengelimit/2 < now) {
  2947				runtime·MHeap_Scavenge(nscavenge, now, scavengelimit);
  2948				lastscavenge = now;
  2949				nscavenge++;
  2950			}
  2951	
  2952			if(runtime·debug.schedtrace > 0 && lasttrace + runtime·debug.schedtrace*1000000ll <= now) {
  2953				lasttrace = now;
  2954				runtime·schedtrace(runtime·debug.scheddetail);
  2955			}
  2956		}
  2957	}
  2958	
  2959	typedef struct Pdesc Pdesc;
  2960	struct Pdesc
  2961	{
  2962		uint32	schedtick;
  2963		int64	schedwhen;
  2964		uint32	syscalltick;
  2965		int64	syscallwhen;
  2966	};
  2967	#pragma dataflag NOPTR
  2968	static Pdesc pdesc[MaxGomaxprocs];
  2969	
  2970	static uint32
  2971	retake(int64 now)
  2972	{
  2973		uint32 i, s, n;
  2974		int64 t;
  2975		P *p;
  2976		Pdesc *pd;
  2977	
  2978		n = 0;
  2979		for(i = 0; i < runtime·gomaxprocs; i++) {
  2980			p = runtime·allp[i];
  2981			if(p==nil)
  2982				continue;
  2983			pd = &pdesc[i];
  2984			s = p->status;
  2985			if(s == Psyscall) {
  2986				// Retake P from syscall if it's there for more than 1 sysmon tick (at least 20us).
  2987				t = p->syscalltick;
  2988				if(pd->syscalltick != t) {
  2989					pd->syscalltick = t;
  2990					pd->syscallwhen = now;
  2991					continue;
  2992				}
  2993				// On the one hand we don't want to retake Ps if there is no other work to do,
  2994				// but on the other hand we want to retake them eventually
  2995				// because they can prevent the sysmon thread from deep sleep.
  2996				if(p->runqhead == p->runqtail &&
  2997					runtime·atomicload(&runtime·sched.nmspinning) + runtime·atomicload(&runtime·sched.npidle) > 0 &&
  2998					pd->syscallwhen + 10*1000*1000 > now)
  2999					continue;
  3000				// Need to decrement number of idle locked M's
  3001				// (pretending that one more is running) before the CAS.
  3002				// Otherwise the M from which we retake can exit the syscall,
  3003				// increment nmidle and report deadlock.
  3004				incidlelocked(-1);
  3005				if(runtime·cas(&p->status, s, Pidle)) {
  3006					n++;
  3007					handoffp(p);
  3008				}
  3009				incidlelocked(1);
  3010			} else if(s == Prunning) {
  3011				// Preempt G if it's running for more than 10ms.
  3012				t = p->schedtick;
  3013				if(pd->schedtick != t) {
  3014					pd->schedtick = t;
  3015					pd->schedwhen = now;
  3016					continue;
  3017				}
  3018				if(pd->schedwhen + 10*1000*1000 > now)
  3019					continue;
  3020				preemptone(p);
  3021			}
  3022		}
  3023		return n;
  3024	}
  3025	
  3026	// Tell all goroutines that they have been preempted and they should stop.
  3027	// This function is purely best-effort.  It can fail to inform a goroutine if a
  3028	// processor just started running it.
  3029	// No locks need to be held.
  3030	// Returns true if preemption request was issued to at least one goroutine.
  3031	static bool
  3032	preemptall(void)
  3033	{
  3034		P *p;
  3035		int32 i;
  3036		bool res;
  3037	
  3038		res = false;
  3039		for(i = 0; i < runtime·gomaxprocs; i++) {
  3040			p = runtime·allp[i];
  3041			if(p == nil || p->status != Prunning)
  3042				continue;
  3043			res |= preemptone(p);
  3044		}
  3045		return res;
  3046	}
  3047	
  3048	// Tell the goroutine running on processor P to stop.
  3049	// This function is purely best-effort.  It can incorrectly fail to inform the
  3050	// goroutine.  It can inform the wrong goroutine.  Even if it informs the
  3051	// correct goroutine, that goroutine might ignore the request if it is
  3052	// simultaneously executing runtime·newstack.
  3053	// No lock needs to be held.
  3054	// Returns true if preemption request was issued.
  3055	// The actual preemption will happen at some point in the future
  3056	// and will be indicated by the gp->status no longer being
  3057	// Grunning.
  3058	static bool
  3059	preemptone(P *p)
  3060	{
  3061		M *mp;
  3062		G *gp;
  3063	
  3064		mp = p->m;
  3065		if(mp == nil || mp == g->m)
  3066			return false;
  3067		gp = mp->curg;
  3068		if(gp == nil || gp == mp->g0)
  3069			return false;
  3070		gp->preempt = true;
  3071		// Every call in a goroutine checks for stack overflow by
  3072		// comparing the current stack pointer to gp->stackguard0.
  3073		// Setting gp->stackguard0 to StackPreempt folds
  3074		// preemption into the normal stack overflow check.
  3075		gp->stackguard0 = StackPreempt;
  3076		return true;
  3077	}
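
      	// The request takes effect at the preempted goroutine's next function
      	// prologue: the stack check fails against StackPreempt, morestack runs,
      	// and newstack turns the fake overflow into a reschedule instead of a
      	// stack growth.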
  3078	
  3079	void
  3080	runtime·schedtrace(bool detailed)
  3081	{
  3082		static int64 starttime;
  3083		int64 now;
  3084		int64 id1, id2, id3;
  3085		int32 i, t, h;
  3086		uintptr gi;
  3087		int8 *fmt;
  3088		M *mp, *lockedm;
  3089		G *gp, *lockedg;
  3090		P *p;
  3091	
  3092		now = runtime·nanotime();
  3093		if(starttime == 0)
  3094			starttime = now;
  3095	
  3096		runtime·lock(&runtime·sched.lock);
  3097		runtime·printf("SCHED %Dms: gomaxprocs=%d idleprocs=%d threads=%d spinningthreads=%d idlethreads=%d runqueue=%d",
  3098			(now-starttime)/1000000, runtime·gomaxprocs, runtime·sched.npidle, runtime·sched.mcount,
  3099			runtime·sched.nmspinning, runtime·sched.nmidle, runtime·sched.runqsize);
  3100		if(detailed) {
  3101			runtime·printf(" gcwaiting=%d nmidlelocked=%d stopwait=%d sysmonwait=%d\n",
  3102				runtime·sched.gcwaiting, runtime·sched.nmidlelocked,
  3103				runtime·sched.stopwait, runtime·sched.sysmonwait);
  3104		}
  3105		// We must be careful while reading data from P's, M's and G's.
  3106		// Even if we hold schedlock, most data can be changed concurrently.
  3107		// E.g. (p->m ? p->m->id : -1) can crash if p->m changes from non-nil to nil.
  3108		for(i = 0; i < runtime·gomaxprocs; i++) {
  3109			p = runtime·allp[i];
  3110			if(p == nil)
  3111				continue;
  3112			mp = p->m;
  3113			h = runtime·atomicload(&p->runqhead);
  3114			t = runtime·atomicload(&p->runqtail);
  3115			if(detailed)
  3116				runtime·printf("  P%d: status=%d schedtick=%d syscalltick=%d m=%d runqsize=%d gfreecnt=%d\n",
  3117					i, p->status, p->schedtick, p->syscalltick, mp ? mp->id : -1, t-h, p->gfreecnt);
  3118			else {
  3119				// In non-detailed mode format lengths of per-P run queues as:
  3120				// [len1 len2 len3 len4]
  3121				fmt = " %d";
  3122				if(runtime·gomaxprocs == 1)
  3123					fmt = " [%d]\n";
  3124				else if(i == 0)
  3125					fmt = " [%d";
  3126				else if(i == runtime·gomaxprocs-1)
  3127					fmt = " %d]\n";
  3128				runtime·printf(fmt, t-h);
  3129			}
  3130		}
  3131		if(!detailed) {
  3132			runtime·unlock(&runtime·sched.lock);
  3133			return;
  3134		}
  3135		for(mp = runtime·allm; mp; mp = mp->alllink) {
  3136			p = mp->p;
  3137			gp = mp->curg;
  3138			lockedg = mp->lockedg;
  3139			id1 = -1;
  3140			if(p)
  3141				id1 = p->id;
  3142			id2 = -1;
  3143			if(gp)
  3144				id2 = gp->goid;
  3145			id3 = -1;
  3146			if(lockedg)
  3147				id3 = lockedg->goid;
  3148			runtime·printf("  M%d: p=%D curg=%D mallocing=%d throwing=%d gcing=%d"
  3149				" locks=%d dying=%d helpgc=%d spinning=%d blocked=%d lockedg=%D\n",
  3150				mp->id, id1, id2,
  3151				mp->mallocing, mp->throwing, mp->gcing, mp->locks, mp->dying, mp->helpgc,
  3152				mp->spinning, g->m->blocked, id3);
  3153		}
  3154		runtime·lock(&runtime·allglock);
  3155		for(gi = 0; gi < runtime·allglen; gi++) {
  3156			gp = runtime·allg[gi];
  3157			mp = gp->m;
  3158			lockedm = gp->lockedm;
  3159			runtime·printf("  G%D: status=%d(%S) m=%d lockedm=%d\n",
  3160				gp->goid, runtime·readgstatus(gp), gp->waitreason, mp ? mp->id : -1,
  3161				lockedm ? lockedm->id : -1);
  3162		}
  3163		runtime·unlock(&runtime·allglock);
  3164		runtime·unlock(&runtime·sched.lock);
  3165	}
  3166	
  3167	// Put mp on midle list.
  3168	// Sched must be locked.
  3169	static void
  3170	mput(M *mp)
  3171	{
  3172		mp->schedlink = runtime·sched.midle;
  3173		runtime·sched.midle = mp;
  3174		runtime·sched.nmidle++;
  3175		checkdead();
  3176	}
  3177	
  3178	// Try to get an m from midle list.
  3179	// Sched must be locked.
  3180	static M*
  3181	mget(void)
  3182	{
  3183		M *mp;
  3184	
  3185		if((mp = runtime·sched.midle) != nil){
  3186			runtime·sched.midle = mp->schedlink;
  3187			runtime·sched.nmidle--;
  3188		}
  3189		return mp;
  3190	}
  3191	
  3192	// Put gp on the global runnable queue.
  3193	// Sched must be locked.
  3194	static void
  3195	globrunqput(G *gp)
  3196	{
  3197		gp->schedlink = nil;
  3198		if(runtime·sched.runqtail)
  3199			runtime·sched.runqtail->schedlink = gp;
  3200		else
  3201			runtime·sched.runqhead = gp;
  3202		runtime·sched.runqtail = gp;
  3203		runtime·sched.runqsize++;
  3204	}
  3205	
  3206	// Put a batch of runnable goroutines on the global runnable queue.
  3207	// Sched must be locked.
  3208	static void
  3209	globrunqputbatch(G *ghead, G *gtail, int32 n)
  3210	{
  3211		gtail->schedlink = nil;
  3212		if(runtime·sched.runqtail)
  3213			runtime·sched.runqtail->schedlink = ghead;
  3214		else
  3215			runtime·sched.runqhead = ghead;
  3216		runtime·sched.runqtail = gtail;
  3217		runtime·sched.runqsize += n;
  3218	}
  3219	
  3220	// Try to get a batch of G's from the global runnable queue.
  3221	// Sched must be locked.
  3222	static G*
  3223	globrunqget(P *p, int32 max)
  3224	{
  3225		G *gp, *gp1;
  3226		int32 n;
  3227	
  3228		if(runtime·sched.runqsize == 0)
  3229			return nil;
  3230		n = runtime·sched.runqsize/runtime·gomaxprocs+1;
  3231		if(n > runtime·sched.runqsize)
  3232			n = runtime·sched.runqsize;
  3233		if(max > 0 && n > max)
  3234			n = max;
  3235		if(n > nelem(p->runq)/2)
  3236			n = nelem(p->runq)/2;
  3237		runtime·sched.runqsize -= n;
  3238		if(runtime·sched.runqsize == 0)
  3239			runtime·sched.runqtail = nil;
  3240		gp = runtime·sched.runqhead;
  3241		runtime·sched.runqhead = gp->schedlink;
  3242		n--;
  3243		while(n--) {
  3244			gp1 = runtime·sched.runqhead;
  3245			runtime·sched.runqhead = gp1->schedlink;
  3246			runqput(p, gp1);
  3247		}
  3248		return gp;
  3249	}
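
      	// Worked example for the batch size above: with runqsize=10 and
      	// gomaxprocs=4, n = 10/4+1 = 3, so this P takes three G's (one returned,
      	// two pushed onto its local run queue) and leaves seven on the global
      	// queue for other P's; the later caps bound n by max and by half of the
      	// local run queue.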
  3250	
  3251	// Put p on the pidle list.
  3252	// Sched must be locked.
  3253	static void
  3254	pidleput(P *p)
  3255	{
  3256		p->link = runtime·sched.pidle;
  3257		runtime·sched.pidle = p;
  3258		runtime·xadd(&runtime·sched.npidle, 1);  // TODO: fast atomic
  3259	}
  3260	
  3261	// Try to get a p from the pidle list.
  3262	// Sched must be locked.
  3263	static P*
  3264	pidleget(void)
  3265	{
  3266		P *p;
  3267	
  3268		p = runtime·sched.pidle;
  3269		if(p) {
  3270			runtime·sched.pidle = p->link;
  3271			runtime·xadd(&runtime·sched.npidle, -1);  // TODO: fast atomic
  3272		}
  3273		return p;
  3274	}
  3275	
  3276	// Try to put g on local runnable queue.
  3277	// If it's full, put onto global queue.
  3278	// Executed only by the owner P.
  3279	static void
  3280	runqput(P *p, G *gp)
  3281	{
  3282		uint32 h, t;
  3283	
  3284	retry:
  3285		h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
  3286		t = p->runqtail;
  3287		if(t - h < nelem(p->runq)) {
  3288			p->runq[t%nelem(p->runq)] = gp;
  3289			runtime·atomicstore(&p->runqtail, t+1);  // store-release, makes the item available for consumption
  3290			return;
  3291		}
  3292		if(runqputslow(p, gp, h, t))
  3293			return;
  3294		// the queue is not full now, so the put above must succeed
  3295		goto retry;
  3296	}
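
      	// Notes on the ring protocol above: runqhead and runqtail are free-running
      	// uint32 counters, so t-h is the queue length even after wraparound, and
      	// elements live at index t%nelem(p->runq). Only the owner P writes runqtail
      	// (store-release); stealing P's advance runqhead with a CAS, which is why
      	// the tail can be read here without an atomic load.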
  3297	
  3298	// Put g and a batch of work from local runnable queue on global queue.
  3299	// Executed only by the owner P.
  3300	static bool
  3301	runqputslow(P *p, G *gp, uint32 h, uint32 t)
  3302	{
  3303		G *batch[nelem(p->runq)/2+1];
  3304		uint32 n, i;
  3305	
  3306		// First, grab a batch from local queue.
  3307		n = t-h;
  3308		n = n/2;
  3309		if(n != nelem(p->runq)/2)
  3310			runtime·throw("runqputslow: queue is not full");
  3311		for(i=0; i<n; i++)
  3312			batch[i] = p->runq[(h+i)%nelem(p->runq)];
  3313		if(!runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
  3314			return false;
  3315		batch[n] = gp;
  3316		// Link the goroutines.
  3317		for(i=0; i<n; i++)
  3318			batch[i]->schedlink = batch[i+1];
  3319		// Now put the batch on global queue.
  3320		runtime·lock(&runtime·sched.lock);
  3321		globrunqputbatch(batch[0], batch[n], n+1);
  3322		runtime·unlock(&runtime·sched.lock);
  3323		return true;
  3324	}
  3325	
  3326	// Get g from local runnable queue.
  3327	// Executed only by the owner P.
  3328	static G*
  3329	runqget(P *p)
  3330	{
  3331		G *gp;
  3332		uint32 t, h;
  3333	
  3334		for(;;) {
  3335			h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
  3336			t = p->runqtail;
  3337			if(t == h)
  3338				return nil;
  3339			gp = p->runq[h%nelem(p->runq)];
  3340			if(runtime·cas(&p->runqhead, h, h+1))  // cas-release, commits consume
  3341				return gp;
  3342		}
  3343	}
  3344	
  3345	// Grabs a batch of goroutines from local runnable queue.
  3346	// batch array must be of size nelem(p->runq)/2. Returns number of grabbed goroutines.
  3347	// Can be executed by any P.
  3348	static uint32
  3349	runqgrab(P *p, G **batch)
  3350	{
  3351		uint32 t, h, n, i;
  3352	
  3353		for(;;) {
  3354			h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with other consumers
  3355			t = runtime·atomicload(&p->runqtail);  // load-acquire, synchronize with the producer
  3356			n = t-h;
  3357			n = n - n/2;
  3358			if(n == 0)
  3359				break;
  3360			if(n > nelem(p->runq)/2)  // read inconsistent h and t
  3361				continue;
  3362			for(i=0; i<n; i++)
  3363				batch[i] = p->runq[(h+i)%nelem(p->runq)];
  3364			if(runtime·cas(&p->runqhead, h, h+n))  // cas-release, commits consume
  3365				break;
  3366		}
  3367		return n;
  3368	}
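
      	// Note that n = n - n/2 rounds up, so runqgrab takes the larger half of
      	// the victim's queue: for t-h == 5 it grabs 3 goroutines, for t-h == 1
      	// it grabs the single goroutine, and for t-h == 0 it grabs nothing.
      	// Because h and t are read at different moments, a stealer advancing
      	// runqhead followed by the owner refilling the queue can make t-h appear
      	// larger than the queue's real occupancy; the n > nelem(p->runq)/2 check
      	// catches exactly that case and the loop retries with fresh values.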
  3369	
  3370	// Steal half of the elements from the local runnable queue of p2
  3371	// and put them onto the local runnable queue of p.
  3372	// Returns one of the stolen elements (or nil if the steal failed).
  3373	static G*
  3374	runqsteal(P *p, P *p2)
  3375	{
  3376		G *gp;
  3377		G *batch[nelem(p->runq)/2];
  3378		uint32 t, h, n, i;
  3379	
  3380		n = runqgrab(p2, batch);
  3381		if(n == 0)
  3382			return nil;
  3383		n--;
  3384		gp = batch[n];
  3385		if(n == 0)
  3386			return gp;
  3387		h = runtime·atomicload(&p->runqhead);  // load-acquire, synchronize with consumers
  3388		t = p->runqtail;
  3389		if(t - h + n >= nelem(p->runq))
  3390			runtime·throw("runqsteal: runq overflow");
  3391		for(i=0; i<n; i++, t++)
  3392			p->runq[t%nelem(p->runq)] = batch[i];
  3393		runtime·atomicstore(&p->runqtail, t);  // store-release, makes the items available for consumption
  3394		return gp;
  3395	}
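
      	// Taken together, runqput/runqget/runqgrab implement a fixed-size ring
      	// buffer with a single producer (the owner P) and multiple consumers
      	// (the owner plus stealing Ps): the producer publishes with a
      	// store-release of runqtail, consumers claim slots with a CAS on
      	// runqhead, and slot indices are taken modulo the queue size. A
      	// self-contained sketch of the same discipline in plain C11 atomics
      	// (hypothetical names, not the runtime's own primitives):
      	//
      	//	#include <stdatomic.h>
      	//	#include <stdbool.h>
      	//	#include <stddef.h>
      	//
      	//	enum { QSIZE = 256 };
      	//	typedef struct {
      	//		_Atomic unsigned head;  // next slot to consume
      	//		_Atomic unsigned tail;  // next slot to fill (owner only)
      	//		void *slot[QSIZE];
      	//	} Ring;
      	//
      	//	// Owner only: returns false when the ring is full.
      	//	static bool
      	//	ringput(Ring *r, void *item)
      	//	{
      	//		unsigned h = atomic_load_explicit(&r->head, memory_order_acquire);
      	//		unsigned t = atomic_load_explicit(&r->tail, memory_order_relaxed);
      	//		if(t - h >= QSIZE)
      	//			return false;
      	//		r->slot[t % QSIZE] = item;
      	//		atomic_store_explicit(&r->tail, t+1, memory_order_release);
      	//		return true;
      	//	}
      	//
      	//	// Owner or stealer: returns NULL when the ring is empty.
      	//	static void*
      	//	ringget(Ring *r)
      	//	{
      	//		for(;;) {
      	//			unsigned h = atomic_load_explicit(&r->head, memory_order_acquire);
      	//			unsigned t = atomic_load_explicit(&r->tail, memory_order_acquire);
      	//			if(t == h)
      	//				return NULL;
      	//			void *item = r->slot[h % QSIZE];
      	//			if(atomic_compare_exchange_weak_explicit(&r->head, &h, h+1,
      	//					memory_order_acq_rel, memory_order_acquire))
      	//				return item;
      	//		}
      	//	}
      	//
      	// A stealer that wants a batch does the same thing as ringget but copies
      	// slots [h, h+n) into its own buffer first and then advances head by n
      	// with a single CAS, which is what runqgrab does above.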
  3396	
  3397	void
  3398	runtime·testSchedLocalQueue(void)
  3399	{
  3400		P *p;
  3401		G *gs;
  3402		int32 i, j;
  3403	
  3404		p = (P*)runtime·mallocgc(sizeof(*p), nil, FlagNoScan);
  3405		gs = (G*)runtime·mallocgc(nelem(p->runq)*sizeof(*gs), nil, FlagNoScan);
  3406	
  3407		for(i = 0; i < nelem(p->runq); i++) {
  3408			if(runqget(p) != nil)
  3409				runtime·throw("runq is not empty initially");
  3410			for(j = 0; j < i; j++)
  3411				runqput(p, &gs[i]);
  3412			for(j = 0; j < i; j++) {
  3413				if(runqget(p) != &gs[i]) {
  3414					runtime·printf("bad element at iter %d/%d\n", i, j);
  3415					runtime·throw("bad element");
  3416				}
  3417			}
  3418			if(runqget(p) != nil)
  3419				runtime·throw("runq is not empty afterwards");
  3420		}
  3421	}
  3422	
  3423	void
  3424	runtime·testSchedLocalQueueSteal(void)
  3425	{
  3426		P *p1, *p2;
  3427		G *gs, *gp;
  3428		int32 i, j, s;
  3429	
  3430		p1 = (P*)runtime·mallocgc(sizeof(*p1), nil, FlagNoScan);
  3431		p2 = (P*)runtime·mallocgc(sizeof(*p2), nil, FlagNoScan);
  3432		gs = (G*)runtime·mallocgc(nelem(p1->runq)*sizeof(*gs), nil, FlagNoScan);
  3433	
  3434		for(i = 0; i < nelem(p1->runq); i++) {
  3435			for(j = 0; j < i; j++) {
  3436				gs[j].sig = 0;
  3437				runqput(p1, &gs[j]);
  3438			}
  3439			gp = runqsteal(p2, p1);
  3440			s = 0;
  3441			if(gp) {
  3442				s++;
  3443				gp->sig++;
  3444			}
  3445			while(gp = runqget(p2)) {
  3446				s++;
  3447				gp->sig++;
  3448			}
  3449			while(gp = runqget(p1))
  3450				gp->sig++;
  3451			for(j = 0; j < i; j++) {
  3452				if(gs[j].sig != 1) {
  3453					runtime·printf("bad element %d(%d) at iter %d\n", j, gs[j].sig, i);
  3454					runtime·throw("bad element");
  3455				}
  3456			}
  3457			if(s != i/2 && s != i/2+1) {
  3458				runtime·printf("bad steal %d, want %d or %d, iter %d\n",
  3459					s, i/2, i/2+1, i);
  3460				runtime·throw("bad steal");
  3461			}
  3462		}
  3463	}
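
      	// The two test hooks above are meant to be driven from the runtime's
      	// Go test suite: testSchedLocalQueue checks that, for every fill level
      	// i, i puts are matched by exactly i successful gets and the queue ends
      	// up empty, while testSchedLocalQueueSteal checks that a steal takes
      	// roughly half of the victim's queue (s must be i/2 or i/2+1) and that
      	// every goroutine is dequeued exactly once, using the G.sig field as a
      	// scratch counter.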
  3464	
  3465	void
  3466	runtime·setmaxthreads_m(void)
  3467	{
  3468		int32 in;
  3469		int32 out;
  3470	
  3471		in = g->m->scalararg[0];
  3472	
  3473		runtime·lock(&runtime·sched.lock);
  3474		out = runtime·sched.maxmcount;
  3475		runtime·sched.maxmcount = in;
  3476		checkmcount();
  3477		runtime·unlock(&runtime·sched.lock);
  3478	
  3479		g->m->scalararg[0] = out;
  3480	}
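
      	// The new thread limit comes in, and the previous limit goes back out,
      	// through g->m->scalararg[0]; the scalararg slots are the ad-hoc
      	// argument-passing convention used for these _m helpers, which run on
      	// the g0 stack.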
  3481	
  3482	static int8 experiment[] = GOEXPERIMENT; // defined in zaexperiment.h
  3483	
  3484	static bool
  3485	haveexperiment(int8 *name)
  3486	{
  3487		int32 i, j;
  3488		
  3489		for(i=0; i<sizeof(experiment); i++) {
  3490			if((i == 0 || experiment[i-1] == ',') && experiment[i] == name[0]) {
  3491				for(j=0; name[j]; j++)
  3492					if(experiment[i+j] != name[j])
  3493						goto nomatch;
  3494				if(experiment[i+j] != '\0' && experiment[i+j] != ',')
  3495					goto nomatch;
  3496				return 1;
  3497			}
  3498		nomatch:;
  3499		}
  3500		return 0;
  3501	}
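
      	// GOEXPERIMENT is a comma-separated list of experiment names baked into
      	// zaexperiment.h at build time, so haveexperiment is a scan for name
      	// anchored at the start of the string or just after a comma, with the
      	// match required to end at a comma or at the terminating NUL. For
      	// example (hypothetical contents), with experiment[] = "foo,barbaz",
      	// haveexperiment("bar") returns 0 because the candidate match is
      	// followed by 'b', while haveexperiment("barbaz") returns 1.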
  3502	
  3503	#pragma textflag NOSPLIT
  3504	void
  3505	sync·runtime_procPin(intptr p)
  3506	{
  3507		M *mp;
  3508	
  3509		mp = g->m;
  3510		// Disable preemption.
  3511		mp->locks++;
  3512		p = mp->p->id;
  3513		FLUSH(&p);
  3514	}
  3515	
  3516	#pragma textflag NOSPLIT
  3517	void
  3518	sync·runtime_procUnpin()
  3519	{
  3520		g->m->locks--;
  3521	}
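
      	// runtime_procPin disables preemption by bumping m->locks and returns
      	// the id of the P the caller is running on; runtime_procUnpin drops the
      	// lock count again. Package sync uses this pair (for example in
      	// sync.Pool) to index per-P data structures while guaranteeing that the
      	// goroutine is not rescheduled onto another P in between. The FLUSH is
      	// the old C runtime's way of forcing the result parameter p to be
      	// written back to the Go caller's frame.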
