Source file
src/runtime/mcentral.go
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

// Central free lists.
//
// See malloc.go for an overview.
//
// The mcentral doesn't actually contain the list of free objects; the mspan does.
// Each mcentral has two lists of mspans: those with free objects (c.nonempty)
// and those that are completely allocated (c.empty).

package runtime

import "runtime/internal/atomic"

// Central list of free objects of a given size.
//
//go:notinheap
type mcentral struct {
	lock      mutex
	spanclass spanClass

	// For !go115NewMCentralImpl.
	nonempty mSpanList // list of spans with a free object, ie a nonempty free list
	empty    mSpanList // list of spans with no free objects (or cached in an mcache)

	// partial and full contain two mspan sets: one of swept in-use
	// spans, and one of unswept in-use spans. These two trade
	// roles on each GC cycle. The unswept set is drained either by
	// allocation or by the background sweeper in every GC cycle,
	// so only two roles are necessary.
	//
	// sweepgen is increased by 2 on each GC cycle, so the swept
	// spans are in partial[sweepgen/2%2] and the unswept spans are in
	// partial[1-sweepgen/2%2]. Sweeping pops spans from the
	// unswept set and pushes spans that are still in-use on the
	// swept set. Likewise, allocating an in-use span pushes it
	// on the swept set.
	//
	// Some parts of the sweeper can sweep arbitrary spans, and hence
	// can't remove them from the unswept set, but will add the span
	// to the appropriate swept list. As a result, the parts of the
	// sweeper and mcentral that do consume from the unswept list may
	// encounter swept spans, and these should be ignored.
	partial [2]spanSet // list of spans with a free object
	full    [2]spanSet // list of spans with no free objects

	// nmalloc is the cumulative count of objects allocated from
	// this mcentral, assuming all spans in mcaches are
	// fully-allocated. Written atomically, read under STW.
	nmalloc uint64
}

// Initialize a single central free list.
func (c *mcentral) init(spc spanClass) {
	c.spanclass = spc
	if go115NewMCentralImpl {
		lockInit(&c.partial[0].spineLock, lockRankSpanSetSpine)
		lockInit(&c.partial[1].spineLock, lockRankSpanSetSpine)
		lockInit(&c.full[0].spineLock, lockRankSpanSetSpine)
		lockInit(&c.full[1].spineLock, lockRankSpanSetSpine)
	} else {
		c.nonempty.init()
		c.empty.init()
		lockInit(&c.lock, lockRankMcentral)
	}
}
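
// For orientation: the runtime keeps one mcentral per span class inside
// mheap_. A paraphrased sketch of the call site in mheap's initialization
// (see mheap.go; a sketch from memory, not a verbatim quote):
//
//	for i := range mheap_.central {
//		mheap_.central[i].mcentral.init(spanClass(i))
//	}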

// partialUnswept returns the spanSet which holds partially-filled
// unswept spans for this sweepgen.
func (c *mcentral) partialUnswept(sweepgen uint32) *spanSet {
	return &c.partial[1-sweepgen/2%2]
}

// partialSwept returns the spanSet which holds partially-filled
// swept spans for this sweepgen.
func (c *mcentral) partialSwept(sweepgen uint32) *spanSet {
	return &c.partial[sweepgen/2%2]
}

// fullUnswept returns the spanSet which holds unswept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullUnswept(sweepgen uint32) *spanSet {
	return &c.full[1-sweepgen/2%2]
}

// fullSwept returns the spanSet which holds swept spans without any
// free slots for this sweepgen.
func (c *mcentral) fullSwept(sweepgen uint32) *spanSet {
	return &c.full[sweepgen/2%2]
}
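
// A worked example of the index arithmetic above: sweepgen is incremented
// by 2 on each GC cycle. With sweepgen == 4, sweepgen/2%2 == 0, so the swept
// spans live in partial[0] and full[0] while the unswept spans live in
// partial[1] and full[1]. After the next cycle sweepgen == 6 and
// sweepgen/2%2 == 1, so the two roles flip without moving a single span.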

// Allocate a span to use in an mcache.
func (c *mcentral) cacheSpan() *mspan {
	if !go115NewMCentralImpl {
		return c.oldCacheSpan()
	}
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	sg := mheap_.sweepgen

	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}

	// If we sweep spanBudget spans without finding any free
	// space, just allocate a fresh span. This limits the amount
	// of time we can spend trying to find free space and
	// amortizes the cost of small object sweeping over the
	// benefit of having a full free span to allocate from.
	spanBudget := 100

	var s *mspan

	// Try partial swept spans first.
	if s = c.partialSwept(sg).pop(); s != nil {
		goto havespan
	}

	// Now try partial unswept spans.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.partialUnswept(sg).pop()
		if s == nil {
			break
		}
		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// We got ownership of the span, so let's sweep it and use it.
			s.sweep(true)
			goto havespan
		}
		// We failed to get ownership of the span, which means it's being or
		// has been swept by an asynchronous sweeper that just couldn't remove
		// it from the unswept set. That sweeper took ownership of the span
		// and already moved it to the appropriate swept set, so we lost the
		// race and must try another span.
	}

	// Now try full unswept spans, sweeping them and putting them into the
	// partial or full swept sets as appropriate.
	for ; spanBudget >= 0; spanBudget-- {
		s = c.fullUnswept(sg).pop()
		if s == nil {
			break
		}
		if atomic.Load(&s.sweepgen) == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// We got ownership of the span, so let's sweep it.
			s.sweep(true)
			// Check if there's any free space.
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			// Add it to the swept list, because sweeping didn't give us any free space.
			c.fullSwept(sg).push(s)
		}
		// See comment for partial unswept spans.
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}

	// We failed to get a span from the mcentral so get one from mheap.
	s = c.grow()
	if s == nil {
		return nil
	}

	// At this point s is a span that should have free slots.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}
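
// To illustrate the allocCache adjustment at the end of cacheSpan, take a
// hypothetical span with s.freeindex == 70: freeByteBase == 70 &^ 63 == 64
// and whichByte == 64/8 == 8, so refillAllocCache loads the 64 allocation
// bits starting at object 64, and the shift by 70%64 == 6 discards the bits
// for objects 64 through 69 so that bit 0 of s.allocCache describes object 70.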

// Allocate a span to use in an mcache.
//
// For !go115NewMCentralImpl.
func (c *mcentral) oldCacheSpan() *mspan {
	// Deduct credit for this span allocation and sweep if necessary.
	spanBytes := uintptr(class_to_allocnpages[c.spanclass.sizeclass()]) * _PageSize
	deductSweepCredit(spanBytes, 0)

	lock(&c.lock)
	traceDone := false
	if trace.enabled {
		traceGCSweepStart()
	}
	sg := mheap_.sweepgen
retry:
	var s *mspan
	for s = c.nonempty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			c.nonempty.remove(s)
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			goto havespan
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// we have a nonempty span that does not require sweeping, allocate from it
		c.nonempty.remove(s)
		c.empty.insertBack(s)
		unlock(&c.lock)
		goto havespan
	}

	for s = c.empty.first; s != nil; s = s.next {
		if s.sweepgen == sg-2 && atomic.Cas(&s.sweepgen, sg-2, sg-1) {
			// we have an empty span that requires sweeping,
			// sweep it and see if we can free some space in it
			c.empty.remove(s)
			// swept spans are at the end of the list
			c.empty.insertBack(s)
			unlock(&c.lock)
			s.sweep(true)
			freeIndex := s.nextFreeIndex()
			if freeIndex != s.nelems {
				s.freeindex = freeIndex
				goto havespan
			}
			lock(&c.lock)
			// the span is still empty after sweep
			// it is already in the empty list, so just retry
			goto retry
		}
		if s.sweepgen == sg-1 {
			// the span is being swept by background sweeper, skip
			continue
		}
		// already swept empty span,
		// all subsequent ones must also be either swept or in process of sweeping
		break
	}
	if trace.enabled {
		traceGCSweepDone()
		traceDone = true
	}
	unlock(&c.lock)

	// Replenish central list if empty.
	s = c.grow()
	if s == nil {
		return nil
	}
	lock(&c.lock)
	c.empty.insertBack(s)
	unlock(&c.lock)

	// At this point s is a non-empty span, queued at the end of the empty list,
	// c is unlocked.
havespan:
	if trace.enabled && !traceDone {
		traceGCSweepDone()
	}
	n := int(s.nelems) - int(s.allocCount)
	if n == 0 || s.freeindex == s.nelems || uintptr(s.allocCount) == s.nelems {
		throw("span has no free objects")
	}
	// Assume all objects from this span will be allocated in the
	// mcache. If it gets uncached, we'll adjust this.
	atomic.Xadd64(&c.nmalloc, int64(n))
	usedBytes := uintptr(s.allocCount) * s.elemsize
	atomic.Xadd64(&memstats.heap_live, int64(spanBytes)-int64(usedBytes))
	if trace.enabled {
		// heap_live changed.
		traceHeapAlloc()
	}
	if gcBlackenEnabled != 0 {
		// heap_live changed.
		gcController.revise()
	}
	freeByteBase := s.freeindex &^ (64 - 1)
	whichByte := freeByteBase / 8
	// Init alloc bits cache.
	s.refillAllocCache(whichByte)

	// Adjust the allocCache so that s.freeindex corresponds to the low bit in
	// s.allocCache.
	s.allocCache >>= s.freeindex % 64

	return s
}

// Return span from an mcache.
//
// s must have a span class corresponding to this
// mcentral and it must not be empty.
func (c *mcentral) uncacheSpan(s *mspan) {
	if !go115NewMCentralImpl {
		c.oldUncacheSpan(s)
		return
	}
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1

	// Fix up sweepgen.
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}
	n := int(s.nelems) - int(s.allocCount)

	// Fix up statistics.
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		if !stale {
			// (*mcentral).cacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
	}

	// Put the span in the appropriate place.
	if stale {
		// It's stale, so just sweep it. Sweeping will put it into
		// the right list.
		s.sweep(false)
	} else {
		if n > 0 {
			// Put it back on the partial swept list.
			c.partialSwept(sg).push(s)
		} else {
			// There's no free space and it's not stale, so put it on the
			// full swept list.
			c.fullSwept(sg).push(s)
		}
	}
}
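
// A worked example of the statistics fix-up above, with hypothetical counts:
// if a span with s.nelems == 10 comes back with s.allocCount == 4, then
// n == 6. cacheSpan charged all 10 objects to c.nmalloc (and their bytes to
// heap_live) when the span was handed out, so the 6 slots that were never
// allocated are credited back here (heap_live only for non-stale spans), and
// since the span still has free slots it is pushed onto the partial swept set.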

// Return span from an mcache.
//
// For !go115NewMCentralImpl.
func (c *mcentral) oldUncacheSpan(s *mspan) {
	if s.allocCount == 0 {
		throw("uncaching span but s.allocCount == 0")
	}

	sg := mheap_.sweepgen
	stale := s.sweepgen == sg+1
	if stale {
		// Span was cached before sweep began. It's our
		// responsibility to sweep it.
		//
		// Set sweepgen to indicate it's not cached but needs
		// sweeping and can't be allocated from. sweep will
		// set s.sweepgen to indicate s is swept.
		atomic.Store(&s.sweepgen, sg-1)
	} else {
		// Indicate that s is no longer cached.
		atomic.Store(&s.sweepgen, sg)
	}

	n := int(s.nelems) - int(s.allocCount)
	if n > 0 {
		// cacheSpan updated alloc assuming all objects on s
		// were going to be allocated. Adjust for any that
		// weren't. We must do this before potentially
		// sweeping the span.
		atomic.Xadd64(&c.nmalloc, -int64(n))

		lock(&c.lock)
		c.empty.remove(s)
		c.nonempty.insert(s)
		if !stale {
			// (*mcentral).cacheSpan conservatively counted
			// unallocated slots in heap_live. Undo this.
			//
			// If this span was cached before sweep, then
			// heap_live was totally recomputed since
			// caching this span, so we don't do this for
			// stale spans.
			atomic.Xadd64(&memstats.heap_live, -int64(n)*int64(s.elemsize))
		}
		unlock(&c.lock)
	}

	if stale {
		// It's stale, so just sweep it. Sweeping will put it into
		// the right list.
		s.sweep(false)
	}
}

// freeSpan updates c and s after sweeping s.
// It sets s's sweepgen to the latest generation,
// and, based on the number of free objects in s,
// moves s to the appropriate list of c or returns it
// to the heap.
// freeSpan reports whether s was returned to the heap.
// If preserve=true, it does not move s (the caller
// must take care of it).
//
// For !go115NewMCentralImpl.
func (c *mcentral) freeSpan(s *mspan, preserve bool, wasempty bool) bool {
	if sg := mheap_.sweepgen; s.sweepgen == sg+1 || s.sweepgen == sg+3 {
		throw("freeSpan given cached span")
	}
	s.needzero = 1

	if preserve {
		// preserve is set only when called from (un)cacheSpan above,
		// the span must be in the empty list.
		if !s.inList() {
			throw("can't preserve unlinked span")
		}
		atomic.Store(&s.sweepgen, mheap_.sweepgen)
		return false
	}

	lock(&c.lock)

	// Move to nonempty if necessary.
	if wasempty {
		c.empty.remove(s)
		c.nonempty.insert(s)
	}

	// delay updating sweepgen until here. This is the signal that
	// the span may be used in an mcache, so it must come after the
	// linked list operations above (actually, just after the
	// lock of c above.)
	atomic.Store(&s.sweepgen, mheap_.sweepgen)

	if s.allocCount != 0 {
		unlock(&c.lock)
		return false
	}

	c.nonempty.remove(s)
	unlock(&c.lock)
	mheap_.freeSpan(s)
	return true
}
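
// The guard at the top of freeSpan follows the mspan sweepgen convention
// documented in mheap.go: relative to sg = mheap_.sweepgen, a span with
// s.sweepgen == sg-2 needs sweeping, sg-1 is being swept, sg is swept and
// ready to use, and sg+1 or sg+3 mean the span is cached in an mcache
// (unswept and swept, respectively). Cached spans must go through
// uncacheSpan, never directly through freeSpan.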

// grow allocates a new empty span from the heap and initializes it for c's size class.
func (c *mcentral) grow() *mspan {
	npages := uintptr(class_to_allocnpages[c.spanclass.sizeclass()])
	size := uintptr(class_to_size[c.spanclass.sizeclass()])

	s := mheap_.alloc(npages, c.spanclass, true)
	if s == nil {
		return nil
	}

	// Use division by multiplication and shifts to quickly compute:
	// n := (npages << _PageShift) / size
	n := (npages << _PageShift) >> s.divShift * uintptr(s.divMul) >> s.divShift2
	s.limit = s.base() + size*n
	heapBitsForAddr(s.base()).initSpan(s)
	return s
}
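
// The multiply-and-shift in grow computes n := (npages << _PageShift) / size
// without a hardware divide. Taking the 48-byte size class as an example:
// npages == 1 and _PageShift == 13, so the dividend is 8192 and n == 170,
// and s.limit is set just past the 170th object so the unusable 32-byte tail
// of the span is never handed out.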