Source file: src/reflect/type.go (Documentation: reflect)
16 package reflect
17
18 import (
19 "runtime"
20 "strconv"
21 "sync"
22 "unicode"
23 "unicode/utf8"
24 "unsafe"
25 )
26
27
28
29
30
31
32
33
34
35
36
37
38 type Type interface {
39
40
41
42
43 Align() int
44
45
46
47 FieldAlign() int
48
49
50
51
52
53
54
55
56
57 Method(int) Method
58
59
60
61
62
63
64
65
66
67 MethodByName(string) (Method, bool)
68
69
70 NumMethod() int
71
72
73
74 Name() string
75
76
77
78
79
80
81 PkgPath() string
82
83
84
85 Size() uintptr
86
87
88
89
90
91
92 String() string
93
94
95 Kind() Kind
96
97
98 Implements(u Type) bool
99
100
101 AssignableTo(u Type) bool
102
103
104 ConvertibleTo(u Type) bool
105
106
107 Comparable() bool
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124 Bits() int
125
126
127
128 ChanDir() ChanDir
129
130
131
132
133
134
135
136
137
138
139
140
141
142 IsVariadic() bool
143
144
145
146 Elem() Type
147
148
149
150
151 Field(i int) StructField
152
153
154
155
156
157 FieldByIndex(index []int) StructField
158
159
160
161 FieldByName(name string) (StructField, bool)
162
163
164
165
166
167
168
169
170
171
172
173
174
175 FieldByNameFunc(match func(string) bool) (StructField, bool)
176
177
178
179
180 In(i int) Type
181
182
183
184 Key() Type
185
186
187
188 Len() int
189
190
191
192 NumField() int
193
194
195
196 NumIn() int
197
198
199
200 NumOut() int
201
202
203
204
205 Out(i int) Type
206
207 common() *rtype
208 uncommon() *uncommonType
209 }
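The interface above is the whole read-only view of a Go type. For orientation, a minimal usage sketch (not part of this file): the generic methods work on any Type, while kind-specific methods must be guarded by a Kind check or they panic.

package main

import (
	"fmt"
	"reflect"
)

type point struct {
	X, Y int
}

func main() {
	t := reflect.TypeOf(point{})
	// Generic methods are always available.
	fmt.Println(t.Name(), t.Kind(), t.Size(), t.NumMethod())
	// Kind-specific methods such as NumField and Field panic on the
	// wrong kind, so check Kind first.
	if t.Kind() == reflect.Struct {
		for i := 0; i < t.NumField(); i++ {
			f := t.Field(i)
			fmt.Println(f.Name, f.Type, f.Offset)
		}
	}
}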
210
211
212
213
214
215
216
217
218
219
224
225
226
227 type Kind uint
228
229 const (
230 Invalid Kind = iota
231 Bool
232 Int
233 Int8
234 Int16
235 Int32
236 Int64
237 Uint
238 Uint8
239 Uint16
240 Uint32
241 Uint64
242 Uintptr
243 Float32
244 Float64
245 Complex64
246 Complex128
247 Array
248 Chan
249 Func
250 Interface
251 Map
252 Ptr
253 Slice
254 String
255 Struct
256 UnsafePointer
257 )
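A usage sketch for Kind (illustrative, not from this file): switching on Kind is the usual way to branch on the underlying category of a type, and Kind's String method, defined further down, gives the printable name.

package main

import (
	"fmt"
	"reflect"
)

func describe(i interface{}) string {
	switch k := reflect.TypeOf(i).Kind(); k {
	case reflect.Int, reflect.Int8, reflect.Int16, reflect.Int32, reflect.Int64:
		return "signed integer"
	case reflect.Slice, reflect.Array:
		return "sequence"
	default:
		return k.String() // e.g. "string", "map", "func"
	}
}

func main() {
	fmt.Println(describe(42), describe([]byte{}), describe("hi"))
}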
258
259
260
261
262
263
264
265
266 type tflag uint8
267
268 const (
269
270
271
272
273
274
275
276
277
278
279
280 tflagUncommon tflag = 1 << 0
281
282
283
284
285
286 tflagExtraStar tflag = 1 << 1
287
288
289 tflagNamed tflag = 1 << 2
290 )
291
292
293
294
295
296 type rtype struct {
297 size uintptr
298 ptrdata uintptr
299 hash uint32
300 tflag tflag
301 align uint8
302 fieldAlign uint8
303 kind uint8
304 alg *typeAlg
305 gcdata *byte
306 str nameOff
307 ptrToThis typeOff
308 }
309
310
311 type typeAlg struct {
312
313
314 hash func(unsafe.Pointer, uintptr) uintptr
315
316
317 equal func(unsafe.Pointer, unsafe.Pointer) bool
318 }
319
320
321 type method struct {
322 name nameOff
323 mtyp typeOff
324 ifn textOff
325 tfn textOff
326 }
327
328
329
330
331
332 type uncommonType struct {
333 pkgPath nameOff
334 mcount uint16
335 xcount uint16
336 moff uint32
337 _ uint32
338 }
339
340
341 type ChanDir int
342
343 const (
344 RecvDir ChanDir = 1 << iota
345 SendDir
346 BothDir = RecvDir | SendDir
347 )
348
349
350 type arrayType struct {
351 rtype
352 elem *rtype
353 slice *rtype
354 len uintptr
355 }
356
357
358 type chanType struct {
359 rtype
360 elem *rtype
361 dir uintptr
362 }
363
364
365
366
367
368
369
370
371
372
373
374
375 type funcType struct {
376 rtype
377 inCount uint16
378 outCount uint16
379 }
380
381
382 type imethod struct {
383 name nameOff
384 typ typeOff
385 }
386
387
388 type interfaceType struct {
389 rtype
390 pkgPath name
391 methods []imethod
392 }
393
394
395 type mapType struct {
396 rtype
397 key *rtype
398 elem *rtype
399 bucket *rtype
400 keysize uint8
401 indirectkey uint8
402 valuesize uint8
403 indirectvalue uint8
404 bucketsize uint16
405 reflexivekey bool
406 needkeyupdate bool
407 }
408
409
410 type ptrType struct {
411 rtype
412 elem *rtype
413 }
414
415
416 type sliceType struct {
417 rtype
418 elem *rtype
419 }
420
421
422 type structField struct {
423 name name
424 typ *rtype
425 offsetEmbed uintptr
426 }
427
428 func (f *structField) offset() uintptr {
429 return f.offsetEmbed >> 1
430 }
431
432 func (f *structField) embedded() bool {
433 return f.offsetEmbed&1 != 0
434 }
435
436
437 type structType struct {
438 rtype
439 pkgPath name
440 fields []structField
441 }
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466 type name struct {
467 bytes *byte
468 }
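The comment block that originally documented this encoding is missing here. Reading newName and the accessor methods, the layout appears to be: one flag byte (bit 0 = exported, bit 1 = tag data follows, bit 2 = package path follows), a big-endian two-byte name length, the name bytes, then, if flagged, a big-endian two-byte tag length plus the tag bytes, and finally an optional four-byte nameOff to the package path. A standalone sketch of that layout, using hypothetical local helpers (encodeName, decodeName) rather than the unexported reflect ones:

package main

import "fmt"

// encodeName mirrors the layout used by reflect's newName: flag byte,
// 2-byte big-endian length, name bytes, optional 2-byte length plus tag.
func encodeName(name, tag string, exported bool) []byte {
	var bits byte
	if exported {
		bits |= 1 << 0
	}
	b := []byte{bits, byte(len(name) >> 8), byte(len(name))}
	b = append(b, name...)
	if tag != "" {
		b[0] |= 1 << 1
		b = append(b, byte(len(tag)>>8), byte(len(tag)))
		b = append(b, tag...)
	}
	return b
}

// decodeName reverses encodeName, as name.name and name.tag do above.
func decodeName(b []byte) (name, tag string) {
	n := int(b[1])<<8 | int(b[2])
	name = string(b[3 : 3+n])
	if b[0]&(1<<1) != 0 {
		off := 3 + n
		tl := int(b[off])<<8 | int(b[off+1])
		tag = string(b[off+2 : off+2+tl])
	}
	return
}

func main() {
	enc := encodeName("Field", `json:"field"`, true)
	fmt.Println(decodeName(enc))
}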
469
470 func (n name) data(off int, whySafe string) *byte {
471 return (*byte)(add(unsafe.Pointer(n.bytes), uintptr(off), whySafe))
472 }
473
474 func (n name) isExported() bool {
475 return (*n.bytes)&(1<<0) != 0
476 }
477
478 func (n name) nameLen() int {
479 return int(uint16(*n.data(1, "name len field"))<<8 | uint16(*n.data(2, "name len field")))
480 }
481
482 func (n name) tagLen() int {
483 if *n.data(0, "name flag field")&(1<<1) == 0 {
484 return 0
485 }
486 off := 3 + n.nameLen()
487 return int(uint16(*n.data(off, "name taglen field"))<<8 | uint16(*n.data(off+1, "name taglen field")))
488 }
489
490 func (n name) name() (s string) {
491 if n.bytes == nil {
492 return
493 }
494 b := (*[4]byte)(unsafe.Pointer(n.bytes))
495
496 hdr := (*stringHeader)(unsafe.Pointer(&s))
497 hdr.Data = unsafe.Pointer(&b[3])
498 hdr.Len = int(b[1])<<8 | int(b[2])
499 return s
500 }
501
502 func (n name) tag() (s string) {
503 tl := n.tagLen()
504 if tl == 0 {
505 return ""
506 }
507 nl := n.nameLen()
508 hdr := (*stringHeader)(unsafe.Pointer(&s))
509 hdr.Data = unsafe.Pointer(n.data(3+nl+2, "non-empty string"))
510 hdr.Len = tl
511 return s
512 }
513
514 func (n name) pkgPath() string {
515 if n.bytes == nil || *n.data(0, "name flag field")&(1<<2) == 0 {
516 return ""
517 }
518 off := 3 + n.nameLen()
519 if tl := n.tagLen(); tl > 0 {
520 off += 2 + tl
521 }
522 var nameOff int32
523
524
525 copy((*[4]byte)(unsafe.Pointer(&nameOff))[:], (*[4]byte)(unsafe.Pointer(n.data(off, "name offset field")))[:])
526 pkgPathName := name{(*byte)(resolveTypeOff(unsafe.Pointer(n.bytes), nameOff))}
527 return pkgPathName.name()
528 }
529
530
531 func round(n, a uintptr) uintptr {
532 return (n + a - 1) &^ (a - 1)
533 }
534
535 func newName(n, tag string, exported bool) name {
536 if len(n) > 1<<16-1 {
537 panic("reflect.nameFrom: name too long: " + n)
538 }
539 if len(tag) > 1<<16-1 {
540 panic("reflect.nameFrom: tag too long: " + tag)
541 }
542
543 var bits byte
544 l := 1 + 2 + len(n)
545 if exported {
546 bits |= 1 << 0
547 }
548 if len(tag) > 0 {
549 l += 2 + len(tag)
550 bits |= 1 << 1
551 }
552
553 b := make([]byte, l)
554 b[0] = bits
555 b[1] = uint8(len(n) >> 8)
556 b[2] = uint8(len(n))
557 copy(b[3:], n)
558 if len(tag) > 0 {
559 tb := b[3+len(n):]
560 tb[0] = uint8(len(tag) >> 8)
561 tb[1] = uint8(len(tag))
562 copy(tb[2:], tag)
563 }
564
565 return name{bytes: &b[0]}
566 }
567
568
572
573
574 type Method struct {
575
576
577
578
579
580
581 Name string
582 PkgPath string
583
584 Type Type
585 Func Value
586 Index int
587 }
588
589 const (
590 kindDirectIface = 1 << 5
591 kindGCProg = 1 << 6
592 kindNoPointers = 1 << 7
593 kindMask = (1 << 5) - 1
594 )
595
596 func (k Kind) String() string {
597 if int(k) < len(kindNames) {
598 return kindNames[k]
599 }
600 return "kind" + strconv.Itoa(int(k))
601 }
602
603 var kindNames = []string{
604 Invalid: "invalid",
605 Bool: "bool",
606 Int: "int",
607 Int8: "int8",
608 Int16: "int16",
609 Int32: "int32",
610 Int64: "int64",
611 Uint: "uint",
612 Uint8: "uint8",
613 Uint16: "uint16",
614 Uint32: "uint32",
615 Uint64: "uint64",
616 Uintptr: "uintptr",
617 Float32: "float32",
618 Float64: "float64",
619 Complex64: "complex64",
620 Complex128: "complex128",
621 Array: "array",
622 Chan: "chan",
623 Func: "func",
624 Interface: "interface",
625 Map: "map",
626 Ptr: "ptr",
627 Slice: "slice",
628 String: "string",
629 Struct: "struct",
630 UnsafePointer: "unsafe.Pointer",
631 }
632
633 func (t *uncommonType) methods() []method {
634 if t.mcount == 0 {
635 return nil
636 }
637 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.mcount > 0"))[:t.mcount:t.mcount]
638 }
639
640 func (t *uncommonType) exportedMethods() []method {
641 if t.xcount == 0 {
642 return nil
643 }
644 return (*[1 << 16]method)(add(unsafe.Pointer(t), uintptr(t.moff), "t.xcount > 0"))[:t.xcount:t.xcount]
645 }
646
647
648
649
650 func resolveNameOff(ptrInModule unsafe.Pointer, off int32) unsafe.Pointer
651
652
653
654
655 func resolveTypeOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
656
657
658
659
660 func resolveTextOff(rtype unsafe.Pointer, off int32) unsafe.Pointer
661
662
663
664
665 func addReflectOff(ptr unsafe.Pointer) int32
666
667
668
669 func resolveReflectName(n name) nameOff {
670 return nameOff(addReflectOff(unsafe.Pointer(n.bytes)))
671 }
672
673
674
675 func resolveReflectType(t *rtype) typeOff {
676 return typeOff(addReflectOff(unsafe.Pointer(t)))
677 }
678
679
680
681
682 func resolveReflectText(ptr unsafe.Pointer) textOff {
683 return textOff(addReflectOff(ptr))
684 }
685
686 type nameOff int32
687 type typeOff int32
688 type textOff int32
689
690 func (t *rtype) nameOff(off nameOff) name {
691 return name{(*byte)(resolveNameOff(unsafe.Pointer(t), int32(off)))}
692 }
693
694 func (t *rtype) typeOff(off typeOff) *rtype {
695 return (*rtype)(resolveTypeOff(unsafe.Pointer(t), int32(off)))
696 }
697
698 func (t *rtype) textOff(off textOff) unsafe.Pointer {
699 return resolveTextOff(unsafe.Pointer(t), int32(off))
700 }
701
702 func (t *rtype) uncommon() *uncommonType {
703 if t.tflag&tflagUncommon == 0 {
704 return nil
705 }
706 switch t.Kind() {
707 case Struct:
708 return &(*structTypeUncommon)(unsafe.Pointer(t)).u
709 case Ptr:
710 type u struct {
711 ptrType
712 u uncommonType
713 }
714 return &(*u)(unsafe.Pointer(t)).u
715 case Func:
716 type u struct {
717 funcType
718 u uncommonType
719 }
720 return &(*u)(unsafe.Pointer(t)).u
721 case Slice:
722 type u struct {
723 sliceType
724 u uncommonType
725 }
726 return &(*u)(unsafe.Pointer(t)).u
727 case Array:
728 type u struct {
729 arrayType
730 u uncommonType
731 }
732 return &(*u)(unsafe.Pointer(t)).u
733 case Chan:
734 type u struct {
735 chanType
736 u uncommonType
737 }
738 return &(*u)(unsafe.Pointer(t)).u
739 case Map:
740 type u struct {
741 mapType
742 u uncommonType
743 }
744 return &(*u)(unsafe.Pointer(t)).u
745 case Interface:
746 type u struct {
747 interfaceType
748 u uncommonType
749 }
750 return &(*u)(unsafe.Pointer(t)).u
751 default:
752 type u struct {
753 rtype
754 u uncommonType
755 }
756 return &(*u)(unsafe.Pointer(t)).u
757 }
758 }
759
760 func (t *rtype) String() string {
761 s := t.nameOff(t.str).name()
762 if t.tflag&tflagExtraStar != 0 {
763 return s[1:]
764 }
765 return s
766 }
767
768 func (t *rtype) Size() uintptr { return t.size }
769
770 func (t *rtype) Bits() int {
771 if t == nil {
772 panic("reflect: Bits of nil Type")
773 }
774 k := t.Kind()
775 if k < Int || k > Complex128 {
776 panic("reflect: Bits of non-arithmetic Type " + t.String())
777 }
778 return int(t.size) * 8
779 }
780
781 func (t *rtype) Align() int { return int(t.align) }
782
783 func (t *rtype) FieldAlign() int { return int(t.fieldAlign) }
784
785 func (t *rtype) Kind() Kind { return Kind(t.kind & kindMask) }
786
787 func (t *rtype) pointers() bool { return t.kind&kindNoPointers == 0 }
788
789 func (t *rtype) common() *rtype { return t }
790
791 func (t *rtype) exportedMethods() []method {
792 ut := t.uncommon()
793 if ut == nil {
794 return nil
795 }
796 return ut.exportedMethods()
797 }
798
799 func (t *rtype) NumMethod() int {
800 if t.Kind() == Interface {
801 tt := (*interfaceType)(unsafe.Pointer(t))
802 return tt.NumMethod()
803 }
804 return len(t.exportedMethods())
805 }
806
807 func (t *rtype) Method(i int) (m Method) {
808 if t.Kind() == Interface {
809 tt := (*interfaceType)(unsafe.Pointer(t))
810 return tt.Method(i)
811 }
812 methods := t.exportedMethods()
813 if i < 0 || i >= len(methods) {
814 panic("reflect: Method index out of range")
815 }
816 p := methods[i]
817 pname := t.nameOff(p.name)
818 m.Name = pname.name()
819 fl := flag(Func)
820 mtyp := t.typeOff(p.mtyp)
821 ft := (*funcType)(unsafe.Pointer(mtyp))
822 in := make([]Type, 0, 1+len(ft.in()))
823 in = append(in, t)
824 for _, arg := range ft.in() {
825 in = append(in, arg)
826 }
827 out := make([]Type, 0, len(ft.out()))
828 for _, ret := range ft.out() {
829 out = append(out, ret)
830 }
831 mt := FuncOf(in, out, ft.IsVariadic())
832 m.Type = mt
833 tfn := t.textOff(p.tfn)
834 fn := unsafe.Pointer(&tfn)
835 m.Func = Value{mt.(*rtype), fn, fl}
836
837 m.Index = i
838 return m
839 }
840
841 func (t *rtype) MethodByName(name string) (m Method, ok bool) {
842 if t.Kind() == Interface {
843 tt := (*interfaceType)(unsafe.Pointer(t))
844 return tt.MethodByName(name)
845 }
846 ut := t.uncommon()
847 if ut == nil {
848 return Method{}, false
849 }
850
851 for i, p := range ut.exportedMethods() {
852 if t.nameOff(p.name).name() == name {
853 return t.Method(i), true
854 }
855 }
856 return Method{}, false
857 }
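An illustrative sketch of the exported-method API built on the data above (not part of this file); it relies only on the standard strings package.

package main

import (
	"fmt"
	"reflect"
	"strings"
)

func main() {
	t := reflect.TypeOf(strings.NewReplacer()) // *strings.Replacer
	// Exported methods are reported in lexicographic order.
	for i := 0; i < t.NumMethod(); i++ {
		m := t.Method(i)
		fmt.Println(m.Index, m.Name, m.Type)
	}
	if m, ok := t.MethodByName("Replace"); ok {
		// m.Func takes the receiver as its first argument.
		r := strings.NewReplacer("a", "b")
		out := m.Func.Call([]reflect.Value{
			reflect.ValueOf(r),
			reflect.ValueOf("banana"),
		})
		fmt.Println(out[0].String()) // bbnbnb
	}
}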
858
859 func (t *rtype) PkgPath() string {
860 if t.tflag&tflagNamed == 0 {
861 return ""
862 }
863 ut := t.uncommon()
864 if ut == nil {
865 return ""
866 }
867 return t.nameOff(ut.pkgPath).name()
868 }
869
870 func hasPrefix(s, prefix string) bool {
871 return len(s) >= len(prefix) && s[:len(prefix)] == prefix
872 }
873
874 func (t *rtype) Name() string {
875 if t.tflag&tflagNamed == 0 {
876 return ""
877 }
878 s := t.String()
879 i := len(s) - 1
880 for i >= 0 {
881 if s[i] == '.' {
882 break
883 }
884 i--
885 }
886 return s[i+1:]
887 }
888
889 func (t *rtype) ChanDir() ChanDir {
890 if t.Kind() != Chan {
891 panic("reflect: ChanDir of non-chan type")
892 }
893 tt := (*chanType)(unsafe.Pointer(t))
894 return ChanDir(tt.dir)
895 }
896
897 func (t *rtype) IsVariadic() bool {
898 if t.Kind() != Func {
899 panic("reflect: IsVariadic of non-func type")
900 }
901 tt := (*funcType)(unsafe.Pointer(t))
902 return tt.outCount&(1<<15) != 0
903 }
904
905 func (t *rtype) Elem() Type {
906 switch t.Kind() {
907 case Array:
908 tt := (*arrayType)(unsafe.Pointer(t))
909 return toType(tt.elem)
910 case Chan:
911 tt := (*chanType)(unsafe.Pointer(t))
912 return toType(tt.elem)
913 case Map:
914 tt := (*mapType)(unsafe.Pointer(t))
915 return toType(tt.elem)
916 case Ptr:
917 tt := (*ptrType)(unsafe.Pointer(t))
918 return toType(tt.elem)
919 case Slice:
920 tt := (*sliceType)(unsafe.Pointer(t))
921 return toType(tt.elem)
922 }
923 panic("reflect: Elem of invalid type")
924 }
925
926 func (t *rtype) Field(i int) StructField {
927 if t.Kind() != Struct {
928 panic("reflect: Field of non-struct type")
929 }
930 tt := (*structType)(unsafe.Pointer(t))
931 return tt.Field(i)
932 }
933
934 func (t *rtype) FieldByIndex(index []int) StructField {
935 if t.Kind() != Struct {
936 panic("reflect: FieldByIndex of non-struct type")
937 }
938 tt := (*structType)(unsafe.Pointer(t))
939 return tt.FieldByIndex(index)
940 }
941
942 func (t *rtype) FieldByName(name string) (StructField, bool) {
943 if t.Kind() != Struct {
944 panic("reflect: FieldByName of non-struct type")
945 }
946 tt := (*structType)(unsafe.Pointer(t))
947 return tt.FieldByName(name)
948 }
949
950 func (t *rtype) FieldByNameFunc(match func(string) bool) (StructField, bool) {
951 if t.Kind() != Struct {
952 panic("reflect: FieldByNameFunc of non-struct type")
953 }
954 tt := (*structType)(unsafe.Pointer(t))
955 return tt.FieldByNameFunc(match)
956 }
957
958 func (t *rtype) In(i int) Type {
959 if t.Kind() != Func {
960 panic("reflect: In of non-func type")
961 }
962 tt := (*funcType)(unsafe.Pointer(t))
963 return toType(tt.in()[i])
964 }
965
966 func (t *rtype) Key() Type {
967 if t.Kind() != Map {
968 panic("reflect: Key of non-map type")
969 }
970 tt := (*mapType)(unsafe.Pointer(t))
971 return toType(tt.key)
972 }
973
974 func (t *rtype) Len() int {
975 if t.Kind() != Array {
976 panic("reflect: Len of non-array type")
977 }
978 tt := (*arrayType)(unsafe.Pointer(t))
979 return int(tt.len)
980 }
981
982 func (t *rtype) NumField() int {
983 if t.Kind() != Struct {
984 panic("reflect: NumField of non-struct type")
985 }
986 tt := (*structType)(unsafe.Pointer(t))
987 return len(tt.fields)
988 }
989
990 func (t *rtype) NumIn() int {
991 if t.Kind() != Func {
992 panic("reflect: NumIn of non-func type")
993 }
994 tt := (*funcType)(unsafe.Pointer(t))
995 return int(tt.inCount)
996 }
997
998 func (t *rtype) NumOut() int {
999 if t.Kind() != Func {
1000 panic("reflect: NumOut of non-func type")
1001 }
1002 tt := (*funcType)(unsafe.Pointer(t))
1003 return len(tt.out())
1004 }
1005
1006 func (t *rtype) Out(i int) Type {
1007 if t.Kind() != Func {
1008 panic("reflect: Out of non-func type")
1009 }
1010 tt := (*funcType)(unsafe.Pointer(t))
1011 return toType(tt.out()[i])
1012 }
1013
1014 func (t *funcType) in() []*rtype {
1015 uadd := unsafe.Sizeof(*t)
1016 if t.tflag&tflagUncommon != 0 {
1017 uadd += unsafe.Sizeof(uncommonType{})
1018 }
1019 if t.inCount == 0 {
1020 return nil
1021 }
1022 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "t.inCount > 0"))[:t.inCount]
1023 }
1024
1025 func (t *funcType) out() []*rtype {
1026 uadd := unsafe.Sizeof(*t)
1027 if t.tflag&tflagUncommon != 0 {
1028 uadd += unsafe.Sizeof(uncommonType{})
1029 }
1030 outCount := t.outCount & (1<<15 - 1)
1031 if outCount == 0 {
1032 return nil
1033 }
1034 return (*[1 << 20]*rtype)(add(unsafe.Pointer(t), uadd, "outCount > 0"))[t.inCount : t.inCount+outCount]
1035 }
1036
1037
1038
1039
1040
1041
1042
1043
1044 func add(p unsafe.Pointer, x uintptr, whySafe string) unsafe.Pointer {
1045 return unsafe.Pointer(uintptr(p) + x)
1046 }
1047
1048 func (d ChanDir) String() string {
1049 switch d {
1050 case SendDir:
1051 return "chan<-"
1052 case RecvDir:
1053 return "<-chan"
1054 case BothDir:
1055 return "chan"
1056 }
1057 return "ChanDir" + strconv.Itoa(int(d))
1058 }
1059
1060
1061 func (t *interfaceType) Method(i int) (m Method) {
1062 if i < 0 || i >= len(t.methods) {
1063 return
1064 }
1065 p := &t.methods[i]
1066 pname := t.nameOff(p.name)
1067 m.Name = pname.name()
1068 if !pname.isExported() {
1069 m.PkgPath = pname.pkgPath()
1070 if m.PkgPath == "" {
1071 m.PkgPath = t.pkgPath.name()
1072 }
1073 }
1074 m.Type = toType(t.typeOff(p.typ))
1075 m.Index = i
1076 return
1077 }
1078
1079
1080 func (t *interfaceType) NumMethod() int { return len(t.methods) }
1081
1082
1083 func (t *interfaceType) MethodByName(name string) (m Method, ok bool) {
1084 if t == nil {
1085 return
1086 }
1087 var p *imethod
1088 for i := range t.methods {
1089 p = &t.methods[i]
1090 if t.nameOff(p.name).name() == name {
1091 return t.Method(i), true
1092 }
1093 }
1094 return
1095 }
1096
1097
1098 type StructField struct {
1099
1100 Name string
1101
1102
1103
1104 PkgPath string
1105
1106 Type Type
1107 Tag StructTag
1108 Offset uintptr
1109 Index []int
1110 Anonymous bool
1111 }
1112
1113
1114
1115
1116
1117
1118
1119
1120
1121 type StructTag string
1122
1123
1124
1125
1126
1127
1128 func (tag StructTag) Get(key string) string {
1129 v, _ := tag.Lookup(key)
1130 return v
1131 }
1132
1133
1134
1135
1136
1137
1138
1139 func (tag StructTag) Lookup(key string) (value string, ok bool) {
1140
1141
1142
1143 for tag != "" {
1144
1145 i := 0
1146 for i < len(tag) && tag[i] == ' ' {
1147 i++
1148 }
1149 tag = tag[i:]
1150 if tag == "" {
1151 break
1152 }
1153
1154
1155
1156
1157
1158 i = 0
1159 for i < len(tag) && tag[i] > ' ' && tag[i] != ':' && tag[i] != '"' && tag[i] != 0x7f {
1160 i++
1161 }
1162 if i == 0 || i+1 >= len(tag) || tag[i] != ':' || tag[i+1] != '"' {
1163 break
1164 }
1165 name := string(tag[:i])
1166 tag = tag[i+1:]
1167
1168
1169 i = 1
1170 for i < len(tag) && tag[i] != '"' {
1171 if tag[i] == '\\' {
1172 i++
1173 }
1174 i++
1175 }
1176 if i >= len(tag) {
1177 break
1178 }
1179 qvalue := string(tag[:i+1])
1180 tag = tag[i+1:]
1181
1182 if key == name {
1183 value, err := strconv.Unquote(qvalue)
1184 if err != nil {
1185 break
1186 }
1187 return value, true
1188 }
1189 }
1190 return "", false
1191 }
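A short sketch of how struct tags round-trip through Get and Lookup (illustrative only):

package main

import (
	"fmt"
	"reflect"
)

type user struct {
	Name string `json:"name,omitempty" db:"user_name"`
	Age  int    `json:"age"`
}

func main() {
	t := reflect.TypeOf(user{})
	f, _ := t.FieldByName("Name")
	fmt.Println(f.Tag.Get("json")) // name,omitempty
	if v, ok := f.Tag.Lookup("db"); ok {
		fmt.Println(v) // user_name
	}
	if _, ok := f.Tag.Lookup("xml"); !ok {
		fmt.Println("no xml key")
	}
}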
1192
1193
1194 func (t *structType) Field(i int) (f StructField) {
1195 if i < 0 || i >= len(t.fields) {
1196 panic("reflect: Field index out of bounds")
1197 }
1198 p := &t.fields[i]
1199 f.Type = toType(p.typ)
1200 f.Name = p.name.name()
1201 f.Anonymous = p.embedded()
1202 if !p.name.isExported() {
1203 f.PkgPath = t.pkgPath.name()
1204 }
1205 if tag := p.name.tag(); tag != "" {
1206 f.Tag = StructTag(tag)
1207 }
1208 f.Offset = p.offset()
1209
1210
1211
1212
1213
1214
1215
1216
1217 f.Index = []int{i}
1218 return
1219 }
1220
1221
1222
1223
1224
1225 func (t *structType) FieldByIndex(index []int) (f StructField) {
1226 f.Type = toType(&t.rtype)
1227 for i, x := range index {
1228 if i > 0 {
1229 ft := f.Type
1230 if ft.Kind() == Ptr && ft.Elem().Kind() == Struct {
1231 ft = ft.Elem()
1232 }
1233 f.Type = ft
1234 }
1235 f = f.Type.Field(x)
1236 }
1237 return
1238 }
1239
1240
1241 type fieldScan struct {
1242 typ *structType
1243 index []int
1244 }
1245
1246
1247
1248 func (t *structType) FieldByNameFunc(match func(string) bool) (result StructField, ok bool) {
1249
1250
1251
1252
1253
1254
1255
1256
1257 current := []fieldScan{}
1258 next := []fieldScan{{typ: t}}
1259
1260
1261
1262
1263
1264
1265
1266 var nextCount map[*structType]int
1267
1268
1269
1270
1271
1272
1273 visited := map[*structType]bool{}
1274
1275 for len(next) > 0 {
1276 current, next = next, current[:0]
1277 count := nextCount
1278 nextCount = nil
1279
1280
1281
1282
1283
1284 for _, scan := range current {
1285 t := scan.typ
1286 if visited[t] {
1287
1288
1289
1290 continue
1291 }
1292 visited[t] = true
1293 for i := range t.fields {
1294 f := &t.fields[i]
1295
1296 fname := f.name.name()
1297 var ntyp *rtype
1298 if f.embedded() {
1299
1300 ntyp = f.typ
1301 if ntyp.Kind() == Ptr {
1302 ntyp = ntyp.Elem().common()
1303 }
1304 }
1305
1306
1307 if match(fname) {
1308
1309 if count[t] > 1 || ok {
1310
1311 return StructField{}, false
1312 }
1313 result = t.Field(i)
1314 result.Index = nil
1315 result.Index = append(result.Index, scan.index...)
1316 result.Index = append(result.Index, i)
1317 ok = true
1318 continue
1319 }
1320
1321
1322
1323
1324 if ok || ntyp == nil || ntyp.Kind() != Struct {
1325 continue
1326 }
1327 styp := (*structType)(unsafe.Pointer(ntyp))
1328 if nextCount[styp] > 0 {
1329 nextCount[styp] = 2
1330 continue
1331 }
1332 if nextCount == nil {
1333 nextCount = map[*structType]int{}
1334 }
1335 nextCount[styp] = 1
1336 if count[t] > 1 {
1337 nextCount[styp] = 2
1338 }
1339 var index []int
1340 index = append(index, scan.index...)
1341 index = append(index, i)
1342 next = append(next, fieldScan{styp, index})
1343 }
1344 }
1345 if ok {
1346 break
1347 }
1348 }
1349 return
1350 }
1351
1352
1353
1354 func (t *structType) FieldByName(name string) (f StructField, present bool) {
1355
1356 hasEmbeds := false
1357 if name != "" {
1358 for i := range t.fields {
1359 tf := &t.fields[i]
1360 if tf.name.name() == name {
1361 return t.Field(i), true
1362 }
1363 if tf.embedded() {
1364 hasEmbeds = true
1365 }
1366 }
1367 }
1368 if !hasEmbeds {
1369 return
1370 }
1371 return t.FieldByNameFunc(func(s string) bool { return s == name })
1372 }
1373
1374
1375
1376 func TypeOf(i interface{}) Type {
1377 eface := *(*emptyInterface)(unsafe.Pointer(&i))
1378 return toType(eface.typ)
1379 }
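Usage sketch for TypeOf (illustrative only): it reports the dynamic type stored in the interface value it receives, and nil when given an untyped nil.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	fmt.Println(reflect.TypeOf(3.14))              // float64
	fmt.Println(reflect.TypeOf([]string(nil)))     // []string
	fmt.Println(reflect.TypeOf(struct{ A int }{})) // struct { A int }
	// A nil interface carries no type information at all.
	fmt.Println(reflect.TypeOf(nil)) // <nil>
}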
1380
1381
1382 var ptrMap sync.Map
1383
1384
1385
1386 func PtrTo(t Type) Type {
1387 return t.(*rtype).ptrTo()
1388 }
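Usage sketch for PtrTo (illustrative only): it is useful when only a Type is in hand, for example while constructing function or struct types at run time.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	t := reflect.TypeOf(0)          // int
	pt := reflect.PtrTo(t)          // *int
	fmt.Println(pt, pt.Elem() == t) // *int true
	// reflect.New returns a Value whose type is exactly PtrTo(t).
	fmt.Println(reflect.New(t).Type() == pt) // true
}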
1389
1390 func (t *rtype) ptrTo() *rtype {
1391 if t.ptrToThis != 0 {
1392 return t.typeOff(t.ptrToThis)
1393 }
1394
1395
1396 if pi, ok := ptrMap.Load(t); ok {
1397 return &pi.(*ptrType).rtype
1398 }
1399
1400
1401 s := "*" + t.String()
1402 for _, tt := range typesByString(s) {
1403 p := (*ptrType)(unsafe.Pointer(tt))
1404 if p.elem != t {
1405 continue
1406 }
1407 pi, _ := ptrMap.LoadOrStore(t, p)
1408 return &pi.(*ptrType).rtype
1409 }
1410
1411
1412
1413 var iptr interface{} = (*unsafe.Pointer)(nil)
1414 prototype := *(**ptrType)(unsafe.Pointer(&iptr))
1415 pp := *prototype
1416
1417 pp.str = resolveReflectName(newName(s, "", false))
1418 pp.ptrToThis = 0
1419
1420
1421
1422
1423
1424
1425 pp.hash = fnv1(t.hash, '*')
1426
1427 pp.elem = t
1428
1429 pi, _ := ptrMap.LoadOrStore(t, &pp)
1430 return &pi.(*ptrType).rtype
1431 }
1432
1433
1434 func fnv1(x uint32, list ...byte) uint32 {
1435 for _, b := range list {
1436 x = x*16777619 ^ uint32(b)
1437 }
1438 return x
1439 }
1440
1441 func (t *rtype) Implements(u Type) bool {
1442 if u == nil {
1443 panic("reflect: nil type passed to Type.Implements")
1444 }
1445 if u.Kind() != Interface {
1446 panic("reflect: non-interface type passed to Type.Implements")
1447 }
1448 return implements(u.(*rtype), t)
1449 }
1450
1451 func (t *rtype) AssignableTo(u Type) bool {
1452 if u == nil {
1453 panic("reflect: nil type passed to Type.AssignableTo")
1454 }
1455 uu := u.(*rtype)
1456 return directlyAssignable(uu, t) || implements(uu, t)
1457 }
1458
1459 func (t *rtype) ConvertibleTo(u Type) bool {
1460 if u == nil {
1461 panic("reflect: nil type passed to Type.ConvertibleTo")
1462 }
1463 uu := u.(*rtype)
1464 return convertOp(uu, t) != nil
1465 }
1466
1467 func (t *rtype) Comparable() bool {
1468 return t.alg != nil && t.alg.equal != nil
1469 }
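The four predicates above mirror Go's rules for interface satisfaction, assignability, convertibility, and comparability. An illustrative sketch:

package main

import (
	"fmt"
	"io"
	"os"
	"reflect"
)

func main() {
	// To name an interface type, take the Elem of a pointer to it.
	writer := reflect.TypeOf((*io.Writer)(nil)).Elem()
	file := reflect.TypeOf((*os.File)(nil)) // *os.File

	fmt.Println(file.Implements(writer))   // true
	fmt.Println(file.AssignableTo(writer)) // true
	// int -> string is a legal (rune) conversion.
	fmt.Println(reflect.TypeOf(0).ConvertibleTo(reflect.TypeOf(""))) // true
	// Slices cannot be compared with ==.
	fmt.Println(reflect.TypeOf([]int{}).Comparable()) // false
}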
1470
1471
1472 func implements(T, V *rtype) bool {
1473 if T.Kind() != Interface {
1474 return false
1475 }
1476 t := (*interfaceType)(unsafe.Pointer(T))
1477 if len(t.methods) == 0 {
1478 return true
1479 }
1480
1481
1482
1483
1484
1485
1486
1487
1488
1489
1490
1491
1492
1493 if V.Kind() == Interface {
1494 v := (*interfaceType)(unsafe.Pointer(V))
1495 i := 0
1496 for j := 0; j < len(v.methods); j++ {
1497 tm := &t.methods[i]
1498 tmName := t.nameOff(tm.name)
1499 vm := &v.methods[j]
1500 vmName := V.nameOff(vm.name)
1501 if vmName.name() == tmName.name() && V.typeOff(vm.typ) == t.typeOff(tm.typ) {
1502 if !tmName.isExported() {
1503 tmPkgPath := tmName.pkgPath()
1504 if tmPkgPath == "" {
1505 tmPkgPath = t.pkgPath.name()
1506 }
1507 vmPkgPath := vmName.pkgPath()
1508 if vmPkgPath == "" {
1509 vmPkgPath = v.pkgPath.name()
1510 }
1511 if tmPkgPath != vmPkgPath {
1512 continue
1513 }
1514 }
1515 if i++; i >= len(t.methods) {
1516 return true
1517 }
1518 }
1519 }
1520 return false
1521 }
1522
1523 v := V.uncommon()
1524 if v == nil {
1525 return false
1526 }
1527 i := 0
1528 vmethods := v.methods()
1529 for j := 0; j < int(v.mcount); j++ {
1530 tm := &t.methods[i]
1531 tmName := t.nameOff(tm.name)
1532 vm := vmethods[j]
1533 vmName := V.nameOff(vm.name)
1534 if vmName.name() == tmName.name() && V.typeOff(vm.mtyp) == t.typeOff(tm.typ) {
1535 if !tmName.isExported() {
1536 tmPkgPath := tmName.pkgPath()
1537 if tmPkgPath == "" {
1538 tmPkgPath = t.pkgPath.name()
1539 }
1540 vmPkgPath := vmName.pkgPath()
1541 if vmPkgPath == "" {
1542 vmPkgPath = V.nameOff(v.pkgPath).name()
1543 }
1544 if tmPkgPath != vmPkgPath {
1545 continue
1546 }
1547 }
1548 if i++; i >= len(t.methods) {
1549 return true
1550 }
1551 }
1552 }
1553 return false
1554 }
1555
1556
1557
1558
1559
1560
1561 func directlyAssignable(T, V *rtype) bool {
1562
1563 if T == V {
1564 return true
1565 }
1566
1567
1568
1569 if T.Name() != "" && V.Name() != "" || T.Kind() != V.Kind() {
1570 return false
1571 }
1572
1573
1574 return haveIdenticalUnderlyingType(T, V, true)
1575 }
1576
1577 func haveIdenticalType(T, V Type, cmpTags bool) bool {
1578 if cmpTags {
1579 return T == V
1580 }
1581
1582 if T.Name() != V.Name() || T.Kind() != V.Kind() {
1583 return false
1584 }
1585
1586 return haveIdenticalUnderlyingType(T.common(), V.common(), false)
1587 }
1588
1589 func haveIdenticalUnderlyingType(T, V *rtype, cmpTags bool) bool {
1590 if T == V {
1591 return true
1592 }
1593
1594 kind := T.Kind()
1595 if kind != V.Kind() {
1596 return false
1597 }
1598
1599
1600
1601 if Bool <= kind && kind <= Complex128 || kind == String || kind == UnsafePointer {
1602 return true
1603 }
1604
1605
1606 switch kind {
1607 case Array:
1608 return T.Len() == V.Len() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1609
1610 case Chan:
1611
1612
1613
1614 if V.ChanDir() == BothDir && haveIdenticalType(T.Elem(), V.Elem(), cmpTags) {
1615 return true
1616 }
1617
1618
1619 return V.ChanDir() == T.ChanDir() && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1620
1621 case Func:
1622 t := (*funcType)(unsafe.Pointer(T))
1623 v := (*funcType)(unsafe.Pointer(V))
1624 if t.outCount != v.outCount || t.inCount != v.inCount {
1625 return false
1626 }
1627 for i := 0; i < t.NumIn(); i++ {
1628 if !haveIdenticalType(t.In(i), v.In(i), cmpTags) {
1629 return false
1630 }
1631 }
1632 for i := 0; i < t.NumOut(); i++ {
1633 if !haveIdenticalType(t.Out(i), v.Out(i), cmpTags) {
1634 return false
1635 }
1636 }
1637 return true
1638
1639 case Interface:
1640 t := (*interfaceType)(unsafe.Pointer(T))
1641 v := (*interfaceType)(unsafe.Pointer(V))
1642 if len(t.methods) == 0 && len(v.methods) == 0 {
1643 return true
1644 }
1645
1646
1647 return false
1648
1649 case Map:
1650 return haveIdenticalType(T.Key(), V.Key(), cmpTags) && haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1651
1652 case Ptr, Slice:
1653 return haveIdenticalType(T.Elem(), V.Elem(), cmpTags)
1654
1655 case Struct:
1656 t := (*structType)(unsafe.Pointer(T))
1657 v := (*structType)(unsafe.Pointer(V))
1658 if len(t.fields) != len(v.fields) {
1659 return false
1660 }
1661 if t.pkgPath.name() != v.pkgPath.name() {
1662 return false
1663 }
1664 for i := range t.fields {
1665 tf := &t.fields[i]
1666 vf := &v.fields[i]
1667 if tf.name.name() != vf.name.name() {
1668 return false
1669 }
1670 if !haveIdenticalType(tf.typ, vf.typ, cmpTags) {
1671 return false
1672 }
1673 if cmpTags && tf.name.tag() != vf.name.tag() {
1674 return false
1675 }
1676 if tf.offsetEmbed != vf.offsetEmbed {
1677 return false
1678 }
1679 }
1680 return true
1681 }
1682
1683 return false
1684 }
1685
1686
1687
1688
1689
1690
1691
1692
1693
1694
1695
1696
1697
1698
1699
1700
1701
1702
1703
1704
1705 func typelinks() (sections []unsafe.Pointer, offset [][]int32)
1706
1707 func rtypeOff(section unsafe.Pointer, off int32) *rtype {
1708 return (*rtype)(add(section, uintptr(off), "sizeof(rtype) > 0"))
1709 }
1710
1711
1712
1713
1714
1715 func typesByString(s string) []*rtype {
1716 sections, offset := typelinks()
1717 var ret []*rtype
1718
1719 for offsI, offs := range offset {
1720 section := sections[offsI]
1721
1722
1723
1724 i, j := 0, len(offs)
1725 for i < j {
1726 h := i + (j-i)/2
1727
1728 if !(rtypeOff(section, offs[h]).String() >= s) {
1729 i = h + 1
1730 } else {
1731 j = h
1732 }
1733 }
1734
1735
1736
1737
1738
1739 for j := i; j < len(offs); j++ {
1740 typ := rtypeOff(section, offs[j])
1741 if typ.String() != s {
1742 break
1743 }
1744 ret = append(ret, typ)
1745 }
1746 }
1747 return ret
1748 }
1749
1750
1751 var lookupCache sync.Map
1752
1753
1754
1755
1756 type cacheKey struct {
1757 kind Kind
1758 t1 *rtype
1759 t2 *rtype
1760 extra uintptr
1761 }
1762
1763
1764
1765
1766 var funcLookupCache struct {
1767 sync.Mutex
1768
1769
1770
1771 m sync.Map
1772 }
1773
1774
1775
1776
1777
1778
1779 func ChanOf(dir ChanDir, t Type) Type {
1780 typ := t.(*rtype)
1781
1782
1783 ckey := cacheKey{Chan, typ, nil, uintptr(dir)}
1784 if ch, ok := lookupCache.Load(ckey); ok {
1785 return ch.(*rtype)
1786 }
1787
1788
1789 if typ.size >= 1<<16 {
1790 panic("reflect.ChanOf: element size too large")
1791 }
1792
1793
1794
1795 var s string
1796 switch dir {
1797 default:
1798 panic("reflect.ChanOf: invalid dir")
1799 case SendDir:
1800 s = "chan<- " + typ.String()
1801 case RecvDir:
1802 s = "<-chan " + typ.String()
1803 case BothDir:
1804 s = "chan " + typ.String()
1805 }
1806 for _, tt := range typesByString(s) {
1807 ch := (*chanType)(unsafe.Pointer(tt))
1808 if ch.elem == typ && ch.dir == uintptr(dir) {
1809 ti, _ := lookupCache.LoadOrStore(ckey, tt)
1810 return ti.(Type)
1811 }
1812 }
1813
1814
1815 var ichan interface{} = (chan unsafe.Pointer)(nil)
1816 prototype := *(**chanType)(unsafe.Pointer(&ichan))
1817 ch := *prototype
1818 ch.tflag = 0
1819 ch.dir = uintptr(dir)
1820 ch.str = resolveReflectName(newName(s, "", false))
1821 ch.hash = fnv1(typ.hash, 'c', byte(dir))
1822 ch.elem = typ
1823
1824 ti, _ := lookupCache.LoadOrStore(ckey, &ch.rtype)
1825 return ti.(Type)
1826 }
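Usage sketch for ChanOf (illustrative only; MakeChan is defined elsewhere in the package):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Equivalent to the static type chan int.
	ct := reflect.ChanOf(reflect.BothDir, reflect.TypeOf(0))
	fmt.Println(ct) // chan int

	ch := reflect.MakeChan(ct, 1) // buffered so Send does not block
	ch.Send(reflect.ValueOf(42))
	v, ok := ch.Recv()
	fmt.Println(v.Int(), ok) // 42 true
}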
1827
1828 func ismapkey(*rtype) bool
1829
1830
1831
1832
1833
1834
1835
1836 func MapOf(key, elem Type) Type {
1837 ktyp := key.(*rtype)
1838 etyp := elem.(*rtype)
1839
1840 if !ismapkey(ktyp) {
1841 panic("reflect.MapOf: invalid key type " + ktyp.String())
1842 }
1843
1844
1845 ckey := cacheKey{Map, ktyp, etyp, 0}
1846 if mt, ok := lookupCache.Load(ckey); ok {
1847 return mt.(Type)
1848 }
1849
1850
1851 s := "map[" + ktyp.String() + "]" + etyp.String()
1852 for _, tt := range typesByString(s) {
1853 mt := (*mapType)(unsafe.Pointer(tt))
1854 if mt.key == ktyp && mt.elem == etyp {
1855 ti, _ := lookupCache.LoadOrStore(ckey, tt)
1856 return ti.(Type)
1857 }
1858 }
1859
1860
1861 var imap interface{} = (map[unsafe.Pointer]unsafe.Pointer)(nil)
1862 mt := **(**mapType)(unsafe.Pointer(&imap))
1863 mt.str = resolveReflectName(newName(s, "", false))
1864 mt.tflag = 0
1865 mt.hash = fnv1(etyp.hash, 'm', byte(ktyp.hash>>24), byte(ktyp.hash>>16), byte(ktyp.hash>>8), byte(ktyp.hash))
1866 mt.key = ktyp
1867 mt.elem = etyp
1868 mt.bucket = bucketOf(ktyp, etyp)
1869 if ktyp.size > maxKeySize {
1870 mt.keysize = uint8(ptrSize)
1871 mt.indirectkey = 1
1872 } else {
1873 mt.keysize = uint8(ktyp.size)
1874 mt.indirectkey = 0
1875 }
1876 if etyp.size > maxValSize {
1877 mt.valuesize = uint8(ptrSize)
1878 mt.indirectvalue = 1
1879 } else {
1880 mt.valuesize = uint8(etyp.size)
1881 mt.indirectvalue = 0
1882 }
1883 mt.bucketsize = uint16(mt.bucket.size)
1884 mt.reflexivekey = isReflexive(ktyp)
1885 mt.needkeyupdate = needKeyUpdate(ktyp)
1886 mt.ptrToThis = 0
1887
1888 ti, _ := lookupCache.LoadOrStore(ckey, &mt.rtype)
1889 return ti.(Type)
1890 }
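Usage sketch for MapOf (illustrative only; MakeMap and SetMapIndex are defined elsewhere in the package):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Equivalent to the static type map[string]int.
	mt := reflect.MapOf(reflect.TypeOf(""), reflect.TypeOf(0))
	m := reflect.MakeMap(mt)
	m.SetMapIndex(reflect.ValueOf("answer"), reflect.ValueOf(42))
	fmt.Println(m.Interface().(map[string]int)["answer"]) // 42
}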
1891
1892 type funcTypeFixed4 struct {
1893 funcType
1894 args [4]*rtype
1895 }
1896 type funcTypeFixed8 struct {
1897 funcType
1898 args [8]*rtype
1899 }
1900 type funcTypeFixed16 struct {
1901 funcType
1902 args [16]*rtype
1903 }
1904 type funcTypeFixed32 struct {
1905 funcType
1906 args [32]*rtype
1907 }
1908 type funcTypeFixed64 struct {
1909 funcType
1910 args [64]*rtype
1911 }
1912 type funcTypeFixed128 struct {
1913 funcType
1914 args [128]*rtype
1915 }
1916
1917
1918
1919
1920
1921
1922
1923
1924 func FuncOf(in, out []Type, variadic bool) Type {
1925 if variadic && (len(in) == 0 || in[len(in)-1].Kind() != Slice) {
1926 panic("reflect.FuncOf: last arg of variadic func must be slice")
1927 }
1928
1929
1930 var ifunc interface{} = (func())(nil)
1931 prototype := *(**funcType)(unsafe.Pointer(&ifunc))
1932 n := len(in) + len(out)
1933
1934 var ft *funcType
1935 var args []*rtype
1936 switch {
1937 case n <= 4:
1938 fixed := new(funcTypeFixed4)
1939 args = fixed.args[:0:len(fixed.args)]
1940 ft = &fixed.funcType
1941 case n <= 8:
1942 fixed := new(funcTypeFixed8)
1943 args = fixed.args[:0:len(fixed.args)]
1944 ft = &fixed.funcType
1945 case n <= 16:
1946 fixed := new(funcTypeFixed16)
1947 args = fixed.args[:0:len(fixed.args)]
1948 ft = &fixed.funcType
1949 case n <= 32:
1950 fixed := new(funcTypeFixed32)
1951 args = fixed.args[:0:len(fixed.args)]
1952 ft = &fixed.funcType
1953 case n <= 64:
1954 fixed := new(funcTypeFixed64)
1955 args = fixed.args[:0:len(fixed.args)]
1956 ft = &fixed.funcType
1957 case n <= 128:
1958 fixed := new(funcTypeFixed128)
1959 args = fixed.args[:0:len(fixed.args)]
1960 ft = &fixed.funcType
1961 default:
1962 panic("reflect.FuncOf: too many arguments")
1963 }
1964 *ft = *prototype
1965
1966
1967 var hash uint32
1968 for _, in := range in {
1969 t := in.(*rtype)
1970 args = append(args, t)
1971 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
1972 }
1973 if variadic {
1974 hash = fnv1(hash, 'v')
1975 }
1976 hash = fnv1(hash, '.')
1977 for _, out := range out {
1978 t := out.(*rtype)
1979 args = append(args, t)
1980 hash = fnv1(hash, byte(t.hash>>24), byte(t.hash>>16), byte(t.hash>>8), byte(t.hash))
1981 }
1982 if len(args) > 50 {
1983 panic("reflect.FuncOf does not support more than 50 arguments")
1984 }
1985 ft.tflag = 0
1986 ft.hash = hash
1987 ft.inCount = uint16(len(in))
1988 ft.outCount = uint16(len(out))
1989 if variadic {
1990 ft.outCount |= 1 << 15
1991 }
1992
1993
1994 if ts, ok := funcLookupCache.m.Load(hash); ok {
1995 for _, t := range ts.([]*rtype) {
1996 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
1997 return t
1998 }
1999 }
2000 }
2001
2002
2003 funcLookupCache.Lock()
2004 defer funcLookupCache.Unlock()
2005 if ts, ok := funcLookupCache.m.Load(hash); ok {
2006 for _, t := range ts.([]*rtype) {
2007 if haveIdenticalUnderlyingType(&ft.rtype, t, true) {
2008 return t
2009 }
2010 }
2011 }
2012
2013 addToCache := func(tt *rtype) Type {
2014 var rts []*rtype
2015 if rti, ok := funcLookupCache.m.Load(hash); ok {
2016 rts = rti.([]*rtype)
2017 }
2018 funcLookupCache.m.Store(hash, append(rts, tt))
2019 return tt
2020 }
2021
2022
2023 str := funcStr(ft)
2024 for _, tt := range typesByString(str) {
2025 if haveIdenticalUnderlyingType(&ft.rtype, tt, true) {
2026 return addToCache(tt)
2027 }
2028 }
2029
2030
2031 ft.str = resolveReflectName(newName(str, "", false))
2032 ft.ptrToThis = 0
2033 return addToCache(&ft.rtype)
2034 }
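Usage sketch for FuncOf, paired with MakeFunc from elsewhere in the package (illustrative only):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build the type func(int, int) int, then a function value of that type.
	intT := reflect.TypeOf(0)
	ft := reflect.FuncOf([]reflect.Type{intT, intT}, []reflect.Type{intT}, false)
	fmt.Println(ft) // func(int, int) int

	add := reflect.MakeFunc(ft, func(args []reflect.Value) []reflect.Value {
		sum := args[0].Int() + args[1].Int()
		return []reflect.Value{reflect.ValueOf(int(sum))}
	})
	out := add.Call([]reflect.Value{reflect.ValueOf(2), reflect.ValueOf(3)})
	fmt.Println(out[0].Int()) // 5
}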
2035
2036
2037 func funcStr(ft *funcType) string {
2038 repr := make([]byte, 0, 64)
2039 repr = append(repr, "func("...)
2040 for i, t := range ft.in() {
2041 if i > 0 {
2042 repr = append(repr, ", "...)
2043 }
2044 if ft.IsVariadic() && i == int(ft.inCount)-1 {
2045 repr = append(repr, "..."...)
2046 repr = append(repr, (*sliceType)(unsafe.Pointer(t)).elem.String()...)
2047 } else {
2048 repr = append(repr, t.String()...)
2049 }
2050 }
2051 repr = append(repr, ')')
2052 out := ft.out()
2053 if len(out) == 1 {
2054 repr = append(repr, ' ')
2055 } else if len(out) > 1 {
2056 repr = append(repr, " ("...)
2057 }
2058 for i, t := range out {
2059 if i > 0 {
2060 repr = append(repr, ", "...)
2061 }
2062 repr = append(repr, t.String()...)
2063 }
2064 if len(out) > 1 {
2065 repr = append(repr, ')')
2066 }
2067 return string(repr)
2068 }
2069
2070
2071
2072 func isReflexive(t *rtype) bool {
2073 switch t.Kind() {
2074 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, String, UnsafePointer:
2075 return true
2076 case Float32, Float64, Complex64, Complex128, Interface:
2077 return false
2078 case Array:
2079 tt := (*arrayType)(unsafe.Pointer(t))
2080 return isReflexive(tt.elem)
2081 case Struct:
2082 tt := (*structType)(unsafe.Pointer(t))
2083 for _, f := range tt.fields {
2084 if !isReflexive(f.typ) {
2085 return false
2086 }
2087 }
2088 return true
2089 default:
2090
2091 panic("isReflexive called on non-key type " + t.String())
2092 }
2093 }
2094
2095
2096 func needKeyUpdate(t *rtype) bool {
2097 switch t.Kind() {
2098 case Bool, Int, Int8, Int16, Int32, Int64, Uint, Uint8, Uint16, Uint32, Uint64, Uintptr, Chan, Ptr, UnsafePointer:
2099 return false
2100 case Float32, Float64, Complex64, Complex128, Interface, String:
2101
2102
2103
2104 return true
2105 case Array:
2106 tt := (*arrayType)(unsafe.Pointer(t))
2107 return needKeyUpdate(tt.elem)
2108 case Struct:
2109 tt := (*structType)(unsafe.Pointer(t))
2110 for _, f := range tt.fields {
2111 if needKeyUpdate(f.typ) {
2112 return true
2113 }
2114 }
2115 return false
2116 default:
2117
2118 panic("needKeyUpdate called on non-key type " + t.String())
2119 }
2120 }
2121
2122
2123
2124
2125
2126 const (
2127 bucketSize uintptr = 8
2128 maxKeySize uintptr = 128
2129 maxValSize uintptr = 128
2130 )
2131
2132 func bucketOf(ktyp, etyp *rtype) *rtype {
2133
2134 var kind uint8
2135 if ktyp.kind&kindNoPointers != 0 && etyp.kind&kindNoPointers != 0 &&
2136 ktyp.size <= maxKeySize && etyp.size <= maxValSize {
2137 kind = kindNoPointers
2138 }
2139
2140 if ktyp.size > maxKeySize {
2141 ktyp = PtrTo(ktyp).(*rtype)
2142 }
2143 if etyp.size > maxValSize {
2144 etyp = PtrTo(etyp).(*rtype)
2145 }
2146
2147
2148
2149
2150
2151
2152 var gcdata *byte
2153 var ptrdata uintptr
2154 var overflowPad uintptr
2155
2156
2157
2158 if runtime.GOARCH == "amd64p32" && (ktyp.align > ptrSize || etyp.align > ptrSize) {
2159 overflowPad = ptrSize
2160 }
2161 size := bucketSize*(1+ktyp.size+etyp.size) + overflowPad + ptrSize
2162 if size&uintptr(ktyp.align-1) != 0 || size&uintptr(etyp.align-1) != 0 {
2163 panic("reflect: bad size computation in MapOf")
2164 }
2165
2166 if kind != kindNoPointers {
2167 nptr := (bucketSize*(1+ktyp.size+etyp.size) + ptrSize) / ptrSize
2168 mask := make([]byte, (nptr+7)/8)
2169 base := bucketSize / ptrSize
2170
2171 if ktyp.kind&kindNoPointers == 0 {
2172 if ktyp.kind&kindGCProg != 0 {
2173 panic("reflect: unexpected GC program in MapOf")
2174 }
2175 kmask := (*[16]byte)(unsafe.Pointer(ktyp.gcdata))
2176 for i := uintptr(0); i < ktyp.ptrdata/ptrSize; i++ {
2177 if (kmask[i/8]>>(i%8))&1 != 0 {
2178 for j := uintptr(0); j < bucketSize; j++ {
2179 word := base + j*ktyp.size/ptrSize + i
2180 mask[word/8] |= 1 << (word % 8)
2181 }
2182 }
2183 }
2184 }
2185 base += bucketSize * ktyp.size / ptrSize
2186
2187 if etyp.kind&kindNoPointers == 0 {
2188 if etyp.kind&kindGCProg != 0 {
2189 panic("reflect: unexpected GC program in MapOf")
2190 }
2191 emask := (*[16]byte)(unsafe.Pointer(etyp.gcdata))
2192 for i := uintptr(0); i < etyp.ptrdata/ptrSize; i++ {
2193 if (emask[i/8]>>(i%8))&1 != 0 {
2194 for j := uintptr(0); j < bucketSize; j++ {
2195 word := base + j*etyp.size/ptrSize + i
2196 mask[word/8] |= 1 << (word % 8)
2197 }
2198 }
2199 }
2200 }
2201 base += bucketSize * etyp.size / ptrSize
2202 base += overflowPad / ptrSize
2203
2204 word := base
2205 mask[word/8] |= 1 << (word % 8)
2206 gcdata = &mask[0]
2207 ptrdata = (word + 1) * ptrSize
2208
2209
2210 if ptrdata != size {
2211 panic("reflect: bad layout computation in MapOf")
2212 }
2213 }
2214
2215 b := &rtype{
2216 align: ptrSize,
2217 size: size,
2218 kind: kind,
2219 ptrdata: ptrdata,
2220 gcdata: gcdata,
2221 }
2222 if overflowPad > 0 {
2223 b.align = 8
2224 }
2225 s := "bucket(" + ktyp.String() + "," + etyp.String() + ")"
2226 b.str = resolveReflectName(newName(s, "", false))
2227 return b
2228 }
2229
2230
2231
2232 func SliceOf(t Type) Type {
2233 typ := t.(*rtype)
2234
2235
2236 ckey := cacheKey{Slice, typ, nil, 0}
2237 if slice, ok := lookupCache.Load(ckey); ok {
2238 return slice.(Type)
2239 }
2240
2241
2242 s := "[]" + typ.String()
2243 for _, tt := range typesByString(s) {
2244 slice := (*sliceType)(unsafe.Pointer(tt))
2245 if slice.elem == typ {
2246 ti, _ := lookupCache.LoadOrStore(ckey, tt)
2247 return ti.(Type)
2248 }
2249 }
2250
2251
2252 var islice interface{} = ([]unsafe.Pointer)(nil)
2253 prototype := *(**sliceType)(unsafe.Pointer(&islice))
2254 slice := *prototype
2255 slice.tflag = 0
2256 slice.str = resolveReflectName(newName(s, "", false))
2257 slice.hash = fnv1(typ.hash, '[')
2258 slice.elem = typ
2259 slice.ptrToThis = 0
2260
2261 ti, _ := lookupCache.LoadOrStore(ckey, &slice.rtype)
2262 return ti.(Type)
2263 }
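Usage sketch for SliceOf (illustrative only; MakeSlice and Append are defined elsewhere in the package):

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Equivalent to the static type []string.
	st := reflect.SliceOf(reflect.TypeOf(""))
	s := reflect.MakeSlice(st, 0, 4)
	s = reflect.Append(s, reflect.ValueOf("a"), reflect.ValueOf("b"))
	fmt.Println(s.Interface().([]string)) // [a b]
}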
2264
2265
2266
2267
2268 var structLookupCache struct {
2269 sync.Mutex
2270
2271
2272
2273 m sync.Map
2274 }
2275
2276 type structTypeUncommon struct {
2277 structType
2278 u uncommonType
2279 }
2280
2281
2282
2283
2284
2285
2286
2287
2288
2289
2290
2291
2292
2293 type structTypeFixed4 struct {
2294 structType
2295 u uncommonType
2296 m [4]method
2297 }
2298
2299 type structTypeFixed8 struct {
2300 structType
2301 u uncommonType
2302 m [8]method
2303 }
2304
2305 type structTypeFixed16 struct {
2306 structType
2307 u uncommonType
2308 m [16]method
2309 }
2310
2311 type structTypeFixed32 struct {
2312 structType
2313 u uncommonType
2314 m [32]method
2315 }
2316
2317
2318 func isLetter(ch rune) bool {
2319 return 'a' <= ch && ch <= 'z' || 'A' <= ch && ch <= 'Z' || ch == '_' || ch >= utf8.RuneSelf && unicode.IsLetter(ch)
2320 }
2321
2322
2323
2324
2325
2326
2327
2328 func isValidFieldName(fieldName string) bool {
2329 for i, c := range fieldName {
2330 if i == 0 && !isLetter(c) {
2331 return false
2332 }
2333
2334 if !(isLetter(c) || unicode.IsDigit(c)) {
2335 return false
2336 }
2337 }
2338
2339 return len(fieldName) > 0
2340 }
2341
2342
2343
2344
2345
2346
2347
2348
2349 func StructOf(fields []StructField) Type {
2350 var (
2351 hash = fnv1(0, []byte("struct {")...)
2352 size uintptr
2353 typalign uint8
2354 comparable = true
2355 hashable = true
2356 methods []method
2357
2358 fs = make([]structField, len(fields))
2359 repr = make([]byte, 0, 64)
2360 fset = map[string]struct{}{}
2361
2362 hasPtr = false
2363 hasGCProg = false
2364 )
2365
2366 lastzero := uintptr(0)
2367 repr = append(repr, "struct {"...)
2368 for i, field := range fields {
2369 if field.Name == "" {
2370 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no name")
2371 }
2372 if !isValidFieldName(field.Name) {
2373 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has invalid name")
2374 }
2375 if field.Type == nil {
2376 panic("reflect.StructOf: field " + strconv.Itoa(i) + " has no type")
2377 }
2378 f := runtimeStructField(field)
2379 ft := f.typ
2380 if ft.kind&kindGCProg != 0 {
2381 hasGCProg = true
2382 }
2383 if ft.pointers() {
2384 hasPtr = true
2385 }
2386
2387
2388 name := f.name.name()
2389 hash = fnv1(hash, []byte(name)...)
2390 repr = append(repr, (" " + name)...)
2391 if f.embedded() {
2392
2393 if f.typ.Kind() == Ptr {
2394
2395 elem := ft.Elem()
2396 if k := elem.Kind(); k == Ptr || k == Interface {
2397 panic("reflect.StructOf: illegal embedded field type " + ft.String())
2398 }
2399 }
2400
2401 switch f.typ.Kind() {
2402 case Interface:
2403 ift := (*interfaceType)(unsafe.Pointer(ft))
2404 for im, m := range ift.methods {
2405 if ift.nameOff(m.name).pkgPath() != "" {
2406
2407 panic("reflect: embedded interface with unexported method(s) not implemented")
2408 }
2409
2410 var (
2411 mtyp = ift.typeOff(m.typ)
2412 ifield = i
2413 imethod = im
2414 ifn Value
2415 tfn Value
2416 )
2417
2418 if ft.kind&kindDirectIface != 0 {
2419 tfn = MakeFunc(mtyp, func(in []Value) []Value {
2420 var args []Value
2421 var recv = in[0]
2422 if len(in) > 1 {
2423 args = in[1:]
2424 }
2425 return recv.Field(ifield).Method(imethod).Call(args)
2426 })
2427 ifn = MakeFunc(mtyp, func(in []Value) []Value {
2428 var args []Value
2429 var recv = in[0]
2430 if len(in) > 1 {
2431 args = in[1:]
2432 }
2433 return recv.Field(ifield).Method(imethod).Call(args)
2434 })
2435 } else {
2436 tfn = MakeFunc(mtyp, func(in []Value) []Value {
2437 var args []Value
2438 var recv = in[0]
2439 if len(in) > 1 {
2440 args = in[1:]
2441 }
2442 return recv.Field(ifield).Method(imethod).Call(args)
2443 })
2444 ifn = MakeFunc(mtyp, func(in []Value) []Value {
2445 var args []Value
2446 var recv = Indirect(in[0])
2447 if len(in) > 1 {
2448 args = in[1:]
2449 }
2450 return recv.Field(ifield).Method(imethod).Call(args)
2451 })
2452 }
2453
2454 methods = append(methods, method{
2455 name: resolveReflectName(ift.nameOff(m.name)),
2456 mtyp: resolveReflectType(mtyp),
2457 ifn: resolveReflectText(unsafe.Pointer(&ifn)),
2458 tfn: resolveReflectText(unsafe.Pointer(&tfn)),
2459 })
2460 }
2461 case Ptr:
2462 ptr := (*ptrType)(unsafe.Pointer(ft))
2463 if unt := ptr.uncommon(); unt != nil {
2464 if i > 0 && unt.mcount > 0 {
2465
2466 panic("reflect: embedded type with methods not implemented if type is not first field")
2467 }
2468 if len(fields) > 1 {
2469 panic("reflect: embedded type with methods not implemented if there is more than one field")
2470 }
2471 for _, m := range unt.methods() {
2472 mname := ptr.nameOff(m.name)
2473 if mname.pkgPath() != "" {
2474
2475
2476 panic("reflect: embedded interface with unexported method(s) not implemented")
2477 }
2478 methods = append(methods, method{
2479 name: resolveReflectName(mname),
2480 mtyp: resolveReflectType(ptr.typeOff(m.mtyp)),
2481 ifn: resolveReflectText(ptr.textOff(m.ifn)),
2482 tfn: resolveReflectText(ptr.textOff(m.tfn)),
2483 })
2484 }
2485 }
2486 if unt := ptr.elem.uncommon(); unt != nil {
2487 for _, m := range unt.methods() {
2488 mname := ptr.nameOff(m.name)
2489 if mname.pkgPath() != "" {
2490
2491
2492 panic("reflect: embedded interface with unexported method(s) not implemented")
2493 }
2494 methods = append(methods, method{
2495 name: resolveReflectName(mname),
2496 mtyp: resolveReflectType(ptr.elem.typeOff(m.mtyp)),
2497 ifn: resolveReflectText(ptr.elem.textOff(m.ifn)),
2498 tfn: resolveReflectText(ptr.elem.textOff(m.tfn)),
2499 })
2500 }
2501 }
2502 default:
2503 if unt := ft.uncommon(); unt != nil {
2504 if i > 0 && unt.mcount > 0 {
2505
2506 panic("reflect: embedded type with methods not implemented if type is not first field")
2507 }
2508 if len(fields) > 1 && ft.kind&kindDirectIface != 0 {
2509 panic("reflect: embedded type with methods not implemented for non-pointer type")
2510 }
2511 for _, m := range unt.methods() {
2512 mname := ft.nameOff(m.name)
2513 if mname.pkgPath() != "" {
2514
2515
2516 panic("reflect: embedded interface with unexported method(s) not implemented")
2517 }
2518 methods = append(methods, method{
2519 name: resolveReflectName(mname),
2520 mtyp: resolveReflectType(ft.typeOff(m.mtyp)),
2521 ifn: resolveReflectText(ft.textOff(m.ifn)),
2522 tfn: resolveReflectText(ft.textOff(m.tfn)),
2523 })
2524
2525 }
2526 }
2527 }
2528 }
2529 if _, dup := fset[name]; dup {
2530 panic("reflect.StructOf: duplicate field " + name)
2531 }
2532 fset[name] = struct{}{}
2533
2534 hash = fnv1(hash, byte(ft.hash>>24), byte(ft.hash>>16), byte(ft.hash>>8), byte(ft.hash))
2535
2536 repr = append(repr, (" " + ft.String())...)
2537 if f.name.tagLen() > 0 {
2538 hash = fnv1(hash, []byte(f.name.tag())...)
2539 repr = append(repr, (" " + strconv.Quote(f.name.tag()))...)
2540 }
2541 if i < len(fields)-1 {
2542 repr = append(repr, ';')
2543 }
2544
2545 comparable = comparable && (ft.alg.equal != nil)
2546 hashable = hashable && (ft.alg.hash != nil)
2547
2548 offset := align(size, uintptr(ft.align))
2549 if ft.align > typalign {
2550 typalign = ft.align
2551 }
2552 size = offset + ft.size
2553 f.offsetEmbed |= offset << 1
2554
2555 if ft.size == 0 {
2556 lastzero = size
2557 }
2558
2559 fs[i] = f
2560 }
2561
2562 if size > 0 && lastzero == size {
2563
2564
2565
2566
2567
2568 size++
2569 }
2570
2571 var typ *structType
2572 var ut *uncommonType
2573
2574 switch {
2575 case len(methods) == 0:
2576 t := new(structTypeUncommon)
2577 typ = &t.structType
2578 ut = &t.u
2579 case len(methods) <= 4:
2580 t := new(structTypeFixed4)
2581 typ = &t.structType
2582 ut = &t.u
2583 copy(t.m[:], methods)
2584 case len(methods) <= 8:
2585 t := new(structTypeFixed8)
2586 typ = &t.structType
2587 ut = &t.u
2588 copy(t.m[:], methods)
2589 case len(methods) <= 16:
2590 t := new(structTypeFixed16)
2591 typ = &t.structType
2592 ut = &t.u
2593 copy(t.m[:], methods)
2594 case len(methods) <= 32:
2595 t := new(structTypeFixed32)
2596 typ = &t.structType
2597 ut = &t.u
2598 copy(t.m[:], methods)
2599 default:
2600 panic("reflect.StructOf: too many methods")
2601 }
2602
2603
2604
2605
2606 ut.mcount = uint16(len(methods))
2607 ut.xcount = ut.mcount
2608 ut.moff = uint32(unsafe.Sizeof(uncommonType{}))
2609
2610 if len(fs) > 0 {
2611 repr = append(repr, ' ')
2612 }
2613 repr = append(repr, '}')
2614 hash = fnv1(hash, '}')
2615 str := string(repr)
2616
2617
2618 size = align(size, uintptr(typalign))
2619
2620
2621 var istruct interface{} = struct{}{}
2622 prototype := *(**structType)(unsafe.Pointer(&istruct))
2623 *typ = *prototype
2624 typ.fields = fs
2625
2626
2627 if ts, ok := structLookupCache.m.Load(hash); ok {
2628 for _, st := range ts.([]Type) {
2629 t := st.common()
2630 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2631 return t
2632 }
2633 }
2634 }
2635
2636
2637 structLookupCache.Lock()
2638 defer structLookupCache.Unlock()
2639 if ts, ok := structLookupCache.m.Load(hash); ok {
2640 for _, st := range ts.([]Type) {
2641 t := st.common()
2642 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2643 return t
2644 }
2645 }
2646 }
2647
2648 addToCache := func(t Type) Type {
2649 var ts []Type
2650 if ti, ok := structLookupCache.m.Load(hash); ok {
2651 ts = ti.([]Type)
2652 }
2653 structLookupCache.m.Store(hash, append(ts, t))
2654 return t
2655 }
2656
2657
2658 for _, t := range typesByString(str) {
2659 if haveIdenticalUnderlyingType(&typ.rtype, t, true) {
2660
2661
2662
2663 return addToCache(t)
2664 }
2665 }
2666
2667 typ.str = resolveReflectName(newName(str, "", false))
2668 typ.tflag = 0
2669 typ.hash = hash
2670 typ.size = size
2671 typ.align = typalign
2672 typ.fieldAlign = typalign
2673 typ.ptrToThis = 0
2674 if len(methods) > 0 {
2675 typ.tflag |= tflagUncommon
2676 }
2677 if !hasPtr {
2678 typ.kind |= kindNoPointers
2679 } else {
2680 typ.kind &^= kindNoPointers
2681 }
2682
2683 if hasGCProg {
2684 lastPtrField := 0
2685 for i, ft := range fs {
2686 if ft.typ.pointers() {
2687 lastPtrField = i
2688 }
2689 }
2690 prog := []byte{0, 0, 0, 0}
2691 for i, ft := range fs {
2692 if i > lastPtrField {
2693
2694
2695 break
2696 }
2697
2698 elemGC := (*[1 << 30]byte)(unsafe.Pointer(ft.typ.gcdata))[:]
2699 elemPtrs := ft.typ.ptrdata / ptrSize
2700 switch {
2701 case ft.typ.kind&kindGCProg == 0 && ft.typ.ptrdata != 0:
2702
2703 mask := elemGC
2704
2705 var n uintptr
2706 for n = elemPtrs; n > 120; n -= 120 {
2707 prog = append(prog, 120)
2708 prog = append(prog, mask[:15]...)
2709 mask = mask[15:]
2710 }
2711 prog = append(prog, byte(n))
2712 prog = append(prog, mask[:(n+7)/8]...)
2713 case ft.typ.kind&kindGCProg != 0:
2714
2715 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2716 prog = append(prog, elemProg...)
2717 }
2718
2719 elemWords := ft.typ.size / ptrSize
2720 if elemPtrs < elemWords {
2721
2722 prog = append(prog, 0x01, 0x00)
2723 if elemPtrs+1 < elemWords {
2724 prog = append(prog, 0x81)
2725 prog = appendVarint(prog, elemWords-elemPtrs-1)
2726 }
2727 }
2728 }
2729 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2730 typ.kind |= kindGCProg
2731 typ.gcdata = &prog[0]
2732 } else {
2733 typ.kind &^= kindGCProg
2734 bv := new(bitVector)
2735 addTypeBits(bv, 0, typ.common())
2736 if len(bv.data) > 0 {
2737 typ.gcdata = &bv.data[0]
2738 }
2739 }
2740 typ.ptrdata = typeptrdata(typ.common())
2741 typ.alg = new(typeAlg)
2742 if hashable {
2743 typ.alg.hash = func(p unsafe.Pointer, seed uintptr) uintptr {
2744 o := seed
2745 for _, ft := range typ.fields {
2746 pi := add(p, ft.offset(), "&x.field safe")
2747 o = ft.typ.alg.hash(pi, o)
2748 }
2749 return o
2750 }
2751 }
2752
2753 if comparable {
2754 typ.alg.equal = func(p, q unsafe.Pointer) bool {
2755 for _, ft := range typ.fields {
2756 pi := add(p, ft.offset(), "&x.field safe")
2757 qi := add(q, ft.offset(), "&x.field safe")
2758 if !ft.typ.alg.equal(pi, qi) {
2759 return false
2760 }
2761 }
2762 return true
2763 }
2764 }
2765
2766 switch {
2767 case len(fs) == 1 && !ifaceIndir(fs[0].typ):
2768
2769 typ.kind |= kindDirectIface
2770 default:
2771 typ.kind &^= kindDirectIface
2772 }
2773
2774 return addToCache(&typ.rtype)
2775 }
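Usage sketch for StructOf (illustrative only): it builds an anonymous struct type from StructField descriptions; as runtimeStructField below shows, unexported fields are rejected.

package main

import (
	"fmt"
	"reflect"
)

func main() {
	// Build the equivalent of: struct { Name string `json:"name"`; Age int }
	st := reflect.StructOf([]reflect.StructField{
		{Name: "Name", Type: reflect.TypeOf(""), Tag: `json:"name"`},
		{Name: "Age", Type: reflect.TypeOf(0)},
	})
	v := reflect.New(st).Elem()
	v.Field(0).SetString("Gopher")
	v.Field(1).SetInt(12)
	fmt.Printf("%+v %s\n", v.Interface(), st.Field(0).Tag.Get("json"))
	// {Name:Gopher Age:12} name
}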
2776
2777 func runtimeStructField(field StructField) structField {
2778 if field.PkgPath != "" {
2779 panic("reflect.StructOf: StructOf does not allow unexported fields")
2780 }
2781
2782
2783
2784 c := field.Name[0]
2785 if 'a' <= c && c <= 'z' || c == '_' {
2786 panic("reflect.StructOf: field \"" + field.Name + "\" is unexported but missing PkgPath")
2787 }
2788
2789 offsetEmbed := uintptr(0)
2790 if field.Anonymous {
2791 offsetEmbed |= 1
2792 }
2793
2794 resolveReflectType(field.Type.common())
2795 return structField{
2796 name: newName(field.Name, string(field.Tag), true),
2797 typ: field.Type.common(),
2798 offsetEmbed: offsetEmbed,
2799 }
2800 }
2801
2802
2803
2804
2805 func typeptrdata(t *rtype) uintptr {
2806 if !t.pointers() {
2807 return 0
2808 }
2809 switch t.Kind() {
2810 case Struct:
2811 st := (*structType)(unsafe.Pointer(t))
2812
2813 field := 0
2814 for i := range st.fields {
2815 ft := st.fields[i].typ
2816 if ft.pointers() {
2817 field = i
2818 }
2819 }
2820 f := st.fields[field]
2821 return f.offset() + f.typ.ptrdata
2822
2823 default:
2824 panic("reflect.typeptrdata: unexpected type, " + t.String())
2825 }
2826 }
2827
2828
2829 const maxPtrmaskBytes = 2048
2830
2831
2832
2833
2834
2835
2836 func ArrayOf(count int, elem Type) Type {
2837 typ := elem.(*rtype)
2838
2839
2840 ckey := cacheKey{Array, typ, nil, uintptr(count)}
2841 if array, ok := lookupCache.Load(ckey); ok {
2842 return array.(Type)
2843 }
2844
2845
2846 s := "[" + strconv.Itoa(count) + "]" + typ.String()
2847 for _, tt := range typesByString(s) {
2848 array := (*arrayType)(unsafe.Pointer(tt))
2849 if array.elem == typ {
2850 ti, _ := lookupCache.LoadOrStore(ckey, tt)
2851 return ti.(Type)
2852 }
2853 }
2854
2855
2856 var iarray interface{} = [1]unsafe.Pointer{}
2857 prototype := *(**arrayType)(unsafe.Pointer(&iarray))
2858 array := *prototype
2859 array.tflag = 0
2860 array.str = resolveReflectName(newName(s, "", false))
2861 array.hash = fnv1(typ.hash, '[')
2862 for n := uint32(count); n > 0; n >>= 8 {
2863 array.hash = fnv1(array.hash, byte(n))
2864 }
2865 array.hash = fnv1(array.hash, ']')
2866 array.elem = typ
2867 array.ptrToThis = 0
2868 if typ.size > 0 {
2869 max := ^uintptr(0) / typ.size
2870 if uintptr(count) > max {
2871 panic("reflect.ArrayOf: array size would exceed virtual address space")
2872 }
2873 }
2874 array.size = typ.size * uintptr(count)
2875 if count > 0 && typ.ptrdata != 0 {
2876 array.ptrdata = typ.size*uintptr(count-1) + typ.ptrdata
2877 }
2878 array.align = typ.align
2879 array.fieldAlign = typ.fieldAlign
2880 array.len = uintptr(count)
2881 array.slice = SliceOf(elem).(*rtype)
2882
2883 array.kind &^= kindNoPointers
2884 switch {
2885 case typ.kind&kindNoPointers != 0 || array.size == 0:
2886 // No pointers.
2887 array.kind |= kindNoPointers
2888 array.gcdata = nil
2889 array.ptrdata = 0
2890
2891 case count == 1:
2892 // In memory, a 1-element array looks just like the element.
2893 array.kind |= typ.kind & kindGCProg
2894 array.gcdata = typ.gcdata
2895 array.ptrdata = typ.ptrdata
2896
2897 case typ.kind&kindGCProg == 0 && array.size <= maxPtrmaskBytes*8*ptrSize:
2898 // Element is small with pointer mask; array is still small.
2899 // Create a direct pointer mask by turning each 1 bit in the element's
2900 // mask into count 1 bits in the larger array mask.
2901 mask := make([]byte, (array.ptrdata/ptrSize+7)/8)
2902 elemMask := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
2903 elemWords := typ.size / ptrSize
2904 for j := uintptr(0); j < typ.ptrdata/ptrSize; j++ {
2905 if (elemMask[j/8]>>(j%8))&1 != 0 {
2906 for i := uintptr(0); i < array.len; i++ {
2907 k := i*elemWords + j
2908 mask[k/8] |= 1 << (k % 8)
2909 }
2910 }
2911 }
2912 array.gcdata = &mask[0]
2913
2914 default:
2915 // Create a GC program that emits one element
2916 // and then repeats to make the array.
2917 prog := []byte{0, 0, 0, 0} // will be length of prog
2918 elemGC := (*[1 << 30]byte)(unsafe.Pointer(typ.gcdata))[:]
2919 elemPtrs := typ.ptrdata / ptrSize
2920 if typ.kind&kindGCProg == 0 {
2921 // Element is small with pointer mask; use as literal bits.
2922 mask := elemGC
2923 // Emit 120-bit chunks of full bytes (max is 127 but we avoid using partial bytes).
2924 var n uintptr
2925 for n = elemPtrs; n > 120; n -= 120 {
2926 prog = append(prog, 120)
2927 prog = append(prog, mask[:15]...)
2928 mask = mask[15:]
2929 }
2930 prog = append(prog, byte(n))
2931 prog = append(prog, mask[:(n+7)/8]...)
2932 } else {
2933 // Element has GC program; emit one element.
2934 elemProg := elemGC[4 : 4+*(*uint32)(unsafe.Pointer(&elemGC[0]))-1]
2935 prog = append(prog, elemProg...)
2936 }
2937 // Pad from ptrdata to size.
2938 elemWords := typ.size / ptrSize
2939 if elemPtrs < elemWords {
2940 // Emit literal 0 bit, then repeat as needed.
2941 prog = append(prog, 0x01, 0x00)
2942 if elemPtrs+1 < elemWords {
2943 prog = append(prog, 0x81)
2944 prog = appendVarint(prog, elemWords-elemPtrs-1)
2945 }
2946 }
2947 // Repeat the element program count-1 times.
2948 if elemWords < 0x80 {
2949 prog = append(prog, byte(elemWords|0x80))
2950 } else {
2951 prog = append(prog, 0x80)
2952 prog = appendVarint(prog, elemWords)
2953 }
2954 prog = appendVarint(prog, uintptr(count)-1)
2955 prog = append(prog, 0)
2956 *(*uint32)(unsafe.Pointer(&prog[0])) = uint32(len(prog) - 4)
2957 array.kind |= kindGCProg
2958 array.gcdata = &prog[0]
2959 array.ptrdata = array.size
2960 }
2961
2962 etyp := typ.common()
2963 esize := etyp.Size()
2964 ealg := etyp.alg
2965
2966 array.alg = new(typeAlg)
2967 if ealg.equal != nil {
2968 eequal := ealg.equal
2969 array.alg.equal = func(p, q unsafe.Pointer) bool {
2970 for i := 0; i < count; i++ {
2971 pi := arrayAt(p, i, esize, "i < count")
2972 qi := arrayAt(q, i, esize, "i < count")
2973 if !eequal(pi, qi) {
2974 return false
2975 }
2976
2977 }
2978 return true
2979 }
2980 }
2981 if ealg.hash != nil {
2982 ehash := ealg.hash
2983 array.alg.hash = func(ptr unsafe.Pointer, seed uintptr) uintptr {
2984 o := seed
2985 for i := 0; i < count; i++ {
2986 o = ehash(arrayAt(ptr, i, esize, "i < count"), o)
2987 }
2988 return o
2989 }
2990 }
2991
2992 switch {
2993 case count == 1 && !ifaceIndir(typ):
2994 // array of 1 direct iface type can be direct
2995 array.kind |= kindDirectIface
2996 default:
2997 array.kind &^= kindDirectIface
2998 }
2999
3000 ti, _ := lookupCache.LoadOrStore(ckey, &array.rtype)
3001 return ti.(Type)
3002 }
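
// Illustrative sketch (not part of the original source): a typical caller of
// the exported ArrayOf API, which goes through the cache lookups and GC
// metadata construction above. The function name is hypothetical.
func exampleArrayOf() string {
	t := ArrayOf(4, TypeOf(byte(0))) // represents [4]uint8
	v := New(t).Elem()
	v.Index(0).SetUint(0xff)
	return t.String() // "[4]uint8"
}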
3003
3004 func appendVarint(x []byte, v uintptr) []byte {
3005 for ; v >= 0x80; v >>= 7 {
3006 x = append(x, byte(v|0x80))
3007 }
3008 x = append(x, byte(v))
3009 return x
3010 }
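
// Illustrative sketch (not part of the original source): appendVarint uses the
// same little-endian base-128 encoding as encoding/binary's Uvarint, so the
// value 300 = 0x2c | 0x02<<7 encodes as the two bytes 0xac, 0x02. The function
// name below is hypothetical.
func exampleAppendVarint() []byte {
	return appendVarint(nil, 300) // []byte{0xac, 0x02}
}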
3011
3012 // toType converts from a *rtype to a Type that can be returned
3013 // to the client of package reflect. In gc, the only concern is that
3014 // a nil *rtype must be replaced by a nil Type, but in gccgo this
3015 // function takes care of ensuring that multiple *rtype for the same
3016 // type are coalesced into a single Type.
3017 func toType(t *rtype) Type {
3018 if t == nil {
3019 return nil
3020 }
3021 return t
3022 }
3023
3024 type layoutKey struct {
3025 ftyp *funcType
3026 rcvr *rtype
3027 }
3028
3029 type layoutType struct {
3030 t *rtype
3031 argSize uintptr
3032 retOffset uintptr
3033 stack *bitVector
3034 framePool *sync.Pool
3035 }
3036
3037 var layoutCache sync.Map // map[layoutKey]layoutType
3038
3039 // funcLayout computes a struct type representing the layout of the
3040 // function arguments and return values for the function type t.
3041 // If rcvr != nil, rcvr specifies the type of the receiver.
3042 // The returned type exists only for GC, so we only fill out GC relevant info.
3043 // Currently, that's just size and the GC program. We also fill in
3044 // the name for possible debugging use.
3045 func funcLayout(t *funcType, rcvr *rtype) (frametype *rtype, argSize, retOffset uintptr, stk *bitVector, framePool *sync.Pool) {
3046 if t.Kind() != Func {
3047 panic("reflect: funcLayout of non-func type")
3048 }
3049 if rcvr != nil && rcvr.Kind() == Interface {
3050 panic("reflect: funcLayout with interface receiver " + rcvr.String())
3051 }
3052 k := layoutKey{t, rcvr}
3053 if lti, ok := layoutCache.Load(k); ok {
3054 lt := lti.(layoutType)
3055 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
3056 }
3057
3058 // compute gc program & stack bitmap for arguments
3059 ptrmap := new(bitVector)
3060 var offset uintptr
3061 if rcvr != nil {
3062 // Reflect uses the "interface" calling convention for
3063 // methods, where receivers take one word of argument
3064 // space no matter how big they actually are.
3065 if ifaceIndir(rcvr) || rcvr.pointers() {
3066 ptrmap.append(1)
3067 } else {
3068 ptrmap.append(0)
3069 }
3070 offset += ptrSize
3071 }
3072 for _, arg := range t.in() {
3073 offset += -offset & uintptr(arg.align-1)
3074 addTypeBits(ptrmap, offset, arg)
3075 offset += arg.size
3076 }
3077 argSize = offset
3078 if runtime.GOARCH == "amd64p32" {
3079 offset += -offset & (8 - 1)
3080 }
3081 offset += -offset & (ptrSize - 1)
3082 retOffset = offset
3083 for _, res := range t.out() {
3084 offset += -offset & uintptr(res.align-1)
3085 addTypeBits(ptrmap, offset, res)
3086 offset += res.size
3087 }
3088 offset += -offset & (ptrSize - 1)
3089
3090 // build dummy rtype holding gc program
3091 x := &rtype{
3092 align: ptrSize,
3093 size: offset,
3094 ptrdata: uintptr(ptrmap.n) * ptrSize,
3095 }
3096 if runtime.GOARCH == "amd64p32" {
3097 x.align = 8
3098 }
3099 if ptrmap.n > 0 {
3100 x.gcdata = &ptrmap.data[0]
3101 } else {
3102 x.kind |= kindNoPointers
3103 }
3104
3105 var s string
3106 if rcvr != nil {
3107 s = "methodargs(" + rcvr.String() + ")(" + t.String() + ")"
3108 } else {
3109 s = "funcargs(" + t.String() + ")"
3110 }
3111 x.str = resolveReflectName(newName(s, "", false))
3112
3113 // cache result for future callers
3114 framePool = &sync.Pool{New: func() interface{} {
3115 return unsafe_New(x)
3116 }}
3117 lti, _ := layoutCache.LoadOrStore(k, layoutType{
3118 t: x,
3119 argSize: argSize,
3120 retOffset: retOffset,
3121 stack: ptrmap,
3122 framePool: framePool,
3123 })
3124 lt := lti.(layoutType)
3125 return lt.t, lt.argSize, lt.retOffset, lt.stack, lt.framePool
3126 }
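
// Illustrative sketch (not part of the original source): the expression
// offset += -offset & uintptr(align-1) used above rounds offset up to the
// next multiple of align, which must be a power of two. The helper below is
// hypothetical and only restates that arithmetic; it is how an int8 argument
// followed by a pointer argument lands at offsets 0 and 8 (argSize 16) on a
// 64-bit system.
func alignUp(offset, align uintptr) uintptr {
	return offset + (-offset & (align - 1))
}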
3127
3128 // ifaceIndir reports whether t is stored indirectly in an interface value.
3129 func ifaceIndir(t *rtype) bool {
3130 return t.kind&kindDirectIface == 0
3131 }
3132
3133 // Layout matches runtime.gobitvector (well enough).
3134 type bitVector struct {
3135 n uint32
3136 data []byte
3137 }
3138
3139 // append a bit to the bitmap.
3140 func (bv *bitVector) append(bit uint8) {
3141 if bv.n%8 == 0 {
3142 bv.data = append(bv.data, 0)
3143 }
3144 bv.data[bv.n/8] |= bit << (bv.n % 8)
3145 bv.n++
3146 }
3147
3148 func addTypeBits(bv *bitVector, offset uintptr, t *rtype) {
3149 if t.kind&kindNoPointers != 0 {
3150 return
3151 }
3152
3153 switch Kind(t.kind & kindMask) {
3154 case Chan, Func, Map, Ptr, Slice, String, UnsafePointer:
3155 // 1 pointer at start of representation
3156 for bv.n < uint32(offset/uintptr(ptrSize)) {
3157 bv.append(0)
3158 }
3159 bv.append(1)
3160
3161 case Interface:
3162 // 2 pointers
3163 for bv.n < uint32(offset/uintptr(ptrSize)) {
3164 bv.append(0)
3165 }
3166 bv.append(1)
3167 bv.append(1)
3168
3169 case Array:
3170 // repeat inner type
3171 tt := (*arrayType)(unsafe.Pointer(t))
3172 for i := 0; i < int(tt.len); i++ {
3173 addTypeBits(bv, offset+uintptr(i)*tt.elem.size, tt.elem)
3174 }
3175
3176 case Struct:
3177 // apply fields
3178 tt := (*structType)(unsafe.Pointer(t))
3179 for i := range tt.fields {
3180 f := &tt.fields[i]
3181 addTypeBits(bv, offset+f.offset(), f.typ)
3182 }
3183 }
3184 }
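
// Illustrative sketch (not part of the original source): addTypeBits fills in
// the per-word pointer bitmap that funcLayout stores for each frame. Assuming
// a 64-bit system, a struct laid out as {x uintptr; p *byte; i interface{}}
// yields the bits 0,1,1,1: the uintptr word is scalar, the *byte word is a
// pointer, and the interface contributes two pointer words (type word and
// data word). The function name below is hypothetical.
func exampleAddTypeBits() *bitVector {
	bv := new(bitVector)
	addTypeBits(bv, 0, TypeOf(struct {
		x uintptr
		p *byte
		i interface{}
	}{}).common())
	return bv // bv.n == 4, bv.data[0] == 0xe
}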
3185