
Text file src/pkg/runtime/hashmap.c

// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

#include "runtime.h"
#include "arch_GOARCH.h"
#include "malloc.h"
#include "type.h"
#include "race.h"
#include "../../cmd/ld/textflag.h"

// This file contains the implementation of Go's map type.
//
// The map is just a hash table.  The data is arranged
// into an array of buckets.  Each bucket contains up to
// 8 key/value pairs.  The low-order bits of the hash are
// used to select a bucket.  Each bucket contains a few
// high-order bits of each hash to distinguish the entries
// within a single bucket.
//
// If more than 8 keys hash to a bucket, we chain on
// extra buckets.
//
// When the hashtable grows, we allocate a new array
// of buckets twice as big.  Buckets are incrementally
// copied from the old bucket array to the new bucket array.
//
// Map iterators walk through the array of buckets and
// return the keys in walk order (bucket #, then overflow
// chain order, then bucket index).  To maintain iteration
// semantics, we never move keys within their bucket (if
// we did, keys might be returned 0 or 2 times).  When
// growing the table, iterators remain iterating through the
// old table and must check the new table if the bucket
// they are iterating through has been moved ("evacuated")
// to the new table.

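// Worked example of the bucket/tophash split (illustrative, assuming a
// 64-bit uintptr and B == 5, i.e. 32 buckets): for a hash value h,
//
//	bucket  = h & ((1<<5)-1)   // low 5 bits select one of 32 buckets
//	tophash = h >> (64 - 8)    // high byte, stored per entry in the bucket
//
// A computed tophash of 0 is remapped to 1, since 0 marks an empty slot.
// Lookups scan the eight one-byte tophash values first and only compare
// full keys on a tophash match.
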
// Maximum number of key/value pairs a bucket can hold.
#define BUCKETSIZE 8

// Maximum average load of a bucket that triggers growth.
#define LOAD 6.5

// Picking LOAD: too large and we have lots of overflow
// buckets, too small and we waste a lot of space.  I wrote
// a simple program to check some stats for different loads:
// (64-bit, 8 byte keys and values)
//        LOAD    %overflow  bytes/entry     hitprobe    missprobe
//        4.00         2.13        20.77         3.00         4.00
//        4.50         4.05        17.30         3.25         4.50
//        5.00         6.85        14.77         3.50         5.00
//        5.50        10.55        12.94         3.75         5.50
//        6.00        15.27        11.67         4.00         6.00
//        6.50        20.90        10.79         4.25         6.50
//        7.00        27.14        10.15         4.50         7.00
//        7.50        34.03         9.73         4.75         7.50
//        8.00        41.10         9.40         5.00         8.00
//
// %overflow   = percentage of buckets which have an overflow bucket
// bytes/entry = overhead bytes used per key/value pair
// hitprobe    = # of entries to check when looking up a present key
// missprobe   = # of entries to check when looking up an absent key
//
// Keep in mind this data is for maximally loaded tables, i.e. just
// before the table grows.  Typical tables will be somewhat less loaded.
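
// A worked instance of the growth trigger (a sketch, not normative):
// with B == 5 there are 2^5 = 32 buckets, so an insert that finds
// count >= LOAD * 32 = 208 triggers a grow to 64 buckets, i.e. we grow
// once the average bucket holds more than 6.5 of its 8 slots.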

// Maximum key or value size to keep inline (instead of mallocing per element).
// Must fit in a uint8.
// Fast versions cannot handle big values - the cutoff size for
// fast versions in ../../cmd/gc/walk.c must be at most this value.
#define MAXKEYSIZE 128
#define MAXVALUESIZE 128

typedef struct Bucket Bucket;
struct Bucket
{
	// Note: the format of the Bucket is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go.  Don't change this structure without also changing that code!
	uint8  tophash[BUCKETSIZE]; // top 8 bits of hash of each entry (0 = empty)
	Bucket *overflow;           // overflow bucket, if any
	byte   data[1];             // BUCKETSIZE keys followed by BUCKETSIZE values
};
// NOTE: packing all the keys together and then all the values together makes the
// code a bit more complicated than alternating key/value/key/value/... but it allows
// us to eliminate padding which would be needed for, e.g., map[int64]int8.
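
// For example, a map[int64]int8 bucket on a 64-bit system is laid out
// (a sketch, ignoring any trailing padding) as:
//
//	tophash  [8]uint8   // offset  0
//	overflow *Bucket    // offset  8
//	keys     [8]int64   // offset 16
//	values   [8]int8    // offset 80
//
// Interleaved key/value pairs would instead need 7 padding bytes after
// each int8 value to keep the following int64 key 8-byte aligned.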

// Low-order bit of overflow field is used to mark a bucket as already evacuated
// without destroying the overflow pointer.
// Only buckets in oldbuckets will be marked as evacuated.
// Evacuated bit will be set identically on the base bucket and any overflow buckets.
#define evacuated(b) (((uintptr)(b)->overflow & 1) != 0)
#define overflowptr(b) ((Bucket*)((uintptr)(b)->overflow & ~(uintptr)1))
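
// Example: if b->overflow held the pointer P before evacuation, it holds
// (Bucket*)((uintptr)P | 1) afterwards; evacuated(b) then reports true
// and overflowptr(b) still recovers P.  This encoding is safe because
// Bucket pointers are word-aligned, so the low bit of a real pointer is
// always zero.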

struct Hmap
{
	// Note: the format of the Hmap is encoded in ../../cmd/gc/reflect.c and
	// ../reflect/type.go.  Don't change this structure without also changing that code!
	uintgo  count;        // # live cells == size of map.  Must be first (used by len() builtin)
	uint32  flags;
	uint32  hash0;        // hash seed
	uint8   B;            // log_2 of # of buckets (can hold up to LOAD * 2^B items)
	uint8   keysize;      // key size in bytes
	uint8   valuesize;    // value size in bytes
	uint16  bucketsize;   // bucket size in bytes

	byte    *buckets;     // array of 2^B Buckets. may be nil if count==0.
	byte    *oldbuckets;  // previous bucket array of half the size, non-nil only when growing
	uintptr nevacuate;    // progress counter for evacuation (buckets less than this have been evacuated)
};

// possible flags
enum
{
	IndirectKey = 1,    // storing pointers to keys
	IndirectValue = 2,  // storing pointers to values
	Iterator = 4,       // there may be an iterator using buckets
	OldIterator = 8,    // there may be an iterator using oldbuckets
};

// Macros for dereferencing indirect keys and values
#define IK(h, p) (((h)->flags & IndirectKey) != 0 ? *(byte**)(p) : (p))
#define IV(h, p) (((h)->flags & IndirectValue) != 0 ? *(byte**)(p) : (p))
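
// Example: for a map[[256]byte]int32 the key exceeds MAXKEYSIZE, so each
// key slot stores a pointer to a separately allocated [256]byte and
// IK(h, k) loads that pointer; for small keys IK(h, k) is just k itself.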

enum
{
	docheck = 0,  // check invariants before and after every op.  Slow!!!
	debug = 0,    // print every operation
	checkgc = 0 || docheck,  // check interaction of mallocgc() with the garbage collector
};

static void
check(MapType *t, Hmap *h)
{
	uintptr bucket, oldbucket;
	Bucket *b;
	uintptr i;
	uintptr hash;
	uintgo cnt;
	uint8 top;
	bool eq;
	byte *k, *v;

	cnt = 0;

	// check buckets
	for(bucket = 0; bucket < (uintptr)1 << h->B; bucket++) {
		if(h->oldbuckets != nil) {
			oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if(!evacuated(b))
				continue; // b is still uninitialized
		}
		for(b = (Bucket*)(h->buckets + bucket * h->bucketsize); b != nil; b = b->overflow) {
			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				if(b->tophash[i] == 0)
					continue;
				cnt++;
				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
				if(!eq)
					continue; // NaN!
				hash = h->hash0;
				t->key->alg->hash(&hash, t->key->size, IK(h, k));
				top = hash >> (8*sizeof(uintptr) - 8);
				if(top == 0)
					top = 1;
				if(top != b->tophash[i])
					runtime·throw("bad hash");
			}
		}
	}

	// check oldbuckets
	if(h->oldbuckets != nil) {
		for(oldbucket = 0; oldbucket < (uintptr)1 << (h->B - 1); oldbucket++) {
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if(evacuated(b))
				continue;
			if(oldbucket < h->nevacuate)
				runtime·throw("bucket became unevacuated");
			for(; b != nil; b = overflowptr(b)) {
				for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
					if(b->tophash[i] == 0)
						continue;
					cnt++;
					t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
					if(!eq)
						continue; // NaN!
					hash = h->hash0;
					t->key->alg->hash(&hash, t->key->size, IK(h, k));
					top = hash >> (8*sizeof(uintptr) - 8);
					if(top == 0)
						top = 1;
					if(top != b->tophash[i])
						runtime·throw("bad hash (old)");
				}
			}
		}
	}

	if(cnt != h->count) {
		runtime·printf("%D %D\n", (uint64)cnt, (uint64)h->count);
		runtime·throw("entries missing");
	}
}

static void
hash_init(MapType *t, Hmap *h, uint32 hint)
{
	uint8 B;
	byte *buckets;
	uintptr keysize, valuesize, bucketsize;
	uint8 flags;

	flags = 0;

	// figure out how big we have to make everything
	keysize = t->key->size;
	if(keysize > MAXKEYSIZE) {
		flags |= IndirectKey;
		keysize = sizeof(byte*);
	}
	valuesize = t->elem->size;
	if(valuesize > MAXVALUESIZE) {
		flags |= IndirectValue;
		valuesize = sizeof(byte*);
	}
	bucketsize = offsetof(Bucket, data[0]) + (keysize + valuesize) * BUCKETSIZE;

	// invariants we depend on.  We should probably check these at compile time
	// somewhere, but for now we'll do it here.
	if(t->key->align > BUCKETSIZE)
		runtime·throw("key align too big");
	if(t->elem->align > BUCKETSIZE)
		runtime·throw("value align too big");
	if(t->key->size % t->key->align != 0)
		runtime·throw("key size not a multiple of key align");
	if(t->elem->size % t->elem->align != 0)
		runtime·throw("value size not a multiple of value align");
	if(BUCKETSIZE < 8)
		runtime·throw("bucketsize too small for proper alignment");
	if(sizeof(void*) == 4 && t->key->align > 4)
		runtime·throw("need padding in bucket (key)");
	if(sizeof(void*) == 4 && t->elem->align > 4)
		runtime·throw("need padding in bucket (value)");

	// find size parameter which will hold the requested # of elements
	B = 0;
	while(hint > BUCKETSIZE && hint > LOAD * ((uintptr)1 << B))
		B++;

	// allocate initial hash table
	// If hint is large zeroing this memory could take a while.
	if(checkgc) mstats.next_gc = mstats.heap_alloc;
	if(B == 0) {
		// done lazily later.
		buckets = nil;
	} else {
		buckets = runtime·cnewarray(t->bucket, (uintptr)1 << B);
	}

	// initialize Hmap
	h->count = 0;
	h->B = B;
	h->flags = flags;
	h->keysize = keysize;
	h->valuesize = valuesize;
	h->bucketsize = bucketsize;
	h->hash0 = runtime·fastrand1();
	h->buckets = buckets;
	h->oldbuckets = nil;
	h->nevacuate = 0;
	if(docheck)
		check(t, h);
}
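
// A worked instance of the sizing loop above (illustrative): hint == 100
// yields B == 4, since 100 > LOAD * 2^3 == 52 but 100 <= LOAD * 2^4 == 104,
// so the map starts with 16 buckets and room for about 104 entries
// before its first grow.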

// Moves entries in oldbuckets[i] to buckets[i] and buckets[i+newbit],
// where newbit == 2^(B-1) is the number of old buckets.
// We leave the original bucket intact, except for the evacuated marks, so that
// iterators can still iterate through the old buckets.
static void
evacuate(MapType *t, Hmap *h, uintptr oldbucket)
{
	Bucket *b;
	Bucket *nextb;
	Bucket *x, *y;
	Bucket *newx, *newy;
	uintptr xi, yi;
	uintptr newbit;
	uintptr hash;
	uintptr i;
	byte *k, *v;
	byte *xk, *yk, *xv, *yv;
	uint8 top;
	bool eq;

	b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
	newbit = (uintptr)1 << (h->B - 1);

	if(!evacuated(b)) {
		// TODO: reuse overflow buckets instead of using new ones, if there
		// is no iterator using the old buckets.  (If !OldIterator.)

		x = (Bucket*)(h->buckets + oldbucket * h->bucketsize);
		y = (Bucket*)(h->buckets + (oldbucket + newbit) * h->bucketsize);
		xi = 0;
		yi = 0;
		xk = x->data;
		yk = y->data;
		xv = xk + h->keysize * BUCKETSIZE;
		yv = yk + h->keysize * BUCKETSIZE;
		do {
			for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
				top = b->tophash[i];
				if(top == 0)
					continue;

				// Compute hash to make our evacuation decision (whether we need
				// to send this key/value to bucket x or bucket y).
				hash = h->hash0;
				t->key->alg->hash(&hash, t->key->size, IK(h, k));
				if((h->flags & Iterator) != 0) {
					t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
					if(!eq) {
						// If key != key (NaNs), then the hash could be (and probably
						// will be) entirely different from the old hash.  Moreover,
						// it isn't reproducible.  Reproducibility is required in the
						// presence of iterators, as our evacuation decision must
						// match whatever decision the iterator made.
						// Fortunately, we have the freedom to send these keys either
						// way.  Also, tophash is meaningless for these kinds of keys.
						// We let the low bit of tophash drive the evacuation decision.
						// We recompute a new random tophash for the next level so
						// these keys will get evenly distributed across all buckets
						// after multiple grows.
						if((top & 1) != 0)
							hash |= newbit;
						else
							hash &= ~newbit;
						top = hash >> (8*sizeof(uintptr)-8);
						if(top == 0)
							top = 1;
					}
				}

				if((hash & newbit) == 0) {
					if(xi == BUCKETSIZE) {
						if(checkgc) mstats.next_gc = mstats.heap_alloc;
						newx = runtime·cnew(t->bucket);
						x->overflow = newx;
						x = newx;
						xi = 0;
						xk = x->data;
						xv = xk + h->keysize * BUCKETSIZE;
					}
					x->tophash[xi] = top;
					if((h->flags & IndirectKey) != 0) {
						*(byte**)xk = *(byte**)k;               // copy pointer
					} else {
						t->key->alg->copy(t->key->size, xk, k); // copy value
					}
					if((h->flags & IndirectValue) != 0) {
						*(byte**)xv = *(byte**)v;
					} else {
						t->elem->alg->copy(t->elem->size, xv, v);
					}
					xi++;
					xk += h->keysize;
					xv += h->valuesize;
				} else {
					if(yi == BUCKETSIZE) {
						if(checkgc) mstats.next_gc = mstats.heap_alloc;
						newy = runtime·cnew(t->bucket);
						y->overflow = newy;
						y = newy;
						yi = 0;
						yk = y->data;
						yv = yk + h->keysize * BUCKETSIZE;
					}
					y->tophash[yi] = top;
					if((h->flags & IndirectKey) != 0) {
						*(byte**)yk = *(byte**)k;
					} else {
						t->key->alg->copy(t->key->size, yk, k);
					}
					if((h->flags & IndirectValue) != 0) {
						*(byte**)yv = *(byte**)v;
					} else {
						t->elem->alg->copy(t->elem->size, yv, v);
					}
					yi++;
					yk += h->keysize;
					yv += h->valuesize;
				}
			}

			// mark as evacuated so we don't do it again.
			// this also tells any iterators that this data isn't golden anymore.
			nextb = b->overflow;
			b->overflow = (Bucket*)((uintptr)nextb + 1);

			b = nextb;
		} while(b != nil);

		// Unlink the overflow buckets to help GC.
		b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
		if((h->flags & OldIterator) == 0)
			b->overflow = (Bucket*)1;
	}

	// advance evacuation mark
	if(oldbucket == h->nevacuate) {
		h->nevacuate = oldbucket + 1;
		if(oldbucket + 1 == newbit) // newbit == # of oldbuckets
			// drop reference so GC can free the old bucket array
			h->oldbuckets = nil;
	}
	if(docheck)
		check(t, h);
}
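
// Example of the x/y split above (illustrative): when growing from B == 2
// to B == 3, newbit == 4.  Entries in oldbucket 1 whose hash has the
// newbit (value 4) clear stay in bucket 1 (the x side); entries with the
// bit set move to bucket 1+4 == 5 (the y side).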

static void
grow_work(MapType *t, Hmap *h, uintptr bucket)
{
	uintptr noldbuckets;

	noldbuckets = (uintptr)1 << (h->B - 1);

	// make sure we evacuate the oldbucket corresponding
	// to the bucket we're about to use
	evacuate(t, h, bucket & (noldbuckets - 1));

	// evacuate one more oldbucket to make progress on growing
	if(h->oldbuckets != nil)
		evacuate(t, h, h->nevacuate);
}
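
// A sketch of the amortization this buys: every insert or delete into a
// growing map evacuates at most two old buckets (the one it is about to
// touch plus the next unevacuated one), so the 2^(B-1) old buckets are
// drained after on the order of 2^(B-1) map writes and no single
// operation pays for the whole copy.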

static void
hash_grow(MapType *t, Hmap *h)
{
	byte *old_buckets;
	byte *new_buckets;
	uint8 flags;

	// allocate a bigger hash table
	if(h->oldbuckets != nil)
		runtime·throw("evacuation not done in time");
	old_buckets = h->buckets;
	// NOTE: this could be a big malloc, but since we don't need zeroing it is probably fast.
	if(checkgc) mstats.next_gc = mstats.heap_alloc;
	new_buckets = runtime·cnewarray(t->bucket, (uintptr)1 << (h->B + 1));
	flags = (h->flags & ~(Iterator | OldIterator));
	if((h->flags & Iterator) != 0)
		flags |= OldIterator;

	// commit the grow (atomic wrt gc)
	h->B++;
	h->flags = flags;
	h->oldbuckets = old_buckets;
	h->buckets = new_buckets;
	h->nevacuate = 0;

	// the actual copying of the hash table data is done incrementally
	// by grow_work() and evacuate().
	if(docheck)
		check(t, h);
}

// returns ptr to value associated with key *keyp, or nil if none.
// if it returns non-nil, updates *keyp to point to the currently stored key.
static byte*
hash_lookup(MapType *t, Hmap *h, byte **keyp)
{
	void *key;
	uintptr hash;
	uintptr bucket, oldbucket;
	Bucket *b;
	uint8 top;
	uintptr i;
	bool eq;
	byte *k, *k2, *v;

	key = *keyp;
	if(docheck)
		check(t, h);
	if(h->count == 0)
		return nil;
	hash = h->hash0;
	t->key->alg->hash(&hash, t->key->size, key);
	bucket = hash & (((uintptr)1 << h->B) - 1);
	if(h->oldbuckets != nil) {
		oldbucket = bucket & (((uintptr)1 << (h->B - 1)) - 1);
		b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
		if(evacuated(b)) {
			b = (Bucket*)(h->buckets + bucket * h->bucketsize);
		}
	} else {
		b = (Bucket*)(h->buckets + bucket * h->bucketsize);
	}
	top = hash >> (sizeof(uintptr)*8 - 8);
	if(top == 0)
		top = 1;
	do {
		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
			if(b->tophash[i] == top) {
				k2 = IK(h, k);
				t->key->alg->equal(&eq, t->key->size, key, k2);
				if(eq) {
					*keyp = k2;
					return IV(h, v);
				}
			}
		}
		b = b->overflow;
	} while(b != nil);
	return nil;
}

// When an item is not found, fast versions return a pointer to this zeroed memory.
#pragma dataflag RODATA
static uint8 empty_value[MAXVALUESIZE];

// Specialized versions of mapaccess1 for specific types.
// See ./hashmap_fast.c and ../../cmd/gc/walk.c.
#define HASH_LOOKUP1 runtime·mapaccess1_fast32
#define HASH_LOOKUP2 runtime·mapaccess2_fast32
#define KEYTYPE uint32
#define HASHFUNC runtime·algarray[AMEM32].hash
#define FASTKEY(x) true
#define QUICK_NE(x,y) ((x) != (y))
#define QUICK_EQ(x,y) true
#define SLOW_EQ(x,y) true
#define MAYBE_EQ(x,y) true
#include "hashmap_fast.c"

#undef HASH_LOOKUP1
#undef HASH_LOOKUP2
#undef KEYTYPE
#undef HASHFUNC
#undef FASTKEY
#undef QUICK_NE
#undef QUICK_EQ
#undef SLOW_EQ
#undef MAYBE_EQ

#define HASH_LOOKUP1 runtime·mapaccess1_fast64
#define HASH_LOOKUP2 runtime·mapaccess2_fast64
#define KEYTYPE uint64
#define HASHFUNC runtime·algarray[AMEM64].hash
#define FASTKEY(x) true
#define QUICK_NE(x,y) ((x) != (y))
#define QUICK_EQ(x,y) true
#define SLOW_EQ(x,y) true
#define MAYBE_EQ(x,y) true
#include "hashmap_fast.c"

#undef HASH_LOOKUP1
#undef HASH_LOOKUP2
#undef KEYTYPE
#undef HASHFUNC
#undef FASTKEY
#undef QUICK_NE
#undef QUICK_EQ
#undef SLOW_EQ
#undef MAYBE_EQ

#ifdef GOARCH_amd64
#define CHECKTYPE uint64
#endif
#ifdef GOARCH_386
#define CHECKTYPE uint32
#endif
#ifdef GOARCH_arm
// can't use uint32 on arm because our loads aren't aligned.
// TODO: use uint32 for arm v6+?
#define CHECKTYPE uint8
#endif

#define HASH_LOOKUP1 runtime·mapaccess1_faststr
#define HASH_LOOKUP2 runtime·mapaccess2_faststr
#define KEYTYPE String
#define HASHFUNC runtime·algarray[ASTRING].hash
#define FASTKEY(x) ((x).len < 32)
#define QUICK_NE(x,y) ((x).len != (y).len)
#define QUICK_EQ(x,y) ((x).str == (y).str)
#define SLOW_EQ(x,y) runtime·memeq((x).str, (y).str, (x).len)
#define MAYBE_EQ(x,y) (*(CHECKTYPE*)(x).str == *(CHECKTYPE*)(y).str && *(CHECKTYPE*)((x).str + (x).len - sizeof(CHECKTYPE)) == *(CHECKTYPE*)((y).str + (x).len - sizeof(CHECKTYPE)))
#include "hashmap_fast.c"
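
// Example of how the string quick checks combine (a sketch): for a
// lookup of "hello.go", QUICK_NE rejects any key of a different length;
// MAYBE_EQ then compares the first and last CHECKTYPE-sized chunks of
// the bytes, so a same-length key like "hellx.go" is rejected without
// running the full runtime·memeq in SLOW_EQ.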

static void
hash_insert(MapType *t, Hmap *h, void *key, void *value)
{
	uintptr hash;
	uintptr bucket;
	uintptr i;
	bool eq;
	Bucket *b;
	Bucket *newb;
	uint8 *inserti;
	byte *insertk, *insertv;
	uint8 top;
	byte *k, *v;
	byte *kmem, *vmem;

	if(docheck)
		check(t, h);
	hash = h->hash0;
	t->key->alg->hash(&hash, t->key->size, key);
	if(h->buckets == nil)
		h->buckets = runtime·cnewarray(t->bucket, 1);

 again:
	bucket = hash & (((uintptr)1 << h->B) - 1);
	if(h->oldbuckets != nil)
		grow_work(t, h, bucket);
	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
	top = hash >> (sizeof(uintptr)*8 - 8);
	if(top == 0)
		top = 1;
	inserti = nil;
	insertk = nil;
	insertv = nil;
	while(true) {
		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
			if(b->tophash[i] != top) {
				if(b->tophash[i] == 0 && inserti == nil) {
					inserti = &b->tophash[i];
					insertk = k;
					insertv = v;
				}
				continue;
			}
			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
			if(!eq)
				continue;
			// already have a mapping for key.  Update it.
			t->key->alg->copy(t->key->size, IK(h, k), key); // Need to update key for keys which are distinct but equal (e.g. +0.0 and -0.0)
			t->elem->alg->copy(t->elem->size, IV(h, v), value);
			if(docheck)
				check(t, h);
			return;
		}
		if(b->overflow == nil)
			break;
		b = b->overflow;
	}

	// did not find mapping for key.  Allocate new cell & add entry.
	if(h->count >= LOAD * ((uintptr)1 << h->B) && h->count >= BUCKETSIZE) {
		hash_grow(t, h);
		goto again; // Growing the table invalidates everything, so try again
	}

	if(inserti == nil) {
		// all current buckets are full, allocate a new one.
		if(checkgc) mstats.next_gc = mstats.heap_alloc;
		newb = runtime·cnew(t->bucket);
		b->overflow = newb;
		inserti = newb->tophash;
		insertk = newb->data;
		insertv = insertk + h->keysize * BUCKETSIZE;
	}

	// store new key/value at insert position
	if((h->flags & IndirectKey) != 0) {
		if(checkgc) mstats.next_gc = mstats.heap_alloc;
		kmem = runtime·cnew(t->key);
		*(byte**)insertk = kmem;
		insertk = kmem;
	}
	if((h->flags & IndirectValue) != 0) {
		if(checkgc) mstats.next_gc = mstats.heap_alloc;
		vmem = runtime·cnew(t->elem);
		*(byte**)insertv = vmem;
		insertv = vmem;
	}
	t->key->alg->copy(t->key->size, insertk, key);
	t->elem->alg->copy(t->elem->size, insertv, value);
	*inserti = top;
	h->count++;
	if(docheck)
		check(t, h);
}
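
// A note on the tophash filter used in the loop above (a sketch of the
// math): a matching tophash byte still requires the full key comparison,
// but for a key that is not present each occupied slot produces a false
// tophash match with probability roughly 1/255, so almost every miss is
// rejected by the one-byte compare alone.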

static void
hash_remove(MapType *t, Hmap *h, void *key)
{
	uintptr hash;
	uintptr bucket;
	Bucket *b;
	uint8 top;
	uintptr i;
	byte *k, *v;
	bool eq;

	if(docheck)
		check(t, h);
	if(h->count == 0)
		return;
	hash = h->hash0;
	t->key->alg->hash(&hash, t->key->size, key);
	bucket = hash & (((uintptr)1 << h->B) - 1);
	if(h->oldbuckets != nil)
		grow_work(t, h, bucket);
	b = (Bucket*)(h->buckets + bucket * h->bucketsize);
	top = hash >> (sizeof(uintptr)*8 - 8);
	if(top == 0)
		top = 1;
	do {
		for(i = 0, k = b->data, v = k + h->keysize * BUCKETSIZE; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
			if(b->tophash[i] != top)
				continue;
			t->key->alg->equal(&eq, t->key->size, key, IK(h, k));
			if(!eq)
				continue;

			if((h->flags & IndirectKey) != 0) {
				*(byte**)k = nil;
			} else {
				t->key->alg->copy(t->key->size, k, nil);
			}
			if((h->flags & IndirectValue) != 0) {
				*(byte**)v = nil;
			} else {
				t->elem->alg->copy(t->elem->size, v, nil);
			}

			b->tophash[i] = 0;
			h->count--;

			// TODO: consolidate buckets if they are mostly empty
			// can only consolidate if there are no live iterators at this size.
			if(docheck)
				check(t, h);
			return;
		}
		b = b->overflow;
	} while(b != nil);
}

// TODO: shrink the map, the same way we grow it.

// If you modify hash_iter, also change cmd/gc/range.c to indicate
// the size of this structure.
struct hash_iter
{
	uint8* key; // Must be in first position.  Write nil to indicate iteration end (see cmd/gc/range.c).
	uint8* value;

	MapType *t;
	Hmap *h;

	// end point for iteration
	uintptr endbucket;
	bool wrapped;

	// state of table at time iterator is initialized
	uint8 B;
	byte *buckets;

	// iter state
	uintptr bucket;
	struct Bucket *bptr;
	uintptr i;
	intptr check_bucket;
};
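
// Accounting for the "11 words" size check in hash_iter_init below:
// key, value, t, h, endbucket, buckets, bucket, bptr, i and check_bucket
// are one pointer-sized word each, and wrapped (bool) plus B (uint8) pad
// out the eleventh word, on both 32- and 64-bit systems.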

// iterator state:
// bucket: the current bucket ID
// b: the current Bucket in the chain
// i: the next offset to check in the current bucket
static void
hash_iter_init(MapType *t, Hmap *h, struct hash_iter *it)
{
	uint32 old;

	if(sizeof(struct hash_iter) / sizeof(uintptr) != 11) {
		runtime·throw("hash_iter size incorrect"); // see ../../cmd/gc/range.c
	}
	it->t = t;
	it->h = h;

	// grab snapshot of bucket state
	it->B = h->B;
	it->buckets = h->buckets;

	// iterator state
	it->bucket = it->endbucket = runtime·fastrand1() & (((uintptr)1 << h->B) - 1);
	it->wrapped = false;
	it->bptr = nil;

	// Remember we have an iterator.
	// Can run concurrently with another hash_iter_init().
	for(;;) {
		old = h->flags;
		if((old&(Iterator|OldIterator)) == (Iterator|OldIterator))
			break;
		if(runtime·cas(&h->flags, old, old|Iterator|OldIterator))
			break;
	}

	if(h->buckets == nil) {
		// Empty map. Force the next hash_next to exit without
		// evaluating h->buckets.
		it->wrapped = true;
	}
}

// initializes it->key and it->value to the next key/value pair
// in the iteration, or nil if we've reached the end.
static void
hash_next(struct hash_iter *it)
{
	Hmap *h;
	MapType *t;
	uintptr bucket, oldbucket;
	uintptr hash;
	Bucket *b;
	uintptr i;
	intptr check_bucket;
	bool eq;
	byte *k, *v;
	byte *rk, *rv;

	h = it->h;
	t = it->t;
	bucket = it->bucket;
	b = it->bptr;
	i = it->i;
	check_bucket = it->check_bucket;

next:
	if(b == nil) {
		if(bucket == it->endbucket && it->wrapped) {
			// end of iteration
			it->key = nil;
			it->value = nil;
			return;
		}
		if(h->oldbuckets != nil && it->B == h->B) {
			// Iterator was started in the middle of a grow, and the grow isn't done yet.
			// If the bucket we're looking at hasn't been filled in yet (i.e. the old
			// bucket hasn't been evacuated) then we need to iterate through the old
			// bucket and only return the ones that will be migrated to this bucket.
			oldbucket = bucket & (((uintptr)1 << (it->B - 1)) - 1);
			b = (Bucket*)(h->oldbuckets + oldbucket * h->bucketsize);
			if(!evacuated(b)) {
				check_bucket = bucket;
			} else {
				b = (Bucket*)(it->buckets + bucket * h->bucketsize);
				check_bucket = -1;
			}
		} else {
			b = (Bucket*)(it->buckets + bucket * h->bucketsize);
			check_bucket = -1;
		}
		bucket++;
		if(bucket == ((uintptr)1 << it->B)) {
			bucket = 0;
			it->wrapped = true;
		}
		i = 0;
	}
	k = b->data + h->keysize * i;
	v = b->data + h->keysize * BUCKETSIZE + h->valuesize * i;
	for(; i < BUCKETSIZE; i++, k += h->keysize, v += h->valuesize) {
		if(b->tophash[i] != 0) {
			if(check_bucket >= 0) {
				// Special case: iterator was started during a grow and the
				// grow is not done yet.  We're working on a bucket whose
				// oldbucket has not been evacuated yet.  So we're iterating
				// through the oldbucket, skipping any keys that will go
				// to the other new bucket (each oldbucket expands to two
				// buckets during a grow).
				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
				if(eq) {
					// If the item in the oldbucket is not destined for
					// the current new bucket in the iteration, skip it.
					hash = h->hash0;
					t->key->alg->hash(&hash, t->key->size, IK(h, k));
					if((hash & (((uintptr)1 << it->B) - 1)) != check_bucket) {
						continue;
					}
				} else {
					// Hash isn't repeatable if k != k (NaNs).  We need a
					// repeatable and randomish choice of which direction
					// to send NaNs during evacuation.  We'll use the low
					// bit of tophash to decide which way NaNs go.
					if(check_bucket >> (it->B - 1) != (b->tophash[i] & 1)) {
						continue;
					}
				}
			}
			if(!evacuated(b)) {
				// this is the golden data, we can return it.
				it->key = IK(h, k);
				it->value = IV(h, v);
			} else {
				// The hash table has grown since the iterator was started.
				// The golden data for this key is now somewhere else.
				t->key->alg->equal(&eq, t->key->size, IK(h, k), IK(h, k));
				if(eq) {
					// Check the current hash table for the data.
					// This code handles the case where the key
					// has been deleted, updated, or deleted and reinserted.
					// NOTE: we need to regrab the key as it has potentially been
					// updated to an equal() but not identical key (e.g. +0.0 vs -0.0).
					rk = IK(h, k);
					rv = hash_lookup(t, it->h, &rk);
					if(rv == nil)
						continue; // key has been deleted
					it->key = rk;
					it->value = rv;
				} else {
					// if key!=key then the entry can't be deleted or
					// updated, so we can just return it.  That's lucky for
					// us because when key!=key we can't look it up
					// successfully in the current table.
					it->key = IK(h, k);
					it->value = IV(h, v);
				}
			}
			it->bucket = bucket;
			it->bptr = b;
			it->i = i + 1;
			it->check_bucket = check_bucket;
			return;
		}
	}
	b = overflowptr(b);
	i = 0;
	goto next;
}

//
/// interfaces to go runtime
//

void
reflect·ismapkey(Type *typ, bool ret)
{
	ret = typ != nil && typ->alg->hash != runtime·nohash;
	FLUSH(&ret);
}

Hmap*
runtime·makemap_c(MapType *typ, int64 hint)
{
	Hmap *h;
	Type *key;

	key = typ->key;

	if(sizeof(Hmap) > 48)
		runtime·panicstring("hmap too large");

	if(hint < 0 || (int32)hint != hint)
		runtime·panicstring("makemap: size out of range");

	if(key->alg->hash == runtime·nohash)
		runtime·throw("runtime.makemap: unsupported map key type");

	h = runtime·cnew(typ->hmap);
	hash_init(typ, h, hint);

	// these calculations are compiler dependent.
	// figure out offsets of map call arguments.

	if(debug) {
		runtime·printf("makemap: map=%p; keysize=%p; valsize=%p; keyalg=%p; valalg=%p\n",
			       h, key->size, typ->elem->size, key->alg, typ->elem->alg);
	}

	return h;
}

// makemap(key, val *Type, hint int64) (hmap *map[any]any);
void
runtime·makemap(MapType *typ, int64 hint, Hmap *ret)
{
	ret = runtime·makemap_c(typ, hint);
	FLUSH(&ret);
}

// For reflect:
//	func makemap(Type *mapType) (hmap *map)
void
reflect·makemap(MapType *t, Hmap *ret)
{
	ret = runtime·makemap_c(t, 0);
	FLUSH(&ret);
}

void
runtime·mapaccess(MapType *t, Hmap *h, byte *ak, byte *av, bool *pres)
{
	byte *res;
	Type *elem;

	elem = t->elem;
	if(h == nil || h->count == 0) {
		elem->alg->copy(elem->size, av, nil);
		*pres = false;
		return;
	}

	res = hash_lookup(t, h, &ak);

	if(res != nil) {
		*pres = true;
		elem->alg->copy(elem->size, av, res);
	} else {
		*pres = false;
		elem->alg->copy(elem->size, av, nil);
	}
}

// mapaccess1(hmap *map[any]any, key any) (val any);
#pragma textflag NOSPLIT
void
runtime·mapaccess1(MapType *t, Hmap *h, ...)
{
	byte *ak, *av;
	byte *res;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess1);

	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, Structrnd);

	if(h == nil || h->count == 0) {
		t->elem->alg->copy(t->elem->size, av, nil);
	} else {
		res = hash_lookup(t, h, &ak);
		t->elem->alg->copy(t->elem->size, av, res);
	}

	if(debug) {
		runtime·prints("runtime.mapaccess1: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("; val=");
		t->elem->alg->print(t->elem->size, av);
		runtime·prints("\n");
	}
}

// mapaccess2(hmap *map[any]any, key any) (val any, pres bool);
#pragma textflag NOSPLIT
void
runtime·mapaccess2(MapType *t, Hmap *h, ...)
{
	byte *ak, *av, *ap;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapaccess2);

	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, Structrnd);
	ap = av + t->elem->size;

	runtime·mapaccess(t, h, ak, av, ap);

	if(debug) {
		runtime·prints("runtime.mapaccess2: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("; val=");
		t->elem->alg->print(t->elem->size, av);
		runtime·prints("; pres=");
		runtime·printbool(*ap);
		runtime·prints("\n");
	}
}

// For reflect:
//	func mapaccess(t type, h map, key iword) (val iword, pres bool)
// where an iword is the same word an interface value would use:
// the actual data if it fits, or else a pointer to the data.
void
reflect·mapaccess(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
{
	byte *ak, *av;

	if(raceenabled && h != nil)
		runtime·racereadpc(h, runtime·getcallerpc(&t), reflect·mapaccess);

	if(t->key->size <= sizeof(key))
		ak = (byte*)&key;
	else
		ak = (byte*)key;
	val = 0;
	pres = false;
	if(t->elem->size <= sizeof(val))
		av = (byte*)&val;
	else {
		av = runtime·mal(t->elem->size);
		val = (uintptr)av;
	}
	runtime·mapaccess(t, h, ak, av, &pres);
	FLUSH(&val);
	FLUSH(&pres);
}

void
runtime·mapassign(MapType *t, Hmap *h, byte *ak, byte *av)
{
	if(h == nil)
		runtime·panicstring("assignment to entry in nil map");

	if(av == nil) {
		hash_remove(t, h, ak);
	} else {
		hash_insert(t, h, ak, av);
	}

	if(debug) {
		runtime·prints("mapassign: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("; val=");
		if(av)
			t->elem->alg->print(t->elem->size, av);
		else
			runtime·prints("nil");
		runtime·prints("\n");
	}
}

// mapassign1(mapType *type, hmap *map[any]any, key any, val any);
#pragma textflag NOSPLIT
void
runtime·mapassign1(MapType *t, Hmap *h, ...)
{
	byte *ak, *av;

	if(h == nil)
		runtime·panicstring("assignment to entry in nil map");

	if(raceenabled)
		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapassign1);
	ak = (byte*)(&h + 1);
	av = ak + ROUND(t->key->size, t->elem->align);

	runtime·mapassign(t, h, ak, av);
}

// mapdelete(mapType *type, hmap *map[any]any, key any)
#pragma textflag NOSPLIT
void
runtime·mapdelete(MapType *t, Hmap *h, ...)
{
	byte *ak;

	if(h == nil)
		return;

	if(raceenabled)
		runtime·racewritepc(h, runtime·getcallerpc(&t), runtime·mapdelete);
	ak = (byte*)(&h + 1);
	runtime·mapassign(t, h, ak, nil);

	if(debug) {
		runtime·prints("mapdelete: map=");
		runtime·printpointer(h);
		runtime·prints("; key=");
		t->key->alg->print(t->key->size, ak);
		runtime·prints("\n");
	}
}

// For reflect:
//	func mapassign(t type, h map, key, val iword, pres bool)
// where an iword is the same word an interface value would use:
// the actual data if it fits, or else a pointer to the data.
void
reflect·mapassign(MapType *t, Hmap *h, uintptr key, uintptr val, bool pres)
{
	byte *ak, *av;

	if(h == nil)
		runtime·panicstring("assignment to entry in nil map");
	if(raceenabled)
		runtime·racewritepc(h, runtime·getcallerpc(&t), reflect·mapassign);
	if(t->key->size <= sizeof(key))
		ak = (byte*)&key;
	else
		ak = (byte*)key;
	if(t->elem->size <= sizeof(val))
		av = (byte*)&val;
	else
		av = (byte*)val;
	if(!pres)
		av = nil;
	runtime·mapassign(t, h, ak, av);
}

// mapiterinit(mapType *type, hmap *map[any]any, hiter *any);
void
runtime·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
{
	if(h == nil || h->count == 0) {
		it->key = nil;
		return;
	}
	if(raceenabled)
		runtime·racereadpc(h, runtime·getcallerpc(&t), runtime·mapiterinit);
	hash_iter_init(t, h, it);
	hash_next(it);
	if(debug) {
		runtime·prints("runtime.mapiterinit: map=");
		runtime·printpointer(h);
		runtime·prints("; iter=");
		runtime·printpointer(it);
		runtime·prints("; key=");
		runtime·printpointer(it->key);
		runtime·prints("\n");
	}
}

// For reflect:
//	func mapiterinit(h map) (it iter)
void
reflect·mapiterinit(MapType *t, Hmap *h, struct hash_iter *it)
{
	it = runtime·mal(sizeof *it);
	FLUSH(&it);
	runtime·mapiterinit(t, h, it);
}

// mapiternext(hiter *any);
void
runtime·mapiternext(struct hash_iter *it)
{
	if(raceenabled)
		runtime·racereadpc(it->h, runtime·getcallerpc(&it), runtime·mapiternext);

	hash_next(it);
	if(debug) {
		runtime·prints("runtime.mapiternext: iter=");
		runtime·printpointer(it);
		runtime·prints("; key=");
		runtime·printpointer(it->key);
		runtime·prints("\n");
	}
}

// For reflect:
//	func mapiternext(it iter)
void
reflect·mapiternext(struct hash_iter *it)
{
	runtime·mapiternext(it);
}

// mapiter1(hiter *any) (key any);
#pragma textflag NOSPLIT
void
runtime·mapiter1(struct hash_iter *it, ...)
{
	byte *ak, *res;
	Type *key;

	ak = (byte*)(&it + 1);

	res = it->key;
	if(res == nil)
		runtime·throw("runtime.mapiter1: key:val nil pointer");

	key = it->t->key;
	key->alg->copy(key->size, ak, res);

	if(debug) {
		runtime·prints("mapiter1: iter=");
		runtime·printpointer(it);
		runtime·prints("; map=");
		runtime·printpointer(it->h);
		runtime·prints("\n");
	}
}

bool
runtime·mapiterkey(struct hash_iter *it, void *ak)
{
	byte *res;
	Type *key;

	res = it->key;
	if(res == nil)
		return false;
	key = it->t->key;
	key->alg->copy(key->size, ak, res);
	return true;
}

// For reflect:
//	func mapiterkey(h map) (key iword, ok bool)
// where an iword is the same word an interface value would use:
// the actual data if it fits, or else a pointer to the data.
void
reflect·mapiterkey(struct hash_iter *it, uintptr key, bool ok)
{
	byte *res;
	Type *tkey;

	key = 0;
	ok = false;
	res = it->key;
	if(res != nil) {
		tkey = it->t->key;
		if(tkey->size <= sizeof(key))
			tkey->alg->copy(tkey->size, (byte*)&key, res);
		else
			key = (uintptr)res;
		ok = true;
	}
	FLUSH(&key);
	FLUSH(&ok);
}

// For reflect:
//	func maplen(h map) (len int)
// Like len(m) in the actual language, we treat the nil map as length 0.
void
reflect·maplen(Hmap *h, intgo len)
{
	if(h == nil)
		len = 0;
	else {
		len = h->count;
		if(raceenabled)
			runtime·racereadpc(h, runtime·getcallerpc(&h), reflect·maplen);
	}
	FLUSH(&len);
}

// mapiter2(hiter *any) (key any, val any);
#pragma textflag NOSPLIT
void
runtime·mapiter2(struct hash_iter *it, ...)
{
	byte *ak, *av, *res;
	MapType *t;

	t = it->t;
	ak = (byte*)(&it + 1);
	av = ak + ROUND(t->key->size, t->elem->align);

	res = it->key;
	if(res == nil)
		runtime·throw("runtime.mapiter2: key:val nil pointer");

	t->key->alg->copy(t->key->size, ak, res);
	t->elem->alg->copy(t->elem->size, av, it->value);

	if(debug) {
		runtime·prints("mapiter2: iter=");
		runtime·printpointer(it);
		runtime·prints("; map=");
		runtime·printpointer(it->h);
		runtime·prints("\n");
	}
}

// exported value for testing
float64 runtime·hashLoad = LOAD;
