pprof

tests.test cpu
File: tests.test
Build ID: caa91d7109af990038802f6c4cd39acce0ba6adc
Type: cpu
Time: 2026-01-08 14:53:10 UTC
Duration: 18.31s, Total samples = 47.77s (260.83%)

internal/runtime/syscall.Syscall6

/usr/lib/go/src/internal/runtime/syscall/asm_linux_arm64.s

  Total:       5.63s      5.63s (flat, cum) 11.79%
      4            .          .            
      5            .          .           #include "textflag.h" 
      6            .          .            
      7            .          .           // func Syscall6(num, a1, a2, a3, a4, a5, a6 uintptr) (r1, r2, errno uintptr) 
      8            .          .           TEXT ·Syscall6(SB),NOSPLIT,$0-80 
      9         10ms       10ms           	MOVD	num+0(FP), R8	// syscall entry 
     10            .          .           	MOVD	a1+8(FP), R0 
     11            .          .           	MOVD	a2+16(FP), R1 
     12            .          .           	MOVD	a3+24(FP), R2 
     13         10ms       10ms           	MOVD	a4+32(FP), R3 
     14            .          .           	MOVD	a5+40(FP), R4 
     15            .          .           	MOVD	a6+48(FP), R5 
     16            .          .           	SVC 
     17        5.56s      5.56s           	CMN	$4095, R0 
     18         10ms       10ms           	BCC	ok 
     19         10ms       10ms           	MOVD	$-1, R4 
     20            .          .           	MOVD	R4, r1+56(FP) 
     21            .          .           	MOVD	ZR, r2+64(FP) 
     22            .          .           	NEG	R0, R0 
     23            .          .           	MOVD	R0, errno+72(FP) 
     24            .          .           	RET 
     25            .          .           ok: 
     26         30ms       30ms           	MOVD	R0, r1+56(FP) 
     27            .          .           	MOVD	R1, r2+64(FP) 
     28            .          .           	MOVD	ZR, errno+72(FP) 
     29            .          .           	RET 
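
Note: nearly all of this block's 5.63s is charged to the CMN at line 17. That instruction is cheap by itself; it is the first user-mode instruction after SVC returns, and SIGPROF-based profiling typically attributes time spent inside the kernel to the syscall's return point. A minimal sketch of producing a comparable profile with runtime/pprof (the /dev/null write loop is an illustrative workload, not the profiled test):

    package main

    import (
    	"os"
    	"runtime/pprof"
    )

    func main() {
    	prof, err := os.Create("cpu.pprof")
    	if err != nil {
    		panic(err)
    	}
    	defer prof.Close()

    	if err := pprof.StartCPUProfile(prof); err != nil {
    		panic(err)
    	}
    	defer pprof.StopCPUProfile()

    	// Syscall-heavy loop: each Write enters the kernel through
    	// Syscall6, so most samples land right after SVC returns.
    	devnull, err := os.OpenFile(os.DevNull, os.O_WRONLY, 0)
    	if err != nil {
    		panic(err)
    	}
    	defer devnull.Close()

    	buf := make([]byte, 1)
    	for i := 0; i < 1_000_000; i++ {
    		devnull.Write(buf)
    	}
    }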

runtime.gopark

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
    456            .          .           	mp.waitTraceBlockReason = traceReason 
    457            .          .           	mp.waitTraceSkip = traceskip 
    458            .          .           	releasem(mp) 
    459            .          .           	// can't do anything that might move the G between Ms here. 
    460            .          .           	mcall(park_m) 
    461         10ms       10ms           } 
    462            .          .            
    463            .          .           // Puts the current goroutine into a waiting state and unlocks the lock. 

runtime.goparkunlock

/usr/lib/go/src/runtime/proc.go

  Total:           0       10ms (flat, cum) 0.021%
    464            .          .           // The goroutine can be made runnable again by calling goready(gp). 
    465            .          .           func goparkunlock(lock *mutex, reason waitReason, traceReason traceBlockReason, traceskip int) { 
    466            .       10ms           	gopark(parkunlock_c, unsafe.Pointer(lock), reason, traceReason, traceskip) 
    467            .          .           } 
    468            .          .            
    469            .          .           // goready should be an internal detail, 
    470            .          .           // but widely used packages access it using linkname. 
    471            .          .           // Notable members of the hall of shame include: 

runtime.goready

/usr/lib/go/src/runtime/proc.go

  Total:           0      680ms (flat, cum)  1.42%
    475            .          .           // Do not remove or change the type signature. 
    476            .          .           // See go.dev/issue/67401. 
    477            .          .           // 
    478            .          .           //go:linkname goready 
    479            .          .           func goready(gp *g, traceskip int) { 
    480            .      680ms           	systemstack(func() { 

runtime.send.goready.func1

/usr/lib/go/src/runtime/proc.go

  Total:           0      680ms (flat, cum)  1.42%
    481            .      680ms           		ready(gp, traceskip, true) 
    482            .          .           	}) 
    483            .          .           } 
    484            .          .            
    485            .          .           //go:nosplit 
    486            .          .           func acquireSudog() *sudog { 
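
Note: gopark and goready are runtime-internal; user code reaches them through blocking primitives. An illustrative sketch (not the profiled test) in which a channel receive parks the goroutine via gopark, and the sending side runs the goready -> ready -> wakep path shown in the blocks below:

    package main

    import "time"

    func main() {
    	ch := make(chan int) // unbuffered: a receive parks until a send arrives

    	go func() {
    		time.Sleep(10 * time.Millisecond)
    		ch <- 1 // runtime.chansend -> send -> goready(receiver G)
    	}()

    	<-ch // runtime.chanrecv -> gopark(..., waitReasonChanReceive, ...)
    }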

runtime.acquireSudog

/usr/lib/go/src/runtime/proc.go

  Total:       130ms      130ms (flat, cum)  0.27%
    507            .          .           		// If the central cache is empty, allocate a new one. 
    508            .          .           		if len(pp.sudogcache) == 0 { 
    509            .          .           			pp.sudogcache = append(pp.sudogcache, new(sudog)) 
    510            .          .           		} 
    511            .          .           	} 
    512         10ms       10ms           	n := len(pp.sudogcache) 
    513            .          .           	s := pp.sudogcache[n-1] 
    514         10ms       10ms           	pp.sudogcache[n-1] = nil 
    515            .          .           	pp.sudogcache = pp.sudogcache[:n-1] 
    516        110ms      110ms           	if s.elem != nil { 
    517            .          .           		throw("acquireSudog: found s.elem != nil in cache") 
    518            .          .           	} 
    519            .          .           	releasem(mp) 

runtime.releaseSudog

/usr/lib/go/src/runtime/proc.go

  Total:        40ms       40ms (flat, cum) 0.084%
    521            .          .           } 
    522            .          .            
    523            .          .           //go:nosplit 
    524         20ms       20ms           func releaseSudog(s *sudog) { 
    525            .          .           	if s.elem != nil { 
    526            .          .           		throw("runtime: sudog with non-nil elem") 
    527            .          .           	} 
    528            .          .           	if s.isSelect { 
    529            .          .           		throw("runtime: sudog with non-false isSelect") 
    530            .          .           	} 
    531            .          .           	if s.next != nil { 
    532            .          .           		throw("runtime: sudog with non-nil next") 
    533            .          .           	} 
    534            .          .           	if s.prev != nil { 
    535            .          .           		throw("runtime: sudog with non-nil prev") 
    536            .          .           	} 
    537            .          .           	if s.waitlink != nil { 
    538            .          .           		throw("runtime: sudog with non-nil waitlink") 
    539            .          .           	} 
    540         10ms       10ms           	if s.c != nil { 
    541            .          .           		throw("runtime: sudog with non-nil c") 
    542            .          .           	} 
    543            .          .           	gp := getg() 
    544            .          .           	if gp.param != nil { 
    545            .          .           		throw("runtime: releaseSudog with non-nil gp.param") 
    546            .          .           	} 
    547            .          .           	mp := acquirem() // avoid rescheduling to another P 
    548            .          .           	pp := mp.p.ptr() 
    549         10ms       10ms           	if len(pp.sudogcache) == cap(pp.sudogcache) { 
    550            .          .           		// Transfer half of local cache to the central cache. 
    551            .          .           		var first, last *sudog 
    552            .          .           		for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 
    553            .          .           			n := len(pp.sudogcache) 
    554            .          .           			p := pp.sudogcache[n-1] 

runtime.releaseSudog

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
    566            .          .           		sched.sudogcache = first 
    567            .          .           		unlock(&sched.sudoglock) 
    568            .          .           	} 
    569            .          .           	pp.sudogcache = append(pp.sudogcache, s) 
    570            .          .           	releasem(mp) 
    571         10ms       10ms           } 
    572            .          .            
    573            .          .           // called from assembly. 
    574            .          .           func badmcall(fn func(*g)) { 
    575            .          .           	throw("runtime: mcall called on m->g0 stack") 
    576            .          .           } 
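
Note: acquireSudog and releaseSudog implement a two-level cache: a per-P slice that needs no locking, refilled from and spilled to a mutex-protected central list in bulk. A simplified, self-contained sketch of that pattern (not runtime code; the names and the capacity of 128 are illustrative):

    package cache

    import "sync"

    type node struct{ next *node }

    // central is the shared pool (cf. sched.sudogcache + sched.sudoglock).
    type central struct {
    	mu   sync.Mutex
    	list *node
    }

    // local is a per-worker cache (cf. pp.sudogcache); only its owner
    // touches it, so no locking is needed.
    type local struct {
    	cache []*node
    	c     *central
    }

    func newLocal(c *central) *local {
    	return &local{cache: make([]*node, 0, 128), c: c}
    }

    func (l *local) acquire() *node {
    	if len(l.cache) == 0 {
    		// Refill up to half the local capacity from the central list.
    		l.c.mu.Lock()
    		for len(l.cache) < cap(l.cache)/2 && l.c.list != nil {
    			n := l.c.list
    			l.c.list, n.next = n.next, nil
    			l.cache = append(l.cache, n)
    		}
    		l.c.mu.Unlock()
    		if len(l.cache) == 0 {
    			// Central cache empty: allocate a fresh node.
    			l.cache = append(l.cache, new(node))
    		}
    	}
    	n := l.cache[len(l.cache)-1]
    	l.cache[len(l.cache)-1] = nil // don't retain a stale reference
    	l.cache = l.cache[:len(l.cache)-1]
    	return n
    }

    func (l *local) release(n *node) {
    	if len(l.cache) == cap(l.cache) {
    		// Local cache full: transfer half of it to the central list.
    		l.c.mu.Lock()
    		for len(l.cache) > cap(l.cache)/2 {
    			p := l.cache[len(l.cache)-1]
    			l.cache[len(l.cache)-1] = nil
    			l.cache = l.cache[:len(l.cache)-1]
    			p.next = l.c.list
    			l.c.list = p
    		}
    		l.c.mu.Unlock()
    	}
    	l.cache = append(l.cache, n)
    }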

runtime.(*m).becomeSpinning

/usr/lib/go/src/runtime/proc.go

  Total:        80ms       80ms (flat, cum)  0.17%
   1067            .          .           //go:linkname pprof_makeProfStack 
   1068            .          .           func pprof_makeProfStack() []uintptr { return makeProfStack() } 
   1069            .          .            
   1070            .          .           func (mp *m) becomeSpinning() { 
   1071            .          .           	mp.spinning = true 
   1072         80ms       80ms           	sched.nmspinning.Add(1) 
                                               ⋮ return Xaddint32(&i.value, delta)    types.go:56
   1073            .          .           	sched.needspinning.Store(0) 
   1074            .          .           } 
   1075            .          .            
   1076            .          .           // Take a snapshot of allp, for use after dropping the P. 
   1077            .          .           // 

runtime.(*m).snapshotAllp

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1078            .          .           // Must be called with a P, but the returned slice may be used after dropping 
   1079            .          .           // the P. The M holds a reference on the snapshot to keep the backing array 
   1080            .          .           // alive. 
   1081            .          .           // 
   1082            .          .           //go:yeswritebarrierrec 
   1083         20ms       20ms           func (mp *m) snapshotAllp() []*p { 
   1084            .          .           	mp.allpSnapshot = allp 
   1085            .          .           	return mp.allpSnapshot 
   1086            .          .           } 
   1087            .          .            
   1088            .          .           // Clear the saved allp snapshot. Should be called as soon as the snapshot is 

runtime.(*m).clearAllpSnapshot

/usr/lib/go/src/runtime/proc.go

  Total:        40ms       40ms (flat, cum) 0.084%
   1089            .          .           // no longer required. 
   1090            .          .           // 
   1091            .          .           // Must be called after reacquiring a P, as it requires a write barrier. 
   1092            .          .           // 
   1093            .          .           //go:yeswritebarrierrec 
   1094         30ms       30ms           func (mp *m) clearAllpSnapshot() { 
   1095         10ms       10ms           	mp.allpSnapshot = nil 
   1096            .          .           } 
   1097            .          .            
   1098            .          .           func (mp *m) hasCgoOnStack() bool { 
   1099            .          .           	return mp.ncgo > 0 || mp.isextra 
   1100            .          .           } 

runtime.ready

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      680ms (flat, cum)  1.42%
   1113            .          .           	osHasLowResClock = osHasLowResClockInt > 0 
   1114            .          .           ) 
   1115            .          .            
   1116            .          .           // Mark gp ready to run. 
   1117            .          .           func ready(gp *g, traceskip int, next bool) { 
   1118         20ms       20ms           	status := readgstatus(gp) 
                                               ⋮ return gp.atomicstatus.Load()        proc.go:1205
                                               ⋮ return Load(&u.value)                types.go:194
   1119            .          .            
   1120            .          .           	// Mark runnable. 
   1121            .          .           	mp := acquirem() // disable preemption because it can be holding p in a local var 
   1122            .          .           	if status&^_Gscan != _Gwaiting { 
   1123            .          .           		dumpgstatus(gp) 
   1124            .          .           		throw("bad g->status in ready") 
   1125            .          .           	} 
   1126            .          .            
   1127            .          .           	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 
   1128            .          .           	trace := traceAcquire() 
   1129            .       10ms           	casgstatus(gp, _Gwaiting, _Grunnable) 
   1130            .          .           	if trace.ok() { 
   1131            .          .           		trace.GoUnpark(gp, traceskip) 
   1132            .          .           		traceRelease(trace) 
   1133            .          .           	} 
   1134         10ms       20ms           	runqput(mp.p.ptr(), gp, next) 
   1135            .      630ms           	wakep() 
   1136            .          .           	releasem(mp) 
   1137            .          .           } 
   1138            .          .            
   1139            .          .           // freezeStopWait is a large value that freezetheworld sets 
   1140            .          .           // sched.stopwait to in order to request that all Gs permanently stop. 

runtime.readgstatus

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   1200            .          .           // All reads and writes of g's status go through readgstatus, casgstatus 
   1201            .          .           // castogscanstatus, casfrom_Gscanstatus. 
   1202            .          .           // 
   1203            .          .           //go:nosplit 
   1204            .          .           func readgstatus(gp *g) uint32 { 
   1205         30ms       30ms           	return gp.atomicstatus.Load() 
                                               ⋮ return Load(&u.value)                types.go:194
   1206            .          .           } 
   1207            .          .            
   1208            .          .           // The Gscanstatuses are acting like locks and this releases them. 
   1209            .          .           // If it proves to be a performance hit we should be able to make these 
   1210            .          .           // simple atomic stores but for now we are going to throw if 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:       430ms      430ms (flat, cum)   0.9%
   1265            .          .           // and casfrom_Gscanstatus instead. 
   1266            .          .           // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 
   1267            .          .           // put it in the Gscan state is finished. 
   1268            .          .           // 
   1269            .          .           //go:nosplit 
   1270         20ms       20ms           func casgstatus(gp *g, oldval, newval uint32) { 
   1271            .          .           	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 
   1272            .          .           		systemstack(func() { 
   1273            .          .           			// Call on the systemstack to prevent print and throw from counting 
   1274            .          .           			// against the nosplit stack reservation. 
   1275            .          .           			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 
   1276            .          .           			throw("casgstatus: bad incoming values") 
   1277            .          .           		}) 
   1278            .          .           	} 
   1279            .          .            
   1280            .          .           	lockWithRankMayAcquire(nil, lockRankGscan) 
   1281            .          .            
   1282            .          .           	// See https://golang.org/cl/21503 for justification of the yield delay. 
   1283            .          .           	const yieldDelay = 5 * 1000 
   1284            .          .           	var nextYield int64 
   1285            .          .            
   1286            .          .           	// loop if gp->atomicstatus is in a scan state giving 
   1287            .          .           	// GC time to finish and change the state to oldval. 
   1288        410ms      410ms           	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ { 
                                               ⋮ return Cas(&u.value, old, new)       types.go:236
   1289            .          .           		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable { 
   1290            .          .           			systemstack(func() { 
   1291            .          .           				// Call on the systemstack to prevent throw from counting 
   1292            .          .           				// against the nosplit stack reservation. 
   1293            .          .           				throw("casgstatus: waiting for Gwaiting but is Grunnable") 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:       110ms      110ms (flat, cum)  0.23%
   1304            .          .           			osyield() 
   1305            .          .           			nextYield = nanotime() + yieldDelay/2 
   1306            .          .           		} 
   1307            .          .           	} 
   1308            .          .            
   1309         60ms       60ms           	if gp.bubble != nil { 
   1310            .          .           		systemstack(func() { 
   1311            .          .           			gp.bubble.changegstatus(gp, oldval, newval) 
   1312            .          .           		}) 
   1313            .          .           	} 
   1314            .          .            
   1315         10ms       10ms           	if oldval == _Grunning { 
   1316            .          .           		// Track every gTrackingPeriod time a goroutine transitions out of running. 
   1317            .          .           		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 { 
   1318            .          .           			gp.tracking = true 
   1319            .          .           		} 
   1320         10ms       10ms           		gp.trackingSeq++ 
   1321            .          .           	} 
   1322            .          .           	if !gp.tracking { 
   1323         10ms       10ms           		return 
   1324            .          .           	} 
   1325            .          .            
   1326            .          .           	// Handle various kinds of tracking. 
   1327            .          .           	// 
   1328            .          .           	// Currently: 
   1329            .          .           	// - Time spent in runnable. 
   1330            .          .           	// - Time spent blocked on a sync.Mutex or sync.RWMutex. 
   1331            .          .           	switch oldval { 
   1332            .          .           	case _Grunnable: 
   1333            .          .           		// We transitioned out of runnable, so measure how much 
   1334            .          .           		// time we spent in this state and add it to 
   1335            .          .           		// runnableTime. 
   1336         20ms       20ms           		now := nanotime() 
                                               ⋮ return nanotime1()                   time_nofake.go:33
   1337            .          .           		gp.runnableTime += now - gp.trackingStamp 
   1338            .          .           		gp.trackingStamp = 0 
   1339            .          .           	case _Gwaiting: 
   1340            .          .           		if !gp.waitreason.isMutexWait() { 
   1341            .          .           			// Not blocking on a lock. 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      190ms (flat, cum)   0.4%
   1357            .          .           			break 
   1358            .          .           		} 
   1359            .          .           		// Blocking on a lock. Write down the timestamp. 
   1360            .          .           		now := nanotime() 
   1361            .          .           		gp.trackingStamp = now 
   1362         30ms       30ms           	case _Grunnable: 
   1363            .          .           		// We just transitioned into runnable, so record what 
   1364            .          .           		// time that happened. 
   1365         20ms       20ms           		now := nanotime() 
                                               ⋮ return nanotime1()                   time_nofake.go:33
   1366            .          .           		gp.trackingStamp = now 
   1367            .          .           	case _Grunning: 
   1368            .          .           		// We're transitioning into running, so turn off 
   1369            .          .           		// tracking and record how much time we spent in 
   1370            .          .           		// runnable. 
   1371            .          .           		gp.tracking = false 
   1372            .      140ms           		sched.timeToRun.record(gp.runnableTime) 
   1373            .          .           		gp.runnableTime = 0 
   1374            .          .           	} 
   1375            .          .           } 
   1376            .          .            
   1377            .          .           // casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason. 
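
Note: the 410ms CAS loop above spins while the G sits in a transient _Gscan state, yielding the OS thread if the state takes too long to clear. A user-space sketch of the same CAS-with-yield structure (runtime.Gosched stands in for the internal osyield; the 5µs delay echoes yieldDelay = 5*1000 ns):

    package main

    import (
    	"runtime"
    	"sync/atomic"
    	"time"
    )

    // casWithYield mirrors the loop shape of casgstatus: busy-wait on CAS
    // while the value is expected to change soon, then start yielding.
    func casWithYield(v *atomic.Uint32, old, newv uint32) {
    	const yieldDelay = 5 * time.Microsecond
    	var nextYield time.Time
    	for i := 0; !v.CompareAndSwap(old, newv); i++ {
    		if i == 0 {
    			nextYield = time.Now().Add(yieldDelay)
    		}
    		if time.Now().Before(nextYield) {
    			continue // busy-wait: the transient state should clear soon
    		}
    		runtime.Gosched() // user-space stand-in for osyield()
    		nextYield = time.Now().Add(yieldDelay / 2)
    	}
    }

    func main() {
    	var status atomic.Uint32    // 0 = "waiting", 1 = "runnable"
    	casWithYield(&status, 0, 1) // succeeds immediately here
    }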

runtime.mPark

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.08s (flat, cum)  2.26%
   1969            .          .           // mPark causes a thread to park itself, returning once woken. 
   1970            .          .           // 
   1971            .          .           //go:nosplit 
   1972            .          .           func mPark() { 
   1973            .          .           	gp := getg() 
   1974            .      1.08s           	notesleep(&gp.m.park) 
   1975            .          .           	noteclear(&gp.m.park) 
   1976            .          .           } 
   1977            .          .            
   1978            .          .           // mexit tears down and exits the current thread. 
   1979            .          .           // 

runtime.stopm

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      1.33s (flat, cum)  2.78%
   2992            .          .           	} 
   2993            .          .           } 
   2994            .          .            
   2995            .          .           // Stops execution of the current m until new work is available. 
   2996            .          .           // Returns with acquired P. 
   2997         20ms       20ms           func stopm() { 
   2998            .          .           	gp := getg() 
   2999            .          .            
   3000            .          .           	if gp.m.locks != 0 { 
   3001            .          .           		throw("stopm holding locks") 
   3002            .          .           	} 
   3003            .          .           	if gp.m.p != 0 { 
   3004            .          .           		throw("stopm holding p") 
   3005            .          .           	} 
   3006            .          .           	if gp.m.spinning { 
   3007            .          .           		throw("stopm spinning") 
   3008            .          .           	} 
   3009            .          .            
   3010            .       40ms           	lock(&sched.lock) 
                                               ⋮ lockWithRank(l, getLockRank(l))      lock_spinbit.go:152
                                               ⋮ lock2(l)                             lockrank_off.go:24
   3011            .       30ms           	mput(gp.m) 
   3012            .       30ms           	unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3013            .      1.08s           	mPark() 
                                               ⋮ notesleep(&gp.m.park)                proc.go:1974
   3014         10ms      130ms           	acquirep(gp.m.nextp.ptr()) 
   3015            .          .           	gp.m.nextp = 0 
   3016            .          .           } 
   3017            .          .            
   3018            .          .           func mspinning() { 
   3019            .          .           	// startm's caller incremented nmspinning. Set the new M's spinning. 
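
Note: the 1.08s in mPark is the thread blocked on its m.park note until notewakeup in startm (shown further down, proc.go:3126) releases it. A rough user-space analogue of the runtime's one-shot note, assuming a condition-variable implementation (the real runtime uses futexes or semaphores):

    package sched

    import "sync"

    // note: sleep blocks until wake fires; clear re-arms it for reuse.
    type note struct {
    	mu     sync.Mutex
    	cond   *sync.Cond
    	wakeup bool
    }

    func newNote() *note {
    	n := &note{}
    	n.cond = sync.NewCond(&n.mu)
    	return n
    }

    func (n *note) sleep() { // cf. notesleep(&gp.m.park) in mPark
    	n.mu.Lock()
    	for !n.wakeup {
    		n.cond.Wait()
    	}
    	n.mu.Unlock()
    }

    func (n *note) wake() { // cf. notewakeup(&nmp.park) in startm
    	n.mu.Lock()
    	n.wakeup = true
    	n.cond.Signal()
    	n.mu.Unlock()
    }

    func (n *note) clear() { n.wakeup = false } // cf. noteclear after waking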

runtime.startm

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   3072            .          .           			} 
   3073            .          .           			releasem(mp) 
   3074            .          .           			return 
   3075            .          .           		} 
   3076            .          .           	} 
   3077         10ms       10ms           	nmp := mget() 
                                               ⋮ sched.midle = mp.schedlink           proc.go:6844
   3078            .          .           	if nmp == nil { 
   3079            .          .           		// No M is available, we must drop sched.lock and call newm. 
   3080            .          .           		// However, we already own a P to assign to the M. 
   3081            .          .           		// 
   3082            .          .           		// Once sched.lock is released, another G (e.g., in a syscall), 

runtime.startm

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      2.76s (flat, cum)  5.78%
   3107            .          .           		// Preemption is now safe. 
   3108            .          .           		releasem(mp) 
   3109            .          .           		return 
   3110            .          .           	} 
   3111            .          .           	if !lockheld { 
   3112            .       30ms           		unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3113            .          .           	} 
   3114         10ms       10ms           	if nmp.spinning { 
   3115            .          .           		throw("startm: m is spinning") 
   3116            .          .           	} 
   3117            .          .           	if nmp.nextp != 0 { 
   3118            .          .           		throw("startm: m has p") 
   3119            .          .           	} 
   3120            .          .           	if spinning && !runqempty(pp) { 
   3121            .          .           		throw("startm: p has runnable gs") 
   3122            .          .           	} 
   3123            .          .           	// The caller incremented nmspinning, so set m.spinning in the new M. 
   3124            .          .           	nmp.spinning = spinning 
   3125            .          .           	nmp.nextp.set(pp) 
   3126            .      2.69s           	notewakeup(&nmp.park) 
   3127            .          .           	// Ownership transfer of pp committed by wakeup. Preemption is now 
   3128            .          .           	// safe. 
   3129         10ms       10ms           	releasem(mp) 
                                               ⋮ mp.locks--                           runtime1.go:637
   3130         20ms       20ms           } 
   3131            .          .            
   3132            .          .           // Hands off P from syscall or locked M. 
   3133            .          .           // Always runs without a P, so write barriers are not allowed. 
   3134            .          .           // 
   3135            .          .           //go:nowritebarrierrec 

runtime.wakep

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      3.17s (flat, cum)  6.64%
   3215            .          .           // 
   3216            .          .           //go:linkname wakep 
   3217            .          .           func wakep() { 
   3218            .          .           	// Be conservative about spinning threads, only start one if none exist 
   3219            .          .           	// already. 
   3220         40ms       40ms           	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) { 
                                               ⋮ return Loadint32(&i.value)           types.go:21
                                               ⋮ return Casint32(&i.value, old, new)  types.go:37
   3221            .          .           		return 
   3222            .          .           	} 
   3223            .          .            
   3224            .          .           	// Disable preemption until ownership of pp transfers to the next M in 
   3225            .          .           	// startm. Otherwise preemption here would leave pp stuck waiting to 
   3226            .          .           	// enter _Pgcstop. 
   3227            .          .           	// 
   3228            .          .           	// See preemption comment on acquirem in startm for more details. 
   3229            .          .           	mp := acquirem() 
   3230            .          .            
   3231            .          .           	var pp *p 
   3232            .      100ms           	lock(&sched.lock) 
                                               ⋮ lockWithRank(l, getLockRank(l))      lock_spinbit.go:152
                                               ⋮ lock2(l)                             lockrank_off.go:24
   3233            .      220ms           	pp, _ = pidlegetSpinning(0) 
   3234            .          .           	if pp == nil { 
   3235            .          .           		if sched.nmspinning.Add(-1) < 0 { 
   3236            .          .           			throw("wakep: negative nmspinning") 
   3237            .          .           		} 
   3238            .       10ms           		unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3239            .          .           		releasem(mp) 
   3240            .          .           		return 
   3241            .          .           	} 
   3242            .          .           	// Since we always have a P, the race in the "No M is available" 
   3243            .          .           	// comment in startm doesn't apply during the small window between the 
   3244            .          .           	// unlock here and lock in startm. A checkdead in between will always 
   3245            .          .           	// see at least one running M (ours). 
   3246            .       30ms           	unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3247            .          .            
   3248            .      2.77s           	startm(pp, true, false) 
   3249            .          .            
   3250            .          .           	releasem(mp) 
   3251            .          .           } 
   3252            .          .            
   3253            .          .           // Stops execution of the current m that is locked to a g until the g is runnable again. 
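
Note: wakep deliberately starts at most one spinning M: the Load plus 0 -> 1 CAS on nmspinning lets exactly one caller proceed to startm, and everyone else returns immediately. A minimal sketch of that gate (names are illustrative):

    package sched

    import "sync/atomic"

    var nmspinning atomic.Int32 // count of spinning worker threads

    // maybeWakeWorker mirrors wakep's gate: only the caller that wins the
    // 0 -> 1 CAS pays the cost of waking a worker.
    func maybeWakeWorker(wake func()) {
    	if nmspinning.Load() != 0 || !nmspinning.CompareAndSwap(0, 1) {
    		return
    	}
    	wake() // cf. startm(pp, true, false)
    }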

runtime.execute

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      240ms (flat, cum)   0.5%
   3345            .          .            
   3346            .          .           	// Assign gp.m before entering _Grunning so running Gs have an M. 
   3347            .          .           	mp.curg = gp 
   3348            .          .           	gp.m = mp 
   3349            .          .           	gp.syncSafePoint = false // Clear the flag, which may have been set by morestack. 
   3350            .      200ms           	casgstatus(gp, _Grunnable, _Grunning) 
   3351            .          .           	gp.waitsince = 0 
   3352            .          .           	gp.preempt = false 
   3353            .          .           	gp.stackguard0 = gp.stack.lo + stackGuard 
   3354            .          .           	if !inheritTime { 
   3355            .          .           		mp.p.ptr().schedtick++ 
   3356            .          .           	} 
   3357            .          .            
   3358            .          .           	// Check whether the profiler needs to be turned on or off. 
   3359            .          .           	hz := sched.profilehz 
   3360         10ms       10ms           	if mp.profilehz != hz { 
   3361            .          .           		setThreadCPUProfiler(hz) 
   3362            .          .           	} 
   3363            .          .            
   3364         10ms       10ms           	trace := traceAcquire() 
                                               ⋮ if !traceEnabled() {                 traceruntime.go:188
                                               ⋮ return trace.enabled                 traceruntime.go:151
   3365            .          .           	if trace.ok() { 
   3366            .          .           		trace.GoStart() 
   3367            .          .           		traceRelease(trace) 
   3368            .          .           	} 
   3369            .          .            
   3370         20ms       20ms           	gogo(&gp.sched) 
   3371            .          .           } 
   3372            .          .            
   3373            .          .           // Finds a runnable goroutine to execute. 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      160ms (flat, cum)  0.33%
   3374            .          .           // Tries to steal from other P's, get g from local or global queue, poll network. 
   3375            .          .           // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace 
   3376            .          .           // reader) so the caller should try to wake a P. 
   3377         10ms       10ms           func findRunnable() (gp *g, inheritTime, tryWakeP bool) { 
   3378            .          .           	mp := getg().m 
   3379            .          .            
   3380            .          .           	// The conditions here and in handoffp must agree: if 
   3381            .          .           	// findrunnable would return a G to run, handoffp must start 
   3382            .          .           	// an M. 
   3383            .          .            
   3384            .          .           top: 
   3385            .          .           	// We may have collected an allp snapshot below. The snapshot is only 
    3386            .          .           	// required in each loop iteration. Clear it to allow GC to collect the 
   3387            .          .           	// slice. 
   3388            .       40ms           	mp.clearAllpSnapshot() 
   3389            .          .            
   3390         10ms       10ms           	pp := mp.p.ptr() 
   3391         20ms       20ms           	if sched.gcwaiting.Load() { 
                                               ⋮ return b.u.Load() != 0               types.go:168
                                               ⋮ return Load8(&u.value)               types.go:124
   3392            .          .           		gcstopm() 
   3393            .          .           		goto top 
   3394            .          .           	} 
   3395            .          .           	if pp.runSafePointFn != 0 { 
   3396            .          .           		runSafePointFn() 
   3397            .          .           	} 
   3398            .          .            
   3399            .          .           	// now and pollUntil are saved for work stealing later, 
   3400            .          .           	// which may steal timers. It's important that between now 
   3401            .          .           	// and then, nothing blocks, so these numbers remain mostly 
   3402            .          .           	// relevant. 
   3403            .       40ms           	now, pollUntil, _ := pp.timers.check(0, nil) 
   3404            .          .            
   3405            .          .           	// Try to schedule the trace reader. 
   3406         10ms       10ms           	if traceEnabled() || traceShuttingDown() { 
                                               ⋮ return trace.enabled                 traceruntime.go:151
   3407            .          .           		gp := traceReader() 
   3408            .          .           		if gp != nil { 
   3409            .          .           			trace := traceAcquire() 
   3410            .          .           			casgstatus(gp, _Gwaiting, _Grunnable) 
   3411            .          .           			if trace.ok() { 
   3412            .          .           				trace.GoUnpark(gp, 0) 
   3413            .          .           				traceRelease(trace) 
   3414            .          .           			} 
   3415            .          .           			return gp, false, true 
   3416            .          .           		} 
   3417            .          .           	} 
   3418            .          .            
   3419            .          .           	// Try to schedule a GC worker. 
   3420         30ms       30ms           	if gcBlackenEnabled != 0 { 
   3421            .          .           		gp, tnow := gcController.findRunnableGCWorker(pp, now) 
   3422            .          .           		if gp != nil { 
   3423            .          .           			return gp, false, true 
   3424            .          .           		} 
   3425            .          .           		now = tnow 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:       190ms      210ms (flat, cum)  0.44%
   3443            .          .           			ready(gp, 0, true) 
   3444            .          .           		} 
   3445            .          .           	} 
   3446            .          .            
   3447            .          .           	// Wake up one or more cleanup Gs. 
   3448         20ms       20ms           	if gcCleanups.needsWake() { 
                                               ⋮ return q.workUnits.Load() > 0 && (q.asleep.Load() > 0 || q.ng.Load() < maxCleanupGs())    mcleanup.go:506
   3449            .          .           		gcCleanups.wake() 
   3450            .          .           	} 
   3451            .          .            
   3452         20ms       20ms           	if *cgo_yield != nil { 
   3453            .          .           		asmcgocall(*cgo_yield, nil) 
   3454            .          .           	} 
   3455            .          .            
   3456            .          .           	// local runq 
   3457        100ms      100ms           	if gp, inheritTime := runqget(pp); gp != nil { 
                                               ⋮ if next != 0 && pp.runnext.cas(next, 0) {    proc.go:7184
                                               ⋮ h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers    proc.go:7189
                                               ⋮ t := pp.runqtail                     proc.go:7190
                                               ⋮ if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume    proc.go:7195
   3458            .          .           		return gp, inheritTime, false 
   3459            .          .           	} 
   3460            .          .            
   3461            .          .           	// global runq 
   3462         50ms       50ms           	if !sched.runq.empty() { 
   3463            .       10ms           		lock(&sched.lock) 
                                               ⋮ lockWithRank(l, getLockRank(l))      lock_spinbit.go:152
                                               ⋮ lock2(l)                             lockrank_off.go:24
   3464            .          .           		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2) 
   3465            .       10ms           		unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3466            .          .           		if gp != nil { 
   3467            .          .           			if runqputbatch(pp, &q); !q.empty() { 
   3468            .          .           				throw("Couldn't put Gs into empty local runq") 
   3469            .          .           			} 
   3470            .          .           			return gp, false, false 
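
Note: the 100ms in runqget above is the lock-free P-local run queue: a "runnext" slot tried first, then an atomic head/tail ring, as the inlined frames show. A simplified single-consumer sketch of that structure (the real queue also serves stealers, hence the acquire/release ordering it uses):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // runq: a fixed ring with atomic head/tail plus a priority runnext slot.
    type runq struct {
    	head, tail atomic.Uint32
    	runnext    atomic.Pointer[int] // stand-in for the runnext guintptr
    	ring       [256]*int
    }

    func (q *runq) get() (task *int, inheritTime bool) {
    	// runnext is tried first and confers the rest of the time slice.
    	if next := q.runnext.Load(); next != nil && q.runnext.CompareAndSwap(next, nil) {
    		return next, true
    	}
    	for {
    		h := q.head.Load() // a load-acquire in the real runtime
    		t := q.tail.Load()
    		if t == h {
    			return nil, false // local queue empty
    		}
    		task = q.ring[h%uint32(len(q.ring))]
    		if q.head.CompareAndSwap(h, h+1) { // commits the dequeue
    			return task, false
    		}
    	}
    }

    func main() {
    	var q runq
    	one := 1
    	q.ring[0] = &one
    	q.tail.Store(1)
    	task, inherit := q.get()
    	fmt.Println(*task, inherit) // 1 false
    }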

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      1.63s (flat, cum)  3.41%
   3502            .          .           	// Limit the number of spinning Ms to half the number of busy Ps. 
   3503            .          .           	// This is necessary to prevent excessive CPU consumption when 
   3504            .          .           	// GOMAXPROCS>>1 but the program parallelism is low. 
   3505            .          .           	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() { 
   3506            .          .           		if !mp.spinning { 
   3507         80ms       80ms           			mp.becomeSpinning() 
                                               ⋮ sched.nmspinning.Add(1)              proc.go:1072
                                               ⋮ return Xaddint32(&i.value, delta)    types.go:56
   3508            .          .           		} 
   3509            .          .            
   3510            .      1.55s           		gp, inheritTime, tnow, w, newWork := stealWork(now) 
   3511            .          .           		if gp != nil { 
   3512            .          .           			// Successfully stole. 
   3513            .          .           			return gp, inheritTime, false 
   3514            .          .           		} 
   3515            .          .           		if newWork { 
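
Note: the condition at proc.go:3505 keeps spinning Ms below half the number of busy Ps, so a mostly-idle program with a large GOMAXPROCS does not burn CPU in spinning workers. As a standalone helper (illustrative, not a runtime function):

    package sched

    // shouldSpin mirrors the throttle above: stay spinning if we already
    // are, otherwise only become a spinner while spinners number fewer
    // than half the busy Ps.
    func shouldSpin(spinning bool, nmspinning, gomaxprocs, npidle int32) bool {
    	return spinning || 2*nmspinning < gomaxprocs-npidle
    }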

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   3527            .          .            
   3528            .          .           	// We have nothing to do. 
   3529            .          .           	// 
   3530            .          .           	// If we're in the GC mark phase, can safely scan and blacken objects, 
   3531            .          .           	// and have work to do, run idle-time marking rather than give up the P. 
   3532         20ms       20ms           	if gcBlackenEnabled != 0 && gcMarkWorkAvailable(pp) && gcController.addIdleMarkWorker() { 
   3533            .          .           		node := (*gcBgMarkWorkerNode)(gcBgMarkWorkerPool.pop()) 
   3534            .          .           		if node != nil { 
   3535            .          .           			pp.gcMarkWorkerMode = gcMarkWorkerIdleMode 
   3536            .          .           			gp := node.gp.ptr() 
   3537            .          .            

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:           0      120ms (flat, cum)  0.25%
   3570            .          .           	// everything up to cap(allp) is immutable. 
   3571            .          .           	// 
   3572            .          .           	// We clear the snapshot from the M after return via 
    3573            .          .           	// mp.clearAllpSnapshot (in schedule) and on each iteration of the top 
   3574            .          .           	// loop. 
   3575            .       20ms           	allpSnapshot := mp.snapshotAllp() 
   3576            .          .           	// Also snapshot masks. Value changes are OK, but we can't allow 
   3577            .          .           	// len to change out from under us. 
   3578            .          .           	idlepMaskSnapshot := idlepMask 
   3579            .          .           	timerpMaskSnapshot := timerpMask 
   3580            .          .            
   3581            .          .           	// return P and block 
   3582            .      100ms           	lock(&sched.lock) 
                                               ⋮ lockWithRank(l, getLockRank(l))      lock_spinbit.go:152
                                               ⋮ lock2(l)                             lockrank_off.go:24
   3583            .          .           	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 { 
   3584            .          .           		unlock(&sched.lock) 
   3585            .          .           		goto top 
   3586            .          .           	} 
   3587            .          .           	if !sched.runq.empty() { 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:           0      280ms (flat, cum)  0.59%
   3599            .          .           		// See "Delicate dance" comment below. 
   3600            .          .           		mp.becomeSpinning() 
   3601            .          .           		unlock(&sched.lock) 
   3602            .          .           		goto top 
   3603            .          .           	} 
   3604            .       10ms           	if releasep() != pp { 
   3605            .          .           		throw("findrunnable: wrong p") 
   3606            .          .           	} 
   3607            .      190ms           	now = pidleput(pp, now) 
   3608            .       80ms           	unlock(&sched.lock) 
                                               ⋮ unlockWithRank(l)                    lock_spinbit.go:261
                                               ⋮ unlock2(l)                           lockrank_off.go:35
   3609            .          .            
   3610            .          .           	// Delicate dance: thread transitions from spinning to non-spinning 
   3611            .          .           	// state, potentially concurrently with submission of new work. We must 
   3612            .          .           	// drop nmspinning first and then check all sources again (with 
   3613            .          .           	// #StoreLoad memory barrier in between). If we do it the other way 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      100ms (flat, cum)  0.21%
   3644            .          .           	// Also see "Worker thread parking/unparking" comment at the top of the 
   3645            .          .           	// file. 
   3646            .          .           	wasSpinning := mp.spinning 
   3647            .          .           	if mp.spinning { 
   3648            .          .           		mp.spinning = false 
   3649         10ms       10ms           		if sched.nmspinning.Add(-1) < 0 { 
                                               ⋮ return Xaddint32(&i.value, delta)    types.go:56
   3650            .          .           			throw("findrunnable: negative nmspinning") 
   3651            .          .           		} 
   3652            .          .            
    3653            .          .           		// Note that for correctness, only the last M transitioning from 
   3654            .          .           		// spinning to non-spinning must perform these rechecks to 
   3655            .          .           		// ensure no missed work. However, the runtime has some cases 
   3656            .          .           		// of transient increments of nmspinning that are decremented 
   3657            .          .           		// without going through this path, so we must be conservative 
   3658            .          .           		// and perform the check on all spinning Ms. 
   3659            .          .           		// 
   3660            .          .           		// See https://go.dev/issue/43997. 
   3661            .          .            
   3662            .          .           		// Check global and P runqueues again. 
   3663            .          .            
   3664            .       30ms           		lock(&sched.lock) 
                                               ⋮ lockWithRank(l, getLockRank(l))      lock_spinbit.go:152
                                               ⋮ lock2(l)                             lockrank_off.go:24
   3665            .          .           		if !sched.runq.empty() { 
   3666            .          .           			pp, _ := pidlegetSpinning(0) 
   3667            .          .           			if pp != nil { 
   3668            .          .           				gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2) 
   3669            .          .           				unlock(&sched.lock) 
   3670            .          .           				if gp == nil { 
   3671            .          .           					throw("global runq empty with non-zero runqsize") 
   3672            .          .           				} 
   3673            .          .           				if runqputbatch(pp, &q); !q.empty() { 
   3674            .          .           					throw("Couldn't put Gs into empty local runq") 
   3675            .          .           				} 
   3676            .          .           				acquirep(pp) 
   3677            .          .           				mp.becomeSpinning() 
   3678            .          .           				return gp, false, false 
   3679            .          .           			} 
   3680            .          .           		} 
   3681            .          .           		unlock(&sched.lock) 
   3682            .          .            
   3683            .       40ms           		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot) 
   3684            .          .           		if pp != nil { 
   3685            .       10ms           			acquirep(pp) 
   3686            .          .           			mp.becomeSpinning() 
   3687            .          .           			goto top 
   3688            .          .           		} 
   3689            .          .            
   3690            .          .           		// Check for idle-priority GC work again. 
   3691            .       10ms           		pp, gp := checkIdleGCNoP() 
   3692            .          .           		if pp != nil { 
   3693            .          .           			acquirep(pp) 
   3694            .          .           			mp.becomeSpinning() 
   3695            .          .            
   3696            .          .           			// Run the idle worker. 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:       110ms      160ms (flat, cum)  0.33%
   3708            .          .           		// transitioning from spinning to non-spinning. 
   3709            .          .           		// 
   3710            .          .           		// Note that we cannot use checkTimers here because it calls 
   3711            .          .           		// adjusttimers which may need to allocate memory, and that isn't 
   3712            .          .           		// allowed when we don't have an active P. 
   3713         10ms       60ms           		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil) 
   3714            .          .           	} 
   3715            .          .            
    3716            .          .           	// We don't need allp anymore at this point, but can't clear the 
    3717            .          .           	// snapshot without a P for the write barrier. 
   3718            .          .            
   3719            .          .           	// Poll network until next timer. 
   3720        100ms      100ms           	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 { 
                                               ⋮ return netpollWaiters.Load() > 0     netpoll.go:678
                                               ⋮ return Load(&u.value)                types.go:194
   3721            .          .           		sched.pollUntil.Store(pollUntil) 
   3722            .          .           		if mp.p != 0 { 
   3723            .          .           			throw("findrunnable: netpoll with p") 
   3724            .          .           		} 
   3725            .          .           		if mp.spinning { 
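
Note: the Swap(0) on sched.lastpoll at proc.go:3720 elects a single M to block in netpoll; all others fall through to stopm. A sketch of that gate (illustrative names, assuming lastpoll holds the last poll time and 0 while an M is polling):

    package sched

    import "sync/atomic"

    // lastpoll: nonzero = no one is blocked in netpoll right now.
    var lastpoll atomic.Int64

    // tryBecomePoller mirrors the single-poller gate: exactly one caller
    // observes a nonzero value and claims blocking netpoll duty.
    func tryBecomePoller() bool {
    	return lastpoll.Swap(0) != 0
    }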

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      1.34s (flat, cum)  2.81%
   3775            .          .           			} 
   3776            .          .           			goto top 
   3777            .          .           		} 
   3778            .          .           	} else if pollUntil != 0 && netpollinited() { 
   3779            .          .           		pollerPollUntil := sched.pollUntil.Load() 
   3780         10ms       10ms           		if pollerPollUntil == 0 || pollerPollUntil > pollUntil { 
   3781            .          .           			netpollBreak() 
   3782            .          .           		} 
   3783            .          .           	} 
   3784            .      1.33s           	stopm() 
   3785            .          .           	goto top 
   3786            .          .           } 
   3787            .          .            
   3788            .          .           // pollWork reports whether there is non-background work this P could 
   3789            .          .           // be doing. This is a fairly lightweight check to be used for 

runtime.stealWork

/usr/lib/go/src/runtime/proc.go

  Total:       440ms      570ms (flat, cum)  1.19%
   3817            .          .           	pp := getg().m.p.ptr() 
   3818            .          .            
   3819            .          .           	ranTimer := false 
   3820            .          .            
   3821            .          .           	const stealTries = 4 
   3822         50ms       50ms           	for i := 0; i < stealTries; i++ { 
   3823         80ms       80ms           		stealTimersOrRunNextG := i == stealTries-1 
   3824            .          .            
   3825         90ms       90ms           		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() { 
                                               ⋮ mp := getg().m                       rand.go:228
                                               ⋮ mp.cheaprand += 0xa0761d6478bd642f   rand.go:235
                                               ⋮ return uint32(hi ^ lo)               rand.go:237
                                               ⋮ return enum.i == enum.count          proc.go:7597
                                               ⋮ enum.pos = (enum.pos + enum.inc) % enum.count    proc.go:7602
   3826         40ms       40ms           			if sched.gcwaiting.Load() { 
                                               ⋮ return b.u.Load() != 0               types.go:168
                                               ⋮ return Load8(&u.value)               types.go:124
   3827            .          .           				// GC work may be available. 
   3828            .          .           				return nil, false, now, pollUntil, true 
   3829            .          .           			} 
   3830         80ms       80ms           			p2 := allp[enum.position()] 
   3831         80ms       80ms           			if pp == p2 { 
   3832            .          .           				continue 
   3833            .          .           			} 
   3834            .          .            
   3835            .          .           			// Steal timers from p2. This call to checkTimers is the only place 
   3836            .          .           			// where we might hold a lock on a different P's timers. We do this 
   3837            .          .           			// once on the last pass before checking runnext because stealing 
   3838            .          .           			// from the other P's runnext should be the last resort, so if there 
   3839            .          .           			// are timers to steal do that first. 
   3840            .          .           			// 
   3841            .          .           			// We only check timers on one of the stealing iterations because 
   3842            .          .           			// the time stored in now doesn't change in this loop and checking 
   3843            .          .           			// the timers for each P more than once with the same value of now 
   3844            .          .           			// is probably a waste of time. 
   3845            .          .           			// 
   3846            .          .           			// timerpMask tells us whether the P may have timers at all. If it 
   3847            .          .           			// can't, no need to check at all. 
   3848         20ms       20ms           			if stealTimersOrRunNextG && timerpMask.read(enum.position()) {
                                                           mask := uint32(1) << (id % 32)                                   proc.go:6924
                                                           return (atomic.Load(&p[word]) & mask) != 0                       proc.go:6925

   3849            .      130ms           				tnow, w, ran := p2.timers.check(now, nil) 
   3850            .          .           				now = tnow 
   3851            .          .           				if w != 0 && (pollUntil == 0 || w < pollUntil) { 
   3852            .          .           					pollUntil = w 
   3853            .          .           				} 
   3854            .          .           				if ran { 

runtime.stealWork

/usr/lib/go/src/runtime/proc.go

  Total:       280ms      980ms (flat, cum)  2.05%
   3866            .          .           					ranTimer = true 
   3867            .          .           				} 
   3868            .          .           			} 
   3869            .          .            
   3870            .          .           			// Don't bother to attempt to steal if p2 is idle. 
   3871        210ms      210ms           			if !idlepMask.read(enum.position()) {
                                                           mask := uint32(1) << (id % 32)                                   proc.go:6924
                                                           return (atomic.Load(&p[word]) & mask) != 0                       proc.go:6925

   3872         10ms      710ms           				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil { 
   3873         60ms       60ms           					return gp, false, now, pollUntil, ranTimer 
   3874            .          .           				} 
   3875            .          .           			} 
   3876            .          .           		} 
   3877            .          .           	} 
   3878            .          .            
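
The inlined lines attributed to proc.go:7597 and proc.go:7602 above are stealOrder's randomized enumeration: stealWork visits the Ps in a pseudo-random permutation, presumably so concurrent stealers start at different victims. A minimal sketch of the same idea, assuming inc has already been chosen coprime to count (the runtime selects it from a precomputed coprime table, elided here):

    // randomEnum walks 0..count-1 in a pseudo-random order. Because inc
    // is coprime to count, pos visits every index exactly once before
    // the walk completes.
    type randomEnum struct {
        i, count, pos, inc uint32
    }

    func startEnum(count, seed, inc uint32) randomEnum {
        return randomEnum{count: count, pos: seed % count, inc: inc}
    }

    func (e *randomEnum) done() bool       { return e.i == e.count }
    func (e *randomEnum) next()            { e.i++; e.pos = (e.pos + e.inc) % e.count }
    func (e *randomEnum) position() uint32 { return e.pos }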

runtime.checkRunqsNoP

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       40ms (flat, cum) 0.084%
   3887            .          .           // On entry we have no P. If a G is available to steal and a P is available, 
   3888            .          .           // the P is returned; the caller should acquire it and attempt to steal
   3889            .          .           // the work to it.
   3890            .          .           func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p { 
   3891            .          .           	for id, p2 := range allpSnapshot { 
   3892         30ms       30ms           		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) {
                                                           head := atomic.Load(&pp.runqhead)                                proc.go:7033
                                                           if tail == atomic.Load(&pp.runqtail) {                           proc.go:7036

   3893            .          .           			lock(&sched.lock) 
   3894            .       10ms           			pp, _ := pidlegetSpinning(0) 
   3895            .          .           			if pp == nil { 
   3896            .          .           				// Can't get a P, don't bother checking remaining Ps. 
   3897            .          .           				unlock(&sched.lock) 
   3898            .          .           				return nil 
   3899            .          .           			} 

runtime.checkTimersNoP

/usr/lib/go/src/runtime/proc.go

  Total:        50ms       50ms (flat, cum)   0.1%
   3909            .          .           // Check all Ps for a timer expiring sooner than pollUntil. 
   3910            .          .           // 
   3911            .          .           // Returns updated pollUntil value. 
   3912            .          .           func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 { 
   3913            .          .           	for id, p2 := range allpSnapshot { 
   3914         10ms       10ms           		if timerpMaskSnapshot.read(uint32(id)) {
                                                           return (atomic.Load(&p[word]) & mask) != 0                       proc.go:6925

   3915         10ms       10ms           			w := p2.timers.wakeTime()
                                                           when := ts.minWhenHeap.Load()                                    time.go:989

   3916         10ms       10ms           			if w != 0 && (pollUntil == 0 || w < pollUntil) { 
   3917            .          .           				pollUntil = w 
   3918            .          .           			} 
   3919            .          .           		} 
   3920            .          .           	} 
   3921            .          .            
   3922         20ms       20ms           	return pollUntil 
   3923            .          .           } 
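
checkTimersNoP above is a plain minimum reduction in which 0 means "no deadline". The same rule in isolation (names are ours):

    // earliestWake merges per-P timer wake-up times into pollUntil,
    // treating 0 as "this P has no pending timer".
    func earliestWake(wakeTimes []int64, pollUntil int64) int64 {
        for _, w := range wakeTimes {
            if w != 0 && (pollUntil == 0 || w < pollUntil) {
                pollUntil = w
            }
        }
        return pollUntil
    }
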
   3924            .          .            
   3925            .          .           // Check for idle-priority GC, without a P on entry. 

runtime.checkIdleGCNoP

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   3926            .          .           // 
   3927            .          .           // If some GC work, a P, and a worker G are all available, the P and G will be 
   3928            .          .           // returned. The returned P has not been wired yet. 
   3929         10ms       10ms           func checkIdleGCNoP() (*p, *g) { 
   3930            .          .           	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we 
   3931            .          .           	// must check again after acquiring a P. As an optimization, we also check 
   3932            .          .           	// if an idle mark worker is needed at all. This is OK here, because if we 
   3933            .          .           	// observe that one isn't needed, at least one is currently running. Even if 
   3934            .          .           	// it stops running, its own journey into the scheduler should schedule it 

runtime.resetspinning

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      2.08s (flat, cum)  4.35%
   4010            .          .           	gp := getg() 
   4011            .          .           	if !gp.m.spinning { 
   4012            .          .           		throw("resetspinning: not a spinning m") 
   4013            .          .           	} 
   4014            .          .           	gp.m.spinning = false 
   4015         50ms       50ms           	nmspinning := sched.nmspinning.Add(-1)
                                                           return Xaddint32(&i.value, delta)                                types.go:56

   4016            .          .           	if nmspinning < 0 { 
   4017            .          .           		throw("findrunnable: negative nmspinning") 
   4018            .          .           	} 
   4019            .          .           	// M wakeup policy is deliberately somewhat conservative, so check if we 
   4020            .          .           	// need to wakeup another P here. See "Worker thread parking/unparking" 
   4021            .          .           	// comment at the top of the file for details. 
   4022            .      2.03s           	wakep() 
   4023            .          .           } 
   4024            .          .            
   4025            .          .           // injectglist adds each runnable G on the list to some run queue, 
   4026            .          .           // and clears glist. If there is no current P, they are added to the 
   4027            .          .           // global queue, and up to npidle M's are started to run them. 
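
Nearly all of resetspinning's 2.08s cumulative time sits in wakep. A paraphrase of the conservative policy the comment refers to, with stand-in names for the scheduler fields (this is our sketch, not the runtime's code): wake at most one new spinning M, and only when there is an idle P for it to run on.

    import "sync/atomic"

    var (
        npidle     atomic.Int32 // stand-in for sched.npidle
        nmspinning atomic.Int32 // stand-in for sched.nmspinning
    )

    func maybeWakeWorker(startSpinningM func()) {
        if npidle.Load() == 0 {
            return // no idle P: a woken M would have nothing to run on
        }
        if !nmspinning.CompareAndSwap(0, 1) {
            return // an M is already spinning; it will find the new work
        }
        startSpinningM() // hypothetical: unpark or create an M marked spinning
    }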

runtime.schedule

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      4.06s (flat, cum)  8.50%
   4118            .          .           	wakep() 
   4119            .          .           } 
   4120            .          .            
   4121            .          .           // One round of scheduler: find a runnable goroutine and execute it. 
   4122            .          .           // Never returns. 
   4123         10ms       10ms           func schedule() { 
   4124            .          .           	mp := getg().m 
   4125            .          .            
   4126            .          .           	if mp.locks != 0 { 
   4127            .          .           		throw("schedule: holding locks") 
   4128            .          .           	} 
   4129            .          .            
   4130            .          .           	if mp.lockedg != 0 { 
   4131            .          .           		stoplockedm() 
   4132            .          .           		execute(mp.lockedg.ptr(), false) // Never returns. 
   4133            .          .           	} 
   4134            .          .            
   4135            .          .           	// We should not schedule away from a g that is executing a cgo call, 
   4136            .          .           	// since the cgo call is using the m's g0 stack. 
   4137         10ms       10ms           	if mp.incgo { 
   4138            .          .           		throw("schedule: in cgo") 
   4139            .          .           	} 
   4140            .          .            
   4141            .          .           top: 
   4142            .          .           	pp := mp.p.ptr() 
   4143            .          .           	pp.preempt = false 
   4144            .          .            
   4145            .          .           	// Safety check: if we are spinning, the run queue should be empty. 
   4146            .          .           	// Check this before calling checkTimers, as that might call 
   4147            .          .           	// goready to put a ready goroutine on the local run queue. 
   4148            .          .           	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { 
   4149            .          .           		throw("schedule: spinning with local work") 
   4150            .          .           	} 
   4151            .          .            
   4152         20ms      4.04s           	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available 
   4153            .          .            
   4154            .          .           	// findRunnable may have collected an allp snapshot. The snapshot is 
   4155            .          .           	// only required within findRunnable. Clear it to allow the GC to collect the
   4156            .          .           	// slice. 
   4157            .          .           	mp.clearAllpSnapshot() 

runtime.schedule

/usr/lib/go/src/runtime/proc.go

  Total:        20ms      2.10s (flat, cum)  4.40%
   4170            .          .            
   4171            .          .           	// This thread is going to run a goroutine and is not spinning anymore, 
   4172            .          .           	// so if it was marked as spinning we need to reset it now and potentially 
   4173            .          .           	// start a new spinning M. 
   4174            .          .           	if mp.spinning { 
   4175            .      2.08s           		resetspinning() 
   4176            .          .           	} 
   4177            .          .            
   4178         20ms       20ms           	if sched.disable.user && !schedEnabled(gp) { 
   4179            .          .           		// Scheduling of this goroutine is disabled. Put it on 
   4180            .          .           		// the list of pending runnable goroutines for when we 
   4181            .          .           		// re-enable user scheduling and look again. 
   4182            .          .           		lock(&sched.lock) 
   4183            .          .           		if schedEnabled(gp) { 

runtime.schedule

/usr/lib/go/src/runtime/proc.go

  Total:        70ms      310ms (flat, cum)  0.65%
   4194            .          .           	// If about to schedule a non-normal goroutine (a GC worker or trace reader),
   4195            .          .           	// wake a P if there is one. 
   4196            .          .           	if tryWakeP { 
   4197            .          .           		wakep() 
   4198            .          .           	} 
   4199         70ms       70ms           	if gp.lockedm != 0 { 
   4200            .          .           		// Hands off own p to the locked m, 
   4201            .          .           		// then blocks waiting for a new p. 
   4202            .          .           		startlockedm(gp) 
   4203            .          .           		goto top 
   4204            .          .           	} 
   4205            .          .            
   4206            .      240ms           	execute(gp, inheritTime) 
   4207            .          .           } 
   4208            .          .            
   4209            .          .           // dropg removes the association between m and the current goroutine m->curg (gp for short). 
   4210            .          .           // Typically a caller sets gp's status away from Grunning and then 
   4211            .          .           // immediately calls dropg to finish the job. The caller is also responsible 

runtime.dropg

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4214            .          .           // readied later, the caller can do other work but eventually should 
   4215            .          .           // call schedule to restart the scheduling of goroutines on this m. 
   4216            .          .           func dropg() { 
   4217            .          .           	gp := getg() 
   4218            .          .            
   4219         10ms       10ms           	setMNoWB(&gp.m.curg.m, nil)
                                                           (*muintptr)(unsafe.Pointer(mp)).set(new)                         runtime2.go:294
                                                          func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) } runtime2.go:286

   4220            .          .           	setGNoWB(&gp.m.curg, nil) 
   4221            .          .           } 
   4222            .          .            
   4223            .          .           func parkunlock_c(gp *g, lock unsafe.Pointer) bool { 
   4224            .          .           	unlock((*mutex)(lock)) 

runtime.park_m

/usr/lib/go/src/runtime/proc.go

  Total:           0      100ms (flat, cum)  0.21%
   4246            .          .           		// transition anymore. 
   4247            .          .           		trace.GoPark(mp.waitTraceBlockReason, mp.waitTraceSkip) 
   4248            .          .           	} 
   4249            .          .           	// N.B. Not using casGToWaiting here because the waitreason is 
   4250            .          .           	// set by park_m's caller. 
   4251            .       20ms           	casgstatus(gp, _Grunning, _Gwaiting) 
   4252            .          .           	if trace.ok() { 
   4253            .          .           		traceRelease(trace) 
   4254            .          .           	} 
   4255            .          .            
   4256            .          .           	dropg() 
   4257            .          .            
   4258            .          .           	if fn := mp.waitunlockf; fn != nil { 
   4259            .       80ms           		ok := fn(gp, mp.waitlock) 
   4260            .          .           		mp.waitunlockf = nil 
   4261            .          .           		mp.waitlock = nil 
   4262            .          .           		if !ok { 
   4263            .          .           			trace := traceAcquire() 
   4264            .          .           			casgstatus(gp, _Gwaiting, _Grunnable) 

runtime.park_m

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.08s (flat, cum)  2.26%
   4275            .          .            
   4276            .          .           	if bubble != nil { 
   4277            .          .           		bubble.decActive() 
   4278            .          .           	} 
   4279            .          .            
   4280            .      1.08s           	schedule() 
   4281            .          .           } 
   4282            .          .            
   4283            .          .           func goschedImpl(gp *g, preempted bool) { 
   4284            .          .           	trace := traceAcquire() 
   4285            .          .           	status := readgstatus(gp) 

runtime.goschedImpl

/usr/lib/go/src/runtime/proc.go

  Total:           0      430ms (flat, cum)   0.9%
   4303            .          .           	} 
   4304            .          .            
   4305            .          .           	dropg() 
   4306            .          .           	lock(&sched.lock) 
   4307            .          .           	globrunqput(gp) 
   4308            .       10ms           	unlock(&sched.lock)
                                                           unlockWithRank(l)                                                lock_spinbit.go:261
                                                          unlock2(l)                                                       lockrank_off.go:35

   4309            .          .            
   4310            .          .           	if mainStarted { 
   4311            .      200ms           		wakep() 
   4312            .          .           	} 
   4313            .          .            
   4314            .      220ms           	schedule() 
   4315            .          .           } 
   4316            .          .            

runtime.gosched_m

/usr/lib/go/src/runtime/proc.go

  Total:           0      430ms (flat, cum)   0.9%
   4317            .          .           // Gosched continuation on g0. 
   4318            .          .           func gosched_m(gp *g) { 
   4319            .      430ms           	goschedImpl(gp, false) 
   4320            .          .           } 
   4321            .          .            
   4322            .          .           // goschedguarded is a forbidden-states-avoided version of gosched_m. 
   4323            .          .           func goschedguarded_m(gp *g) { 
   4324            .          .           	if !canPreemptM(gp.m) { 

runtime.goexit1

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4426            .          .           	runqput(pp, gp, false) 
   4427            .          .           	schedule() 
   4428            .          .           } 
   4429            .          .            
   4430            .          .           // Finishes execution of the current goroutine. 
   4431         10ms       10ms           func goexit1() { 
   4432            .          .           	if raceenabled { 
   4433            .          .           		if gp := getg(); gp.bubble != nil { 
   4434            .          .           			racereleasemergeg(gp, gp.bubble.raceaddr()) 
   4435            .          .           		} 
   4436            .          .           		racegoend() 

runtime.goexit0

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      5.50s (flat, cum) 11.51%
   4442            .          .           	} 
   4443            .          .           	mcall(goexit0) 
   4444            .          .           } 
   4445            .          .            
   4446            .          .           // goexit continuation on g0. 
   4447         10ms       10ms           func goexit0(gp *g) { 
   4448            .      320ms           	gdestroy(gp) 
   4449            .      5.17s           	schedule() 
   4450            .          .           } 
   4451            .          .            
   4452            .          .           func gdestroy(gp *g) { 

runtime.gdestroy

/usr/lib/go/src/runtime/proc.go

  Total:        20ms      170ms (flat, cum)  0.36%
   4453            .          .           	mp := getg().m 
   4454            .          .           	pp := mp.p.ptr() 
   4455            .          .            
   4456            .       10ms           	casgstatus(gp, _Grunning, _Gdead) 
   4457         20ms       20ms           	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo))
                                                           c.maxStackScan.Add(pp.maxStackScanDelta)                         mgcpacer.go:924
                                                          return Xadd64(&u.value, delta)                                   types.go:344

   4458            .      140ms           	if isSystemGoroutine(gp, false) { 
   4459            .          .           		sched.ngsys.Add(-1) 
   4460            .          .           	} 
   4461            .          .           	gp.m = nil 
   4462            .          .           	locked := gp.lockedm != 0 
   4463            .          .           	gp.lockedm = 0 

runtime.gdestroy

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      150ms (flat, cum)  0.31%
   4481            .          .           		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes)) 
   4482            .          .           		gcController.bgScanCredit.Add(scanCredit) 
   4483            .          .           		gp.gcAssistBytes = 0 
   4484            .          .           	} 
   4485            .          .            
   4486         10ms       10ms           	dropg()
                                                           setMNoWB(&gp.m.curg.m, nil)                                      proc.go:4219
                                                          (*muintptr)(unsafe.Pointer(mp)).set(new)                         runtime2.go:294
                                                              func (mp *muintptr) set(m *m) { *mp = muintptr(unsafe.Pointer(m)) } runtime2.go:286

   4487            .          .            
   4488            .          .           	if GOARCH == "wasm" { // no threads yet on wasm 
   4489            .          .           		gfput(pp, gp) 
   4490            .          .           		return 
   4491            .          .           	} 
   4492            .          .            
   4493            .          .           	if locked && mp.lockedInt != 0 { 
   4494            .          .           		print("runtime: mp.lockedInt = ", mp.lockedInt, "\n") 
   4495            .          .           		if mp.isextra { 
   4496            .          .           			throw("runtime.Goexit called in a thread that was not created by the Go runtime") 
   4497            .          .           		} 
   4498            .          .           		throw("exited a goroutine internally locked to the OS thread") 
   4499            .          .           	} 
   4500            .      140ms           	gfput(pp, gp) 
   4501            .          .           	if locked { 
   4502            .          .           		// The goroutine may have locked this thread because 
   4503            .          .           		// it put it in an unusual kernel state. Kill it 
   4504            .          .           		// rather than returning it to the thread pool. 
   4505            .          .            

runtime.save

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4535            .          .           		throw("save on system g not allowed") 
   4536            .          .           	} 
   4537            .          .            
   4538            .          .           	gp.sched.pc = pc 
   4539            .          .           	gp.sched.sp = sp 
   4540         10ms       10ms           	gp.sched.lr = 0 
   4541            .          .           	gp.sched.bp = bp 
   4542            .          .           	// We need to ensure ctxt is zero, but can't have a write 
   4543            .          .           	// barrier here. However, it should always already be zero. 
   4544            .          .           	// Assert that. 
   4545            .          .           	if gp.sched.ctxt != nil { 

runtime.reentersyscall

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       90ms (flat, cum)  0.19%
   4570            .          .           // must always point to a valid stack frame. entersyscall below is the normal 
   4571            .          .           // entry point for syscalls, which obtains the SP and PC from the caller. 
   4572            .          .           // 
   4573            .          .           //go:nosplit 
   4574            .          .           func reentersyscall(pc, sp, bp uintptr) { 
   4575         10ms       10ms           	trace := traceAcquire()
                                                           if !traceEnabled() {                                             traceruntime.go:188

   4576            .          .           	gp := getg() 
   4577            .          .            
   4578            .          .           	// Disable preemption because during this function g is in Gsyscall status, 
   4579            .          .           	// but can have inconsistent g->sched, do not let GC observe it. 
   4580            .          .           	gp.m.locks++ 
   4581            .          .            
   4582            .          .           	// Entersyscall must not call any function that might split/grow the stack. 
   4583            .          .           	// (See details in comment above.) 
   4584            .          .           	// Catch calls that might, by replacing the stack guard with something that 
   4585            .          .           	// will trip any stack check and leaving a flag to tell newstack to die. 
   4586            .          .           	gp.stackguard0 = stackPreempt 
   4587            .          .           	gp.throwsplit = true 
   4588            .          .            
   4589            .          .           	// Leave SP around for GC and traceback. 
   4590            .       10ms           	save(pc, sp, bp) 
   4591            .          .           	gp.syscallsp = sp 
   4592            .          .           	gp.syscallpc = pc 
   4593            .          .           	gp.syscallbp = bp 
   4594            .       70ms           	casgstatus(gp, _Grunning, _Gsyscall) 
   4595            .          .           	if staticLockRanking { 
   4596            .          .           		// When doing static lock ranking casgstatus can call 
   4597            .          .           		// systemstack which clobbers g.sched. 
   4598            .          .           		save(pc, sp, bp) 
   4599            .          .           	} 

runtime.reentersyscall

/usr/lib/go/src/runtime/proc.go

  Total:        90ms       90ms (flat, cum)  0.19%
   4624            .          .           	if sched.sysmonwait.Load() { 
   4625            .          .           		systemstack(entersyscall_sysmon) 
   4626            .          .           		save(pc, sp, bp) 
   4627            .          .           	} 
   4628            .          .            
   4629         10ms       10ms           	if gp.m.p.ptr().runSafePointFn != 0 { 
   4630            .          .           		// runSafePointFn may stack split if run on this stack 
   4631            .          .           		systemstack(runSafePointFn) 
   4632            .          .           		save(pc, sp, bp) 
   4633            .          .           	} 
   4634            .          .            
   4635            .          .           	gp.m.syscalltick = gp.m.p.ptr().syscalltick 
   4636            .          .           	pp := gp.m.p.ptr() 
   4637            .          .           	pp.m = 0 
   4638            .          .           	gp.m.oldp.set(pp) 
   4639            .          .           	gp.m.p = 0 
   4640            .          .           	atomic.Store(&pp.status, _Psyscall) 
   4641         80ms       80ms           	if sched.gcwaiting.Load() {
                                                           return b.u.Load() != 0                                           types.go:168

   4642            .          .           		systemstack(entersyscall_gcwait) 
   4643            .          .           		save(pc, sp, bp) 
   4644            .          .           	} 
   4645            .          .            
   4646            .          .           	gp.m.locks-- 

runtime.entersyscall

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      230ms (flat, cum)  0.48%
   4663            .          .           func entersyscall() { 
   4664            .          .           	// N.B. getcallerfp cannot be written directly as argument in the call 
   4665            .          .           	// to reentersyscall because it forces spilling the other arguments to 
   4666            .          .           	// the stack. This results in exceeding the nosplit stack requirements 
   4667            .          .           	// on some platforms. 
   4668            .       40ms           	fp := getcallerfp() 
   4669            .      180ms           	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp) 
   4670         10ms       10ms           } 
   4671            .          .            
   4672            .          .           func entersyscall_sysmon() { 
   4673            .          .           	lock(&sched.lock) 
   4674            .          .           	if sched.sysmonwait.Load() { 
   4675            .          .           		sched.sysmonwait.Store(false) 

runtime.exitsyscall

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      200ms (flat, cum)  0.42%
   4799            .          .           //go:nowritebarrierrec 
   4800            .          .           //go:linkname exitsyscall 
   4801            .          .           func exitsyscall() { 
   4802            .          .           	gp := getg() 
   4803            .          .            
   4804         20ms       20ms           	gp.m.locks++ // see comment in entersyscall 
   4805            .          .           	if sys.GetCallerSP() > gp.syscallsp { 
   4806            .          .           		throw("exitsyscall: syscall frame is no longer valid") 
   4807            .          .           	} 
   4808            .          .            
   4809            .          .           	gp.waitsince = 0 
   4810            .          .           	oldp := gp.m.oldp.ptr() 
   4811            .          .           	gp.m.oldp = 0 
   4812            .      170ms           	if exitsyscallfast(oldp) { 
   4813            .          .           		// When exitsyscallfast returns success, we have a P so can now use 
   4814            .          .           		// write barriers 
   4815         10ms       10ms           		if goroutineProfile.active { 
   4816            .          .           			// Make sure that gp has had its stack written out to the goroutine 
   4817            .          .           			// profile, exactly as it was when the goroutine profiler first 
   4818            .          .           			// stopped the world. 
   4819            .          .           			systemstack(func() { 
   4820            .          .           				tryRecordGoroutineProfileWB(gp) 

runtime.exitsyscall

/usr/lib/go/src/runtime/proc.go

  Total:        70ms      280ms (flat, cum)  0.59%
   4839            .          .           			}) 
   4840            .          .           		} 
   4841            .          .           		// There's a cpu for us, so we can run. 
   4842            .          .           		gp.m.p.ptr().syscalltick++ 
   4843            .          .           		// We need to cas the status and scan before resuming... 
   4844            .      210ms           		casgstatus(gp, _Gsyscall, _Grunning) 
   4845            .          .           		if trace.ok() { 
   4846            .          .           			traceRelease(trace) 
   4847            .          .           		} 
   4848            .          .            
   4849            .          .           		// Garbage collector isn't running (since we are), 
   4850            .          .           		// so okay to clear syscallsp. 
   4851            .          .           		gp.syscallsp = 0 
   4852         10ms       10ms           		gp.m.locks-- 
   4853            .          .           		if gp.preempt { 
   4854            .          .           			// restore the preemption request in case we've cleared it in newstack 
   4855            .          .           			gp.stackguard0 = stackPreempt 
   4856            .          .           		} else { 
   4857            .          .           			// otherwise restore the real stackGuard, we've spoiled it in entersyscall/entersyscallblock 
   4858         10ms       10ms           			gp.stackguard0 = gp.stack.lo + stackGuard 
   4859            .          .           		} 
   4860            .          .           		gp.throwsplit = false 
   4861            .          .            
   4862         40ms       40ms           		if sched.disable.user && !schedEnabled(gp) { 
   4863            .          .           			// Scheduling of this goroutine is disabled. 
   4864            .          .           			Gosched() 
   4865            .          .           		} 
   4866            .          .            
   4867         10ms       10ms           		return 
   4868            .          .           	} 
   4869            .          .            
   4870            .          .           	gp.m.locks-- 
   4871            .          .            
   4872            .          .           	// Call the scheduler. 

runtime.exitsyscallfast

/usr/lib/go/src/runtime/proc.go

  Total:       140ms      170ms (flat, cum)  0.36%
   4889            .          .           	if sched.stopwait == freezeStopWait { 
   4890            .          .           		return false 
   4891            .          .           	} 
   4892            .          .            
   4893            .          .           	// Try to re-acquire the last P. 
   4894         10ms       10ms           	trace := traceAcquire()
                                                           if !traceEnabled() {                                             traceruntime.go:188
                                                          return trace.enabled                                             traceruntime.go:151

   4895        130ms      130ms           	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { 
   4896            .          .           		// There's a cpu for us, so we can run. 
   4897            .       10ms           		wirep(oldp) 
   4898            .       20ms           		exitsyscallfast_reacquired(trace) 
   4899            .          .           		if trace.ok() { 
   4900            .          .           			traceRelease(trace) 
   4901            .          .           		} 
   4902            .          .           		return true 
   4903            .          .           	} 
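
The fast path above hinges on one compare-and-swap: flipping the old P from _Psyscall to _Pidle claims it before it can be retaken elsewhere, and exactly one contender can win. The same shape in miniature (constants and names are ours):

    import "sync/atomic"

    const (
        pSyscall uint32 = iota // stand-ins for the runtime's _Psyscall, _Pidle
        pIdle
    )

    var status atomic.Uint32 // imagine one such word per P

    // tryReacquire succeeds for exactly one caller; the winner owns the
    // P and may wire it to the current M.
    func tryReacquire() bool {
        return status.CompareAndSwap(pSyscall, pIdle)
    }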

runtime.exitsyscallfast_reacquired

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   4923            .          .           // syscall. 
   4924            .          .           // 
   4925            .          .           //go:nosplit 
   4926            .          .           func exitsyscallfast_reacquired(trace traceLocker) { 
   4927            .          .           	gp := getg() 
   4928         10ms       10ms           	if gp.m.syscalltick != gp.m.p.ptr().syscalltick { 
   4929            .          .           		if trace.ok() { 
   4930            .          .           			// The p was retaken and then entered a syscall again (since gp.m.syscalltick has changed).
   4931            .          .           			// tracev2.GoSysBlock for this syscall was already emitted, 
   4932            .          .           			// but here we effectively retake the p from the new syscall running on the same p. 
   4933            .          .           			systemstack(func() { 
   4934            .          .           				// We're stealing the P. It's treated 
   4935            .          .           				// as if it temporarily stopped running. Then, start running. 
   4936            .          .           				trace.ProcSteal(gp.m.p.ptr(), true) 
   4937            .          .           				trace.ProcStart() 
   4938            .          .           			}) 
   4939            .          .           		} 
   4940            .          .           		gp.m.p.ptr().syscalltick++ 
   4941            .          .           	} 
   4942         10ms       10ms           } 
   4943            .          .            
   4944            .          .           func exitsyscallfast_pidle() bool { 
   4945            .          .           	lock(&sched.lock) 
   4946            .          .           	pp, _ := pidleget(0) 
   4947            .          .           	if pp != nil && sched.sysmonwait.Load() { 

runtime.newproc

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      1.11s (flat, cum)  2.32%
   5156            .          .           // Put it on the queue of g's waiting to run. 
   5157            .          .           // The compiler turns a go statement into a call to this. 
   5158            .          .           func newproc(fn *funcval) { 
   5159            .          .           	gp := getg() 
   5160            .          .           	pc := sys.GetCallerPC() 
   5161         10ms      1.11s           	systemstack(func() { 

runtime.newproc.func1

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      1.07s (flat, cum)  2.24%
   5162            .      710ms           		newg := newproc1(fn, gp, pc, false, waitReasonZero) 
   5163            .          .            
   5164         10ms       10ms           		pp := getg().m.p.ptr() 
   5165            .       40ms           		runqput(pp, newg, true) 
   5166            .          .            
   5167            .          .           		if mainStarted { 
   5168            .      310ms           			wakep() 
   5169            .          .           		} 
   5170            .          .           	}) 
   5171            .          .           } 
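
Since the compiler turns every go statement into a newproc call, goroutine-heavy code concentrates its profile here and in wakep. A minimal reproducer sketch (entirely ours) that drives the newproc -> runqput -> wakep path above:

    package main

    import "sync"

    // spawnStorm launches n trivial goroutines; each loop iteration is
    // one go statement and therefore one trip through newproc.
    func spawnStorm(n int) {
        var wg sync.WaitGroup
        wg.Add(n)
        for i := 0; i < n; i++ {
            go func() { wg.Done() }()
        }
        wg.Wait()
    }

    func main() { spawnStorm(1_000_000) }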

runtime.newproc1

/usr/lib/go/src/runtime/proc.go

  Total:       220ms      710ms (flat, cum)  1.49%
   5173            .          .           // Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn. 
   5174            .          .           // callerpc is the address of the go statement that created this. The caller is responsible 
   5175            .          .           // for adding the new g to the scheduler. If parked is true, waitreason must be non-zero. 
   5176         20ms       20ms           func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g { 
   5177         40ms       40ms           	if fn == nil { 
   5178            .          .           		fatal("go of nil func value") 
   5179            .          .           	} 
   5180            .          .            
   5181         10ms       10ms           	mp := acquirem() // disable preemption because we hold M and P in local vars.
                                                           gp.m.locks++                                                     runtime1.go:630

   5182         10ms       10ms           	pp := mp.p.ptr() 
   5183            .      250ms           	newg := gfget(pp) 
   5184            .          .           	if newg == nil { 
   5185            .          .           		newg = malg(stackMin) 
   5186            .          .           		casgstatus(newg, _Gidle, _Gdead) 
   5187            .          .           		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 
   5188            .          .           	} 
   5189         10ms       10ms           	if newg.stack.hi == 0 { 
   5190            .          .           		throw("newproc1: newg missing stack") 
   5191            .          .           	} 
   5192            .          .            
   5193            .          .           	if readgstatus(newg) != _Gdead { 
   5194            .          .           		throw("newproc1: new g is not Gdead") 
   5195            .          .           	} 
   5196            .          .            
   5197            .          .           	totalSize := uintptr(4*goarch.PtrSize + sys.MinFrameSize) // extra space in case of reads slightly beyond frame 
   5198            .          .           	totalSize = alignUp(totalSize, sys.StackAlign) 
   5199            .          .           	sp := newg.stack.hi - totalSize 
   5200            .          .           	if usesLR { 
   5201            .          .           		// caller's LR 
   5202            .          .           		*(*uintptr)(unsafe.Pointer(sp)) = 0 
   5203            .          .           		prepGoExitFrame(sp) 
   5204            .          .           	} 
   5205            .          .           	if GOARCH == "arm64" { 
   5206            .          .           		// caller's FP 
   5207         30ms       30ms           		*(*uintptr)(unsafe.Pointer(sp - goarch.PtrSize)) = 0 
   5208            .          .           	} 
   5209            .          .            
   5210            .          .           	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 
   5211            .          .           	newg.sched.sp = sp 
   5212            .          .           	newg.stktopsp = sp 
   5213            .          .           	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 
   5214            .          .           	newg.sched.g = guintptr(unsafe.Pointer(newg)) 
   5215            .       20ms           	gostartcallfn(&newg.sched, fn) 
   5216            .          .           	newg.parentGoid = callergp.goid 
   5217            .          .           	newg.gopc = callerpc 
   5218            .          .           	newg.ancestors = saveAncestors(callergp) 
   5219         10ms       10ms           	newg.startpc = fn.fn 
   5220            .          .           	newg.runningCleanups.Store(false) 
   5221            .      120ms           	if isSystemGoroutine(newg, false) { 
   5222            .          .           		sched.ngsys.Add(1) 
   5223            .          .           	} else { 
   5224            .          .           		// Only user goroutines inherit synctest groups and pprof labels. 
   5225            .          .           		newg.bubble = callergp.bubble 
   5226            .          .           		if mp.curg != nil { 
   5227            .          .           			newg.labels = mp.curg.labels 
   5228            .          .           		} 
   5229            .          .           		if goroutineProfile.active { 
   5230            .          .           			// A concurrent goroutine profile is running. It should include 
   5231            .          .           			// exactly the set of goroutines that were alive when the goroutine 
   5232            .          .           			// profiler first stopped the world. That does not include newg, so 
   5233            .          .           			// mark it as not needing a profile before transitioning it from 
   5234            .          .           			// _Gdead. 
   5235            .          .           			newg.goroutineProfiled.Store(goroutineProfileSatisfied) 
   5236            .          .           		} 
   5237            .          .           	} 
   5238            .          .           	// Track initial transition? 
   5239         10ms       10ms           	newg.trackingSeq = uint8(cheaprand())
                                                           mp.cheaprand += 0xa0761d6478bd642f                               rand.go:235

   5240            .          .           	if newg.trackingSeq%gTrackingPeriod == 0 { 
   5241            .          .           		newg.tracking = true 
   5242            .          .           	} 
   5243         70ms       70ms           	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))
                                                           c.maxStackScan.Add(pp.maxStackScanDelta)                         mgcpacer.go:924
                                                           return Xadd64(&u.value, delta)                                   types.go:344

   5244            .          .            
   5245            .          .           	// Get a goid and switch to runnable. Make all this atomic to the tracer. 
   5246            .          .           	trace := traceAcquire() 
   5247            .          .           	var status uint32 = _Grunnable 
   5248            .          .           	if parked { 
   5249            .          .           		status = _Gwaiting 
   5250            .          .           		newg.waitreason = waitreason 
   5251            .          .           	} 
   5252            .          .           	if pp.goidcache == pp.goidcacheend { 
   5253            .          .           		// Sched.goidgen is the last allocated id, 
   5254            .          .           		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 
   5255            .          .           		// At startup sched.goidgen=0, so main goroutine receives goid=1. 
   5256            .          .           		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch) 
   5257            .          .           		pp.goidcache -= _GoidCacheBatch - 1 
   5258            .          .           		pp.goidcacheend = pp.goidcache + _GoidCacheBatch 
   5259            .          .           	} 
   5260            .          .           	newg.goid = pp.goidcache 
   5261            .      100ms           	casgstatus(newg, _Gdead, status) 
   5262            .          .           	pp.goidcache++ 
   5263            .          .           	newg.trace.reset() 
   5264            .          .           	if trace.ok() { 
   5265            .          .           		trace.GoCreate(newg, newg.startpc, parked) 
   5266            .          .           		traceRelease(trace) 
   5267            .          .           	} 
   5268            .          .            
   5269            .          .           	// Set up race context. 
   5270            .          .           	if raceenabled { 
   5271            .          .           		newg.racectx = racegostart(callerpc) 
   5272            .          .           		newg.raceignore = 0 
   5273            .          .           		if newg.labels != nil { 
   5274            .          .           			// See note in proflabel.go on labelSync's role in synchronizing 
   5275            .          .           			// with the reads in the signal handler. 
   5276            .          .           			racereleasemergeg(newg, unsafe.Pointer(&labelSync)) 
   5277            .          .           		} 
   5278            .          .           	} 
   5279         10ms       10ms           	releasem(mp)
                                                           mp.locks--                                                       runtime1.go:637

   5280            .          .            
   5281            .          .           	return newg 
   5282            .          .           } 
   5283            .          .            
   5284            .          .           // saveAncestors copies previous ancestors of the given caller g and 
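
The goidcache logic in newproc1 above amortizes one atomic add over a whole batch of goroutine IDs handed out P-locally. The same pattern in isolation, with our own batch size and names:

    import "sync/atomic"

    const batch = 16

    var lastID atomic.Uint64 // global: last ID allocated, like sched.goidgen

    // idCache serves IDs from a claimed batch; only the refill touches
    // the shared counter. A single cache is not safe for concurrent use.
    type idCache struct{ next, end uint64 }

    func (c *idCache) id() uint64 {
        if c.next == c.end {
            hi := lastID.Add(batch) // claim IDs [hi-batch+1, hi] in one atomic op
            c.next, c.end = hi-batch+1, hi+1
        }
        id := c.next
        c.next++
        return id
    }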

runtime.gfput

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      140ms (flat, cum)  0.29%
   5315            .          .           	return ancestorsp 
   5316            .          .           } 
   5317            .          .            
   5318            .          .           // Put on gfree list. 
   5319            .          .           // If local list is too long, transfer a batch to the global list. 
   5320         10ms       10ms           func gfput(pp *p, gp *g) { 
   5321         10ms       10ms           	if readgstatus(gp) != _Gdead {
                                                           return gp.atomicstatus.Load()                                    proc.go:1205
                                                          return Load(&u.value)                                            types.go:194

   5322            .          .           		throw("gfput: bad status (not Gdead)") 
   5323            .          .           	} 
   5324            .          .            
   5325            .          .           	stksize := gp.stack.hi - gp.stack.lo 
   5326            .          .            
   5327            .          .           	if stksize != uintptr(startingStackSize) { 
   5328            .          .           		// non-standard stack size - free it. 
   5329            .       60ms           		stackfree(gp.stack) 
   5330            .          .           		gp.stack.lo = 0 
   5331            .          .           		gp.stack.hi = 0 
   5332            .          .           		gp.stackguard0 = 0 
   5333            .          .           		if valgrindenabled { 
   5334            .          .           			valgrindDeregisterStack(gp.valgrindStackID) 
   5335            .          .           			gp.valgrindStackID = 0 
   5336            .          .           		} 
   5337            .          .           	} 
   5338            .          .            
   5339         10ms       10ms           	pp.gFree.push(gp) 
   5340            .          .           	if pp.gFree.size >= 64 { 
   5341            .          .           		var ( 
   5342            .          .           			stackQ   gQueue 
   5343            .          .           			noStackQ gQueue 
   5344            .          .           		) 
   5345            .          .           		for pp.gFree.size >= 32 { 
   5346         30ms       30ms           			gp := pp.gFree.pop()
                                                           l.head = gp.schedlink                                            proc.go:7420

   5347            .          .           			if gp.stack.lo == 0 { 
   5348            .          .           				noStackQ.push(gp) 
   5349            .          .           			} else { 
   5350         10ms       10ms           				stackQ.push(gp)
                                                           if q.tail == 0 {                                                 proc.go:7333

   5351            .          .           			} 
   5352            .          .           		} 
   5353            .          .           		lock(&sched.gFree.lock) 
   5354            .          .           		sched.gFree.noStack.pushAll(noStackQ) 
   5355            .          .           		sched.gFree.stack.pushAll(stackQ) 
   5356            .          .           		unlock(&sched.gFree.lock) 
   5357            .          .           	} 
   5358         10ms       10ms           } 
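
gfput above and gfget below form a classic two-level free list: a per-P list accessed without locks, spilling from 64 down to 32 entries into a locked global list (which the runtime further splits by whether the G still owns a stack). Schematically, with our own types and no stack handling:

    import "sync"

    type item struct{} // stand-in for a dead G's descriptor

    type perP struct{ local []*item } // per-P: no locking needed

    var global struct {
        sync.Mutex
        items []*item
    }

    // put mirrors gfput: cache locally; past 64 entries, move a batch of
    // 32 to the global list under a single lock acquisition.
    func (p *perP) put(it *item) {
        p.local = append(p.local, it)
        if len(p.local) >= 64 {
            global.Lock()
            global.items = append(global.items, p.local[32:]...)
            global.Unlock()
            p.local = p.local[:32]
        }
    }

    // get mirrors gfget: take locally when possible, else refill a batch
    // from the global list first; nil means both levels are empty.
    func (p *perP) get() *item {
        if len(p.local) == 0 {
            global.Lock()
            n := min(len(global.items), 32)
            p.local = append(p.local, global.items[len(global.items)-n:]...)
            global.items = global.items[:len(global.items)-n]
            global.Unlock()
        }
        if len(p.local) == 0 {
            return nil
        }
        it := p.local[len(p.local)-1]
        p.local = p.local[:len(p.local)-1]
        return it
    }
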
   5359            .          .            
   5360            .          .           // Get from gfree list. 
   5361            .          .           // If local list is empty, grab a batch from global list. 
   5362            .          .           func gfget(pp *p) *g { 
   5363            .          .           retry: 

runtime.gfget

/usr/lib/go/src/runtime/proc.go

  Total:       130ms      260ms (flat, cum)  0.54%
   5376            .          .           			pp.gFree.push(gp) 
   5377            .          .           		} 
   5378            .          .           		unlock(&sched.gFree.lock) 
   5379            .          .           		goto retry 
   5380            .          .           	} 
   5381        100ms      100ms           	gp := pp.gFree.pop()
                                                           l.head = gp.schedlink                                            proc.go:7420

   5382            .          .           	if gp == nil { 
   5383            .          .           		return nil 
   5384            .          .           	} 
   5385         10ms       10ms           	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) { 
   5386            .          .           		// Deallocate old stack. We kept it in gfput because it was the 
   5387            .          .           		// right size when the goroutine was put on the free list, but 
   5388            .          .           		// the right size has changed since then. 
   5389            .          .           		systemstack(func() { 
   5390            .          .           			stackfree(gp.stack) 
   5391            .          .           			gp.stack.lo = 0 
   5392            .          .           			gp.stack.hi = 0 
   5393            .          .           			gp.stackguard0 = 0 
   5394            .          .           			if valgrindenabled { 
   5395            .          .           				valgrindDeregisterStack(gp.valgrindStackID) 
   5396            .          .           				gp.valgrindStackID = 0 
   5397            .          .           			} 
   5398            .          .           		}) 
   5399            .          .           	} 
   5400         10ms       10ms           	if gp.stack.lo == 0 { 
   5401            .          .           		// Stack was deallocated in gfput or just above. Allocate a new one. 
   5402         10ms      140ms           		systemstack(func() { 

runtime.gfget.func2

/usr/lib/go/src/runtime/proc.go

  Total:           0      120ms (flat, cum)  0.25%
   5403            .      120ms           			gp.stack = stackalloc(startingStackSize) 
   5404            .          .           			if valgrindenabled { 
   5405            .          .           				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi)) 
   5406            .          .           			} 
   5407            .          .           		}) 
   5408            .          .           		gp.stackguard0 = gp.stack.lo + stackGuard 

runtime.acquirep

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      130ms (flat, cum)  0.27%
   6012            .          .           // isn't because it immediately acquires pp. 
   6013            .          .           // 
   6014            .          .           //go:yeswritebarrierrec 
   6015            .          .           func acquirep(pp *p) { 
   6016            .          .           	// Do the part that isn't allowed to have write barriers. 
   6017            .       10ms           	wirep(pp) 
   6018            .          .            
   6019            .          .           	// Have p; write barriers now allowed. 
   6020            .          .            
   6021            .          .           	// Perform deferred mcache flush before this P can allocate 
   6022            .          .           	// from a potentially stale mcache. 
   6023         10ms      100ms           	pp.mcache.prepareForSweep() 
   6024            .          .            
   6025         20ms       20ms           	trace := traceAcquire()
                                                           if !traceEnabled() {                                             traceruntime.go:188

   6026            .          .           	if trace.ok() { 
   6027            .          .           		trace.ProcStart() 
   6028            .          .           		traceRelease(trace) 
   6029            .          .           	} 
   6030            .          .           } 

runtime.wirep

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   6036            .          .           //go:nowritebarrierrec 
   6037            .          .           //go:nosplit 
   6038            .          .           func wirep(pp *p) { 
   6039            .          .           	gp := getg() 
   6040            .          .            
   6041         10ms       10ms           	if gp.m.p != 0 { 
   6042            .          .           		// Call on the systemstack to avoid a nosplit overflow build failure 
   6043            .          .           		// on some platforms when built with -N -l. See #64113. 
   6044            .          .           		systemstack(func() { 
   6045            .          .           			throw("wirep: already in go") 
   6046            .          .           		}) 
   6047            .          .           	} 
   6048            .          .           	if pp.m != 0 || pp.status != _Pidle { 
   6049            .          .           		// Call on the systemstack to avoid a nosplit overflow build failure 
   6050            .          .           		// on some platforms when built with -N -l. See #64113. 
   6051            .          .           		systemstack(func() { 
   6052            .          .           			id := int64(0) 
   6053            .          .           			if pp.m != 0 { 
   6054            .          .           				id = pp.m.ptr().id 
   6055            .          .           			} 
   6056            .          .           			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n") 
   6057            .          .           			throw("wirep: invalid p state") 
   6058            .          .           		}) 
   6059            .          .           	} 
   6060         10ms       10ms           	gp.m.p.set(pp) 
   6061            .          .           	pp.m.set(gp.m) 
   6062            .          .           	pp.status = _Prunning 

runtime.releasep

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6064            .          .            
   6065            .          .           // Disassociate p and the current m. 
   6066         10ms       10ms           func releasep() *p { 
   6067            .          .           	trace := traceAcquire() 
   6068            .          .           	if trace.ok() { 
   6069            .          .           		trace.ProcStop(getg().m.p.ptr()) 
   6070            .          .           		traceRelease(trace) 
   6071            .          .           	} 

runtime.checkdead

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   6125            .          .           	// If we are not running under cgo, but we have an extra M then account 
   6126            .          .           	// for it. (It is possible to have an extra M on Windows without cgo to 
   6127            .          .           	// accommodate callbacks created by syscall.NewCallback. See issue #6751 
   6128            .          .           	// for details.) 
   6129            .          .           	var run0 int32 
   6130         10ms       10ms           	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 { 
   6131            .          .           		run0 = 1 
   6132            .          .           	} 
   6133            .          .            
   6134            .          .           	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 
   6135            .          .           	if run > run0 { 
   6136         10ms       10ms           		return 
   6137            .          .           	} 
   6138            .          .           	if run < 0 { 
   6139            .          .           		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 
   6140            .          .           		unlock(&sched.lock) 
   6141            .          .           		throw("checkdead: inconsistent counts") 

runtime.mput

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       30ms (flat, cum) 0.063%
   6825            .          .           func mput(mp *m) { 
   6826            .          .           	assertLockHeld(&sched.lock) 
   6827            .          .            
   6828            .          .           	mp.schedlink = sched.midle 
   6829            .          .           	sched.midle.set(mp) 
   6830         10ms       10ms           	sched.nmidle++ 
   6831            .       20ms           	checkdead() 
   6832            .          .           } 
   6833            .          .            
   6834            .          .           // Try to get an m from midle list. 
   6835            .          .           // sched.lock must be held. 
   6836            .          .           // May run during STW, so write barriers are not allowed. 

runtime.mget

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6839            .          .           func mget() *m { 
   6840            .          .           	assertLockHeld(&sched.lock) 
   6841            .          .            
   6842            .          .           	mp := sched.midle.ptr() 
   6843            .          .           	if mp != nil { 
   6844         10ms       10ms           		sched.midle = mp.schedlink 
   6845            .          .           		sched.nmidle-- 
   6846            .          .           	} 
   6847            .          .           	return mp 
   6848            .          .           } 
   6849            .          .            

runtime.pMask.read

/usr/lib/go/src/runtime/proc.go

  Total:       230ms      230ms (flat, cum)  0.48%
   6919            .          .           type pMask []uint32 
   6920            .          .            
   6921            .          .           // read returns true if P id's bit is set. 
   6922            .          .           func (p pMask) read(id uint32) bool { 
   6923            .          .           	word := id / 32 
   6924         20ms       20ms           	mask := uint32(1) << (id % 32) 
   6925        210ms      210ms           	return (atomic.Load(&p[word]) & mask) != 0 
   6926            .          .           } 
   6927            .          .            
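
pMask.read above (with set and clear just below) is a one-bit-per-P atomic bitmap: id/32 picks the uint32 word, 1<<(id%32) picks the bit within it, so each operation is a single atomic access to one word. A minimal standalone sketch of the same arithmetic, assuming nothing beyond the standard library (bitMask is an illustrative name, not the runtime's type; atomic.OrUint32 needs Go 1.23+):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // bitMask mirrors the pMask pattern: one bit per id, packed into
    // uint32 words so reads and writes touch a single atomic word.
    type bitMask []uint32

    func (m bitMask) read(id uint32) bool {
    	word := id / 32
    	mask := uint32(1) << (id % 32)
    	return atomic.LoadUint32(&m[word])&mask != 0
    }

    func (m bitMask) set(id uint32) {
    	// Go 1.23+; on older releases a CAS loop gives the same effect.
    	atomic.OrUint32(&m[id/32], uint32(1)<<(id%32))
    }

    func main() {
    	m := make(bitMask, 2)               // room for ids 0..63
    	m.set(37)                           // word 37/32 = 1, bit 1 << (37%32) = 1<<5
    	fmt.Println(m.read(37), m.read(36)) // true false
    }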

runtime.pMask.set

/usr/lib/go/src/runtime/proc.go

  Total:        40ms       40ms (flat, cum) 0.084%
   6929            .          .           func (p pMask) set(id int32) { 
   6930            .          .           	word := id / 32 
   6931         40ms       40ms           	mask := uint32(1) << (id % 32) 
   6932            .          .           	atomic.Or(&p[word], mask) 
   6933            .          .           } 
   6934            .          .            

runtime.pMask.clear

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6935            .          .           // clear clears P id's bit. 
   6936            .          .           func (p pMask) clear(id int32) { 
   6937            .          .           	word := id / 32 
   6938         10ms       10ms           	mask := uint32(1) << (id % 32) 
   6939            .          .           	atomic.And(&p[word], ^mask) 
   6940            .          .           } 
   6941            .          .            
   6942            .          .           // pidleput puts p on the _Pidle list. now must be a relatively recent call 
   6943            .          .           // to nanotime or zero. Returns now or the current time if now was zero. 

runtime.pidleput

/usr/lib/go/src/runtime/proc.go

  Total:       190ms      190ms (flat, cum)   0.4%
   6948            .          .           // sched.lock must be held. 
   6949            .          .           // 
   6950            .          .           // May run during STW, so write barriers are not allowed. 
   6951            .          .           // 
   6952            .          .           //go:nowritebarrierrec 
   6953         10ms       10ms           func pidleput(pp *p, now int64) int64 { 
   6954            .          .           	assertLockHeld(&sched.lock) 
   6955            .          .            
   6956            .          .           	if !runqempty(pp) { 
   6957            .          .           		throw("pidleput: P has non-empty run queue") 
   6958            .          .           	} 
   6959            .          .           	if now == 0 { 
   6960            .          .           		now = nanotime() 
   6961            .          .           	} 
   6962         10ms       10ms           	if pp.timers.len.Load() == 0 { 
   6963         10ms       10ms           		timerpMask.clear(pp.id) 
   6964            .          .           	} 
   6965        100ms      100ms           	idlepMask.set(pp.id) 
   6966         30ms       30ms           	pp.link = sched.pidle 
   6967            .          .           	sched.pidle.set(pp) 
   6968         30ms       30ms           	sched.npidle.Add(1) 
   6969            .          .           	if !pp.limiterEvent.start(limiterEventIdle, now) { 
   6970            .          .           		throw("must be able to track idle limiter event") 
   6971            .          .           	} 
   6972            .          .           	return now 
   6973            .          .           } 

runtime.pidleget

/usr/lib/go/src/runtime/proc.go

  Total:       170ms      230ms (flat, cum)  0.48%
   6984            .          .            
   6985            .          .           	pp := sched.pidle.ptr() 
   6986            .          .           	if pp != nil { 
   6987            .          .           		// Timer may get added at any time now. 
   6988            .          .           		if now == 0 { 
   6989         60ms       60ms           			now = nanotime() 
   6990            .          .           		} 
   6991         40ms       40ms           		timerpMask.set(pp.id) 
   6992         50ms       50ms           		idlepMask.clear(pp.id) 
   6993         10ms       10ms           		sched.pidle = pp.link 
   6994         10ms       10ms           		sched.npidle.Add(-1) 
   6995            .       60ms           		pp.limiterEvent.stop(limiterEventIdle, now) 
   6996            .          .           	} 
   6997            .          .           	return pp, now 
   6998            .          .           } 
   6999            .          .            
   7000            .          .           // pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership. 

runtime.pidlegetSpinning

/usr/lib/go/src/runtime/proc.go

  Total:           0      230ms (flat, cum)  0.48%
   7008            .          .           // 
   7009            .          .           //go:nowritebarrierrec 
   7010            .          .           func pidlegetSpinning(now int64) (*p, int64) { 
   7011            .          .           	assertLockHeld(&sched.lock) 
   7012            .          .            
   7013            .      230ms           	pp, now := pidleget(now) 
   7014            .          .           	if pp == nil { 
   7015            .          .           		// See "Delicate dance" comment in findrunnable. We found work 
   7016            .          .           		// that we cannot take, we must synchronize with non-spinning 
   7017            .          .           		// Ms that may be preparing to drop their P. 
   7018            .          .           		sched.needspinning.Store(1) 

runtime.runqempty

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   7028            .          .           	// Defend against a race where 1) pp has G1 in runqnext but runqhead == runqtail, 
   7029            .          .           	// 2) runqput on pp kicks G1 to the runq, 3) runqget on pp empties runqnext. 
   7030            .          .           	// Simply observing that runqhead == runqtail and then observing that runqnext == nil 
   7031            .          .           	// does not mean the queue is empty. 
   7032            .          .           	for { 
   7033         20ms       20ms           		head := atomic.Load(&pp.runqhead) 
   7034            .          .           		tail := atomic.Load(&pp.runqtail) 
   7035            .          .           		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext))) 
   7036         10ms       10ms           		if tail == atomic.Load(&pp.runqtail) { 
   7037            .          .           			return head == tail && runnext == 0 
   7038            .          .           		} 
   7039            .          .           	} 
   7040            .          .           } 
   7041            .          .            
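
The loop above is a consistent-snapshot read: head, tail, and runnext are sampled, and the sample only counts if runqtail did not move in the meantime, since a concurrent runqput could otherwise make the three values describe different moments in time. A hedged, self-contained sketch of the same pattern (ring is an illustrative type, not the runtime's):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type ring struct {
    	head, tail atomic.Uint32
    	next       atomic.Uintptr
    }

    // empty retries until it sees head/tail/next from one consistent
    // moment: if tail changed while sampling, a producer raced us, so
    // the three values may not belong together and we sample again.
    func (r *ring) empty() bool {
    	for {
    		h := r.head.Load()
    		t := r.tail.Load()
    		n := r.next.Load()
    		if t == r.tail.Load() {
    			return h == t && n == 0
    		}
    	}
    }

    func main() {
    	var r ring
    	fmt.Println(r.empty()) // true: nothing queued
    }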

runtime.runqput

/usr/lib/go/src/runtime/proc.go

  Total:        50ms       50ms (flat, cum)   0.1%
   7071            .          .           		next = false 
   7072            .          .           	} 
   7073            .          .            
   7074            .          .           	if next { 
   7075            .          .           	retryNext: 
   7076         10ms       10ms           		oldnext := pp.runnext 
   7077         40ms       40ms           		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) { 
   7078            .          .           			goto retryNext 
   7079            .          .           		} 
   7080            .          .           		if oldnext == 0 { 
   7081            .          .           			return 
   7082            .          .           		} 

runtime.runqget

/usr/lib/go/src/runtime/proc.go

  Total:       100ms      100ms (flat, cum)  0.21%
   7179            .          .           	// If there's a runnext, it's the next G to run. 
   7180            .          .           	next := pp.runnext 
   7181            .          .           	// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P, 
   7182            .          .           	// because other Ps can race to set runnext to 0, but only the current P can set it to non-0. 
   7183            .          .           	// Hence, there's no need to retry this CAS if it fails. 
   7184         10ms       10ms           	if next != 0 && pp.runnext.cas(next, 0) { 
   7185            .          .           		return next.ptr(), true 
   7186            .          .           	} 
   7187            .          .            
   7188            .          .           	for { 
   7189         10ms       10ms           		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers 
   7190         30ms       30ms           		t := pp.runqtail 
   7191            .          .           		if t == h { 
   7192            .          .           			return nil, false 
   7193            .          .           		} 
   7194            .          .           		gp := pp.runq[h%uint32(len(pp.runq))].ptr() 
   7195         50ms       50ms           		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume 
   7196            .          .           			return gp, false 
   7197            .          .           		} 
   7198            .          .           	} 
   7199            .          .           } 
   7200            .          .            
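
The single, unretried CAS on runnext above leans on ownership: only the owning P ever stores a non-zero runnext, so if the owner's cas(next, 0) fails, a thief must already have zeroed it and there is nothing left to claim. A minimal sketch of that interleaving, using a plain atomic.Uintptr in place of the runtime's guintptr:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    func main() {
    	var runnext atomic.Uintptr
    	runnext.Store(42) // owner published a runnable g (fake pointer value)

    	// A thief zeroes runnext first...
    	stolen := runnext.CompareAndSwap(42, 0)
    	// ...so the owner's one CAS attempt fails, and instead of
    	// retrying it falls through to the regular run queue.
    	owned := runnext.CompareAndSwap(42, 0)

    	fmt.Println(stolen, owned) // true false
    }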

runtime.runqgrab

/usr/lib/go/src/runtime/proc.go

  Total:       480ms      660ms (flat, cum)  1.38%
   7237            .          .            
   7238            .          .           // Grabs a batch of goroutines from pp's runnable queue into batch. 
   7239            .          .           // Batch is a ring buffer starting at batchHead. 
   7240            .          .           // Returns number of grabbed goroutines. 
   7241            .          .           // Can be executed by any P. 
   7242         20ms       20ms           func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 
   7243            .          .           	for { 
   7244        180ms      180ms           		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers 
   7245         40ms       40ms           		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer 
   7246         10ms       10ms           		n := t - h 
   7247            .          .           		n = n - n/2 
   7248            .          .           		if n == 0 { 
   7249         20ms       20ms           			if stealRunNextG { 
   7250            .          .           				// Try to steal from pp.runnext. 
   7251         40ms       40ms           				if next := pp.runnext; next != 0 { 
   7252            .          .           					if pp.status == _Prunning { 
   7253            .          .           						// Sleep to ensure that pp isn't about to run the g 
   7254            .          .           						// we are about to steal. 
   7255            .          .           						// The important use case here is when the g running 
   7256            .          .           						// on pp ready()s another g and then almost 
   7257            .          .           						// immediately blocks. Instead of stealing runnext 
   7258            .          .           						// in this window, back off to give pp a chance to 
   7259            .          .           						// schedule runnext. This will avoid thrashing gs 
   7260            .          .           						// between different Ps. 
   7261            .          .           						// A sync chan send/recv takes ~50ns as of time of 
   7262            .          .           						// writing, so 3us gives ~50x overshoot. 
   7263            .          .           						if !osHasLowResTimer { 
   7264            .      180ms           							usleep(3) 
   7265            .          .           						} else { 
   7266            .          .           							// On some platforms system timer granularity is 
   7267            .          .           							// 1-15ms, which is way too much for this 
   7268            .          .           							// optimization. So just yield. 
   7269            .          .           							osyield() 
   7270            .          .           						} 
   7271            .          .           					} 
   7272         30ms       30ms           					if !pp.runnext.cas(next, 0) { 
   7273            .          .           						continue 
   7274            .          .           					} 
   7275            .          .           					batch[batchHead%uint32(len(batch))] = next 
   7276            .          .           					return 1 
   7277            .          .           				} 
   7278            .          .           			} 
   7279            .          .           			return 0 
   7280            .          .           		} 
   7281            .          .           		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t 
   7282            .          .           			continue 
   7283            .          .           		} 
   7284         10ms       10ms           		for i := uint32(0); i < n; i++ { 
   7285            .          .           			g := pp.runq[(h+i)%uint32(len(pp.runq))] 
   7286         80ms       80ms           			batch[(batchHead+i)%uint32(len(batch))] = g 
   7287            .          .           		} 
   7288         50ms       50ms           		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume 
   7289            .          .           			return n 
   7290            .          .           		} 
   7291            .          .           	} 
   7292            .          .           } 
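
runqgrab steals the larger half of what it observes: n = n - n/2 is ceil(n/2) in integer arithmetic, so a victim queue of 5 gives up 3 and a queue of 1 gives up 1. A quick check of that arithmetic:

    package main

    import "fmt"

    func main() {
    	for _, q := range []uint32{0, 1, 2, 5, 8} {
    		n := q - q/2 // larger half, rounds up
    		fmt.Printf("queued=%d grabbed=%d\n", q, n)
    	}
    }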

runtime.runqsteal

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      700ms (flat, cum)  1.47%
   7293            .          .            
   7294            .          .           // Steal half of elements from local runnable queue of p2 
   7295            .          .           // and put onto local runnable queue of p. 
   7296            .          .           // Returns one of the stolen elements (or nil if failed). 
   7297         10ms       10ms           func runqsteal(pp, p2 *p, stealRunNextG bool) *g { 
   7298         10ms       10ms           	t := pp.runqtail 
   7299            .      660ms           	n := runqgrab(p2, &pp.runq, t, stealRunNextG) 
   7300            .          .           	if n == 0 { 
   7301         10ms       10ms           		return nil 
   7302            .          .           	} 
   7303            .          .           	n-- 
   7304         10ms       10ms           	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr() 
   7305            .          .           	if n == 0 { 
   7306            .          .           		return gp 
   7307            .          .           	} 
   7308            .          .           	h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with consumers 
   7309            .          .           	if t-h+n >= uint32(len(pp.runq)) { 

runtime.(*gQueue).push

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7328            .          .            
   7329            .          .           // push adds gp to the head of q. 
   7330            .          .           func (q *gQueue) push(gp *g) { 
   7331            .          .           	gp.schedlink = q.head 
   7332            .          .           	q.head.set(gp) 
   7333         10ms       10ms           	if q.tail == 0 { 
   7334            .          .           		q.tail.set(gp) 
   7335            .          .           	} 
   7336            .          .           	q.size++ 
   7337            .          .           } 
   7338            .          .            

runtime.(*gList).pop

/usr/lib/go/src/runtime/proc.go

  Total:       130ms      130ms (flat, cum)  0.27%
   7415            .          .            
   7416            .          .           // pop removes and returns the head of l. If l is empty, it returns nil. 
   7417            .          .           func (l *gList) pop() *g { 
   7418            .          .           	gp := l.head.ptr() 
   7419            .          .           	if gp != nil { 
   7420        130ms      130ms           		l.head = gp.schedlink 
   7421            .          .           		l.size-- 
   7422            .          .           	} 
   7423            .          .           	return gp 
   7424            .          .           } 
   7425            .          .            
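
gQueue.push and gList.pop above are intrusive lists: the link field (schedlink) lives inside the g itself, so queue operations allocate nothing and never touch the heap. A hedged sketch of the same shape with an ordinary struct (node and list are illustrative, not runtime types):

    package main

    import "fmt"

    type node struct {
    	val  int
    	link *node // intrusive link, lives in the element itself
    }

    type list struct {
    	head *node
    	size int
    }

    func (l *list) push(n *node) {
    	n.link = l.head
    	l.head = n
    	l.size++
    }

    func (l *list) pop() *node {
    	n := l.head
    	if n != nil {
    		l.head = n.link
    		l.size--
    	}
    	return n
    }

    func main() {
    	var l list
    	l.push(&node{val: 1})
    	l.push(&node{val: 2})
    	fmt.Println(l.pop().val, l.pop().val, l.pop()) // 2 1 <nil>
    }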

runtime.(*randomEnum).done

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7592            .          .           		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))], 
   7593            .          .           	} 
   7594            .          .           } 
   7595            .          .            
   7596            .          .           func (enum *randomEnum) done() bool { 
   7597         10ms       10ms           	return enum.i == enum.count 
   7598            .          .           } 
   7599            .          .            

runtime.(*randomEnum).next

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   7600            .          .           func (enum *randomEnum) next() { 
   7601            .          .           	enum.i++ 
   7602         30ms       30ms           	enum.pos = (enum.pos + enum.inc) % enum.count 
   7603            .          .           } 
   7604            .          .            
   7605            .          .           func (enum *randomEnum) position() uint32 { 
   7606            .          .           	return enum.pos 
   7607            .          .           } 
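
randomEnum visits 0..count-1 in a scrambled order by repeatedly adding an increment that is coprime to count; because gcd(inc, count) = 1, the walk hits every position exactly once before wrapping back to the start. A small demonstration under those assumptions (the concrete numbers here are illustrative):

    package main

    import "fmt"

    func main() {
    	const count, inc = 8, 3 // gcd(3, 8) = 1
    	pos := uint32(2)        // any starting position works
    	for i := 0; i < count; i++ {
    		fmt.Print(pos, " ") // prints all of 0..7 exactly once
    		pos = (pos + inc) % count
    	}
    	fmt.Println()
    }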

runtime.usleep

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       180ms      180ms (flat, cum)  0.38%
    134            .          .           	// nanosleep(&ts, 0) 
    135            .          .           	ADD	$8, RSP, R0 
    136            .          .           	MOVD	$0, R1 
    137            .          .           	MOVD	$SYS_nanosleep, R8 
    138            .          .           	SVC 
    139        180ms      180ms           	RET 
    140            .          .            
    141            .          .           TEXT runtime·gettid(SB),NOSPLIT,$0-4 
    142            .          .           	MOVD	$SYS_gettid, R8 
    143            .          .           	SVC 
    144            .          .           	MOVW	R0, ret+0(FP) 

runtime.sysMmap

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    578            .          .           	MOVW	fd+24(FP), R4 
    579            .          .           	MOVW	off+28(FP), R5 
    580            .          .            
    581            .          .           	MOVD	$SYS_mmap, R8 
    582            .          .           	SVC 
    583         10ms       10ms           	CMN	$4095, R0 
    584            .          .           	BCC	ok 
    585            .          .           	NEG	R0,R0 
    586            .          .           	MOVD	$0, p+32(FP) 
    587            .          .           	MOVD	R0, err+40(FP) 
    588            .          .           	RET 
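
The CMN $4095 / BCC / NEG sequence here (and in Syscall6 and futex) implements the Linux convention that raw syscalls report failure by returning -errno in the range [-4095, -1]. A hedged Go sketch of the same decoding on a signed return value (decode is a hypothetical helper, not a runtime function):

    package main

    import "fmt"

    // decode splits a raw Linux syscall return into (result, errno):
    // values in [-4095, -1] are negated error numbers, anything else
    // is a successful result. Mirrors the CMN $4095 / BCC check above.
    func decode(r0 int64) (res int64, errno int64) {
    	if r0 < 0 && r0 >= -4095 {
    		return -1, -r0
    	}
    	return r0, 0
    }

    func main() {
    	fmt.Println(decode(4096)) // 4096 0 (success)
    	fmt.Println(decode(-12))  // -1 12 (ENOMEM)
    }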

runtime.madvise

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:        50ms       50ms (flat, cum)   0.1%
    633            .          .           	MOVD	addr+0(FP), R0 
    634            .          .           	MOVD	n+8(FP), R1 
    635            .          .           	MOVW	flags+16(FP), R2 
    636            .          .           	MOVD	$SYS_madvise, R8 
    637            .          .           	SVC 
    638         50ms       50ms           	MOVW	R0, ret+24(FP) 
    639            .          .           	RET 
    640            .          .            
    641            .          .           // int64 futex(int32 *uaddr, int32 op, int32 val, 

runtime.futex

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       3.77s      3.77s (flat, cum)  7.89%
    642            .          .           //	struct timespec *timeout, int32 *uaddr2, int32 val2); 
    643            .          .           TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 
    644            .          .           	MOVD	addr+0(FP), R0 
    645         10ms       10ms           	MOVW	op+8(FP), R1 
    646            .          .           	MOVW	val+12(FP), R2 
    647            .          .           	MOVD	ts+16(FP), R3 
    648            .          .           	MOVD	addr2+24(FP), R4 
    649            .          .           	MOVW	val3+32(FP), R5 
    650            .          .           	MOVD	$SYS_futex, R8 
    651         10ms       10ms           	SVC 
    652        3.74s      3.74s           	MOVW	R0, ret+40(FP) 
    653         10ms       10ms           	RET 
    654            .          .            
    655            .          .           // int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void)); 
    656            .          .           TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0 
    657            .          .           	MOVW	flags+0(FP), R0 
    658            .          .           	MOVD	stk+8(FP), R1 

runtime.osyield

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       270ms      270ms (flat, cum)  0.57%
    734            .          .           	RET 
    735            .          .            
    736            .          .           TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 
    737            .          .           	MOVD	$SYS_sched_yield, R8 
    738            .          .           	SVC 
    739        270ms      270ms           	RET 
    740            .          .            
    741            .          .           TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 
    742            .          .           	MOVD	pid+0(FP), R0 
    743            .          .           	MOVD	len+8(FP), R1 
    744            .          .           	MOVD	buf+16(FP), R2 

runtime.nextFreeFast

/usr/lib/go/src/runtime/malloc.go

  Total:       970ms      970ms (flat, cum)  2.03%
    927            .          .           var zerobase uintptr 
    928            .          .            
    929            .          .           // nextFreeFast returns the next free object if one is quickly available. 
    930            .          .           // Otherwise it returns 0. 
    931            .          .           func nextFreeFast(s *mspan) gclinkptr { 
    932        610ms      610ms           	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? 
    933            .          .           	if theBit < 64 { 
    934         70ms       70ms           		result := s.freeindex + uint16(theBit) 
    935            .          .           		if result < s.nelems { 
    936         50ms       50ms           			freeidx := result + 1 
    937            .          .           			if freeidx%64 == 0 && freeidx != s.nelems { 
    938            .          .           				return 0 
    939            .          .           			} 
    940         40ms       40ms           			s.allocCache >>= uint(theBit + 1) 
    941         30ms       30ms           			s.freeindex = freeidx 
    942         50ms       50ms           			s.allocCount++ 
    943        120ms      120ms           			return gclinkptr(uintptr(result)*s.elemsize + s.base()) 
    944            .          .           		} 
    945            .          .           	} 
    946            .          .           	return 0 
    947            .          .           } 
    948            .          .            
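
nextFreeFast is a bitmap scan: allocCache keeps one bit per object slot (1 = free), TrailingZeros64 finds the nearest free slot in a couple of instructions, and the shift consumes that slot together with everything before it. A self-contained sketch of the technique, assuming an arbitrary example bit pattern:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    func main() {
    	// 1-bits mark free slots relative to freeindex.
    	var cache uint64 = 0b10110100
    	freeindex := uint16(0)

    	for i := 0; i < 4; i++ {
    		bit := bits.TrailingZeros64(cache)
    		if bit == 64 {
    			fmt.Println("cache exhausted")
    			break
    		}
    		slot := freeindex + uint16(bit)
    		cache >>= uint(bit + 1) // discard the slot and everything before it
    		freeindex = slot + 1
    		fmt.Println("allocated slot", slot) // 2, 4, 5, 7
    	}
    }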

runtime.(*mcache).nextFree

/usr/lib/go/src/runtime/malloc.go

  Total:        50ms      2.44s (flat, cum)  5.11%
    953            .          .           // determine whether a new GC cycle needs to be started or if the GC is active 
    954            .          .           // whether this goroutine needs to assist the GC. 
    955            .          .           // 
    956            .          .           // Must run in a non-preemptible context since otherwise the owner of 
    957            .          .           // c could change. 
    958         30ms      130ms           func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) { 
    959         20ms       20ms           	s = c.alloc[spc] 
    960            .          .           	checkGCTrigger = false 
    961            .      220ms           	freeIndex := s.nextFreeIndex() 
    962            .          .           	if freeIndex == s.nelems { 
    963            .          .           		// The span is full. 
    964            .          .           		if s.allocCount != s.nelems { 
    965            .          .           			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 
    966            .          .           			throw("s.allocCount != s.nelems && freeIndex == s.nelems") 
    967            .          .           		} 
    968            .      2.06s           		c.refill(spc) 
    969            .          .           		checkGCTrigger = true 
    970            .          .           		s = c.alloc[spc] 
    971            .          .            
    972            .       10ms           		freeIndex = s.nextFreeIndex() 
    973            .          .           	} 
    974            .          .            
    975            .          .           	if freeIndex >= s.nelems { 
    976            .          .           		throw("freeIndex is not valid") 
    977            .          .           	} 
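
nextFree is the slow path behind nextFreeFast: try the cached span first, and only when it is exhausted swap in a fresh one (c.refill) and note that a GC-trigger check is due. A hedged sketch of that cache-then-refill shape with toy types (span, cache, and refill are illustrative, not the runtime's):

    package main

    import "fmt"

    type span struct{ free []int }

    type cache struct{ cur *span }

    // next pops from the cached span and refills it from a central
    // source only when it runs dry, so the common case stays cheap.
    func (c *cache) next(refill func() *span) (int, bool) {
    	if len(c.cur.free) == 0 {
    		c.cur = refill() // slow path, like c.refill(spc)
    		if len(c.cur.free) == 0 {
    			return 0, false
    		}
    	}
    	v := c.cur.free[0]
    	c.cur.free = c.cur.free[1:]
    	return v, true
    }

    func main() {
    	c := &cache{cur: &span{}}
    	v, ok := c.next(func() *span { return &span{free: []int{7, 8}} })
    	fmt.Println(v, ok) // 7 true
    }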

runtime.mallocgc

/usr/lib/go/src/runtime/malloc.go

  Total:       560ms     11.53s (flat, cum) 24.14%
   1009            .          .           // 
   1010            .          .           // Do not remove or change the type signature. 
   1011            .          .           // See go.dev/issue/67401. 
   1012            .          .           // 
   1013            .          .           //go:linkname mallocgc 
   1014        110ms      2.22s           func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 
   1015            .          .           	if doubleCheckMalloc { 
   1016            .          .           		if gcphase == _GCmarktermination { 
   1017            .          .           			throw("mallocgc called with gcphase == _GCmarktermination") 
   1018            .          .           		} 
   1019            .          .           	} 
   1020            .          .            
   1021            .          .           	// Short-circuit zero-sized allocation requests. 
   1022        100ms      100ms           	if size == 0 { 
   1023            .          .           		return unsafe.Pointer(&zerobase) 
   1024            .          .           	} 
   1025            .          .            
   1026            .          .           	// It's possible for any malloc to trigger sweeping, which may in 
   1027            .          .           	// turn queue finalizers. Record this dynamic lock edge. 
   1028            .          .           	// N.B. Compiled away if lockrank experiment is not enabled. 
   1029            .          .           	lockRankMayQueueFinalizer() 
   1030            .          .            
   1031            .          .           	// Pre-malloc debug hooks. 
   1032        120ms      120ms           	if debug.malloc { 
   1033            .          .           		if x := preMallocgcDebug(size, typ); x != nil { 
   1034            .          .           			return x 
   1035            .          .           		} 
   1036            .          .           	} 
   1037            .          .            
   1038            .          .           	// For ASAN, we allocate extra memory around each allocation called the "redzone." 
   1039            .          .           	// These "redzones" are marked as unaddressable. 
   1040            .          .           	var asanRZ uintptr 
   1041            .          .           	if asanenabled { 
   1042            .          .           		asanRZ = redZoneSize(size) 
   1043            .          .           		size += asanRZ 
   1044            .          .           	} 
   1045            .          .            
   1046            .          .           	// Assist the GC if needed. 
   1047        100ms      100ms           	if gcBlackenEnabled != 0 { 
   1048            .          .           		deductAssistCredit(size) 
   1049            .          .           	} 
   1050            .          .            
   1051            .          .           	// Actually do the allocation. 
   1052            .          .           	var x unsafe.Pointer 
   1053            .          .           	var elemsize uintptr 
   1054         40ms       40ms           	if size <= maxSmallSize-gc.MallocHeaderSize { 
   1055         40ms       40ms           		if typ == nil || !typ.Pointers() { 
   1056            .          .           			if size < maxTinySize { 
   1057         10ms      240ms           				x, elemsize = mallocgcTiny(size, typ) 
   1058            .          .           			} else { 
   1059            .      2.13s           				x, elemsize = mallocgcSmallNoscan(size, typ, needzero) 
   1060            .          .           			} 
   1061            .          .           		} else { 
   1062            .          .           			if !needzero { 
   1063            .          .           				throw("objects with pointers must be zeroed") 
   1064            .          .           			} 
   1065         10ms       10ms           			if heapBitsInSpan(size) { 
   1066         20ms      5.95s           				x, elemsize = mallocgcSmallScanNoHeader(size, typ) 
   1067            .          .           			} else { 
   1068         10ms      580ms           				x, elemsize = mallocgcSmallScanHeader(size, typ) 
   1069            .          .           			} 
   1070            .          .           		} 
   1071            .          .           	} else { 
   1072            .          .           		x, elemsize = mallocgcLarge(size, typ, needzero) 
   1073            .          .           	} 
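
The branch structure above is a size-and-type dispatch: zero-byte requests share a single base address, tiny pointer-free objects go to the tiny allocator, other small objects split on whether the type contains pointers and whether the heap bits fit in-span, and anything over the small-object limit takes the large path. A hedged summary of that shape (the cutoffs below match current Go only approximately and ignore the header-size adjustment):

    package main

    import "fmt"

    const (
    	maxTiny  = 16    // tiny allocator cutoff (maxTinySize)
    	maxSmall = 32768 // small-object cutoff (maxSmallSize, approx.)
    )

    func route(size uintptr, hasPointers bool) string {
    	switch {
    	case size == 0:
    		return "zerobase"
    	case size > maxSmall:
    		return "mallocgcLarge"
    	case !hasPointers && size < maxTiny:
    		return "mallocgcTiny"
    	case !hasPointers:
    		return "mallocgcSmallNoscan"
    	default:
    		return "mallocgcSmallScan*" // header vs. no-header split
    	}
    }

    func main() {
    	fmt.Println(route(8, false))    // mallocgcTiny
    	fmt.Println(route(64, true))    // mallocgcSmallScan*
    	fmt.Println(route(1<<20, true)) // mallocgcLarge
    }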

runtime.mallocgc

/usr/lib/go/src/runtime/malloc.go

  Total:       140ms      140ms (flat, cum)  0.29%
   1092            .          .           	if valgrindenabled { 
   1093            .          .           		valgrindMalloc(x, size-asanRZ) 
   1094            .          .           	} 
   1095            .          .            
   1096            .          .           	// Adjust our GC assist debt to account for internal fragmentation. 
   1097         80ms       80ms           	if gcBlackenEnabled != 0 && elemsize != 0 { 
   1098            .          .           		if assistG := getg().m.curg; assistG != nil { 
   1099            .          .           			assistG.gcAssistBytes -= int64(elemsize - size) 
   1100            .          .           		} 
   1101            .          .           	} 
   1102            .          .            
   1103            .          .           	// Post-malloc debug hooks. 
   1104         20ms       20ms           	if debug.malloc { 
   1105            .          .           		postMallocgcDebug(x, elemsize, typ) 
   1106            .          .           	} 
   1107         40ms       40ms           	return x 
   1108            .          .           } 

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   1109            .          .            
   1110         20ms       20ms           func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1111            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1112         10ms       10ms           	mp := acquirem() 
   1113            .          .           	if doubleCheckMalloc { 
   1114            .          .           		if mp.mallocing != 0 { 
   1115            .          .           			throw("malloc deadlock") 
   1116            .          .           		} 
   1117            .          .           		if mp.gsignal == getg() { 

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        90ms      160ms (flat, cum)  0.33%
   1150            .          .           	// 
   1151            .          .           	// The main targets of tiny allocator are small strings and 
   1152            .          .           	// standalone escaping variables. On a json benchmark 
   1153            .          .           	// the allocator reduces number of allocations by ~12% and 
   1154            .          .           	// reduces heap size by ~20%. 
   1155         30ms       30ms           	c := getMCache(mp) 
   1156            .          .           	off := c.tinyoffset 
   1157            .          .           	// Align tiny pointer for required (conservative) alignment. 
   1158         10ms       10ms           	if size&7 == 0 { 
   1159            .          .           		off = alignUp(off, 8) 
   1160            .          .           	} else if goarch.PtrSize == 4 && size == 12 { 
   1161            .          .           		// Conservatively align 12-byte objects to 8 bytes on 32-bit 
   1162            .          .           		// systems so that objects whose first field is a 64-bit 
   1163            .          .           		// value is aligned to 8 bytes and does not cause a fault on 
   1164            .          .           		// atomic access. See issue 37262. 
   1165            .          .           		// TODO(mknyszek): Remove this workaround if/when issue 36606 
   1166            .          .           		// is resolved. 
   1167            .          .           		off = alignUp(off, 8) 
   1168            .          .           	} else if size&3 == 0 { 
   1169            .          .           		off = alignUp(off, 4) 
   1170            .          .           	} else if size&1 == 0 { 
   1171            .          .           		off = alignUp(off, 2) 
   1172            .          .           	} 
   1173         10ms       10ms           	if off+size <= maxTinySize && c.tiny != 0 { 
   1174            .          .           		// The object fits into existing tiny block. 
   1175            .          .           		x := unsafe.Pointer(c.tiny + off) 
   1176            .          .           		c.tinyoffset = off + size 
   1177            .          .           		c.tinyAllocs++ 
   1178            .          .           		mp.mallocing = 0 
   1179            .          .           		releasem(mp) 
   1180            .          .           		return x, 0 
   1181            .          .           	} 
   1182            .          .           	// Allocate a new maxTinySize block. 
   1183            .          .           	checkGCTrigger := false 
   1184            .          .           	span := c.alloc[tinySpanClass] 
   1185         40ms       40ms           	v := nextFreeFast(span) 
   1186            .          .           	if v == 0 { 
   1187            .       70ms           		v, span, checkGCTrigger = c.nextFree(tinySpanClass) 
   1188            .          .           	} 
   1189            .          .           	x := unsafe.Pointer(v) 
   1190            .          .           	(*[2]uint64)(x)[0] = 0 // Always zero 
   1191            .          .           	(*[2]uint64)(x)[1] = 0 
   1192            .          .           	// See if we need to replace the existing tiny block with the new one 
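
The alignment ladder near the top of mallocgcTiny rounds the tiny-block offset so the new object gets the strictest alignment its size could need: multiples of 8 get 8-byte alignment, multiples of 4 get 4, multiples of 2 get 2. A small sketch of alignUp and the ladder (it omits the 32-bit special case for 12-byte objects noted in the source):

    package main

    import "fmt"

    // alignUp rounds n up to a multiple of a (a must be a power of two).
    func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

    func align(off, size uintptr) uintptr {
    	switch {
    	case size&7 == 0:
    		return alignUp(off, 8)
    	case size&3 == 0:
    		return alignUp(off, 4)
    	case size&1 == 0:
    		return alignUp(off, 2)
    	}
    	return off
    }

    func main() {
    	fmt.Println(align(5, 8)) // 8: an 8-byte object lands on an 8-byte boundary
    	fmt.Println(align(5, 6)) // 6: a 6-byte object only needs 2-byte alignment
    }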

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms       20ms (flat, cum) 0.042%
   1232            .          .           	// of gc_sys or something. The code below just pretends it is 
   1233            .          .           	// internal fragmentation and matches the GC's accounting by 
   1234            .          .           	// using the whole allocation slot. 
   1235            .          .           	c.nextSample -= int64(span.elemsize) 
   1236            .          .           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1237            .       10ms           		profilealloc(mp, x, span.elemsize) 
   1238            .          .           	} 
   1239            .          .           	mp.mallocing = 0 
   1240            .          .           	releasem(mp) 
   1241            .          .            
   1242         10ms       10ms           	if checkGCTrigger { 
   1243            .          .           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1244            .          .           			gcStart(t) 
   1245            .          .           		} 
   1246            .          .           	} 
   1247            .          .            

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1258            .          .           		// TODO: enable this padding for all allocations, not just 
   1259            .          .           		// tinyalloc ones. It's tricky because of pointer maps. 
   1260            .          .           		// Maybe just all noscan objects? 
   1261            .          .           		x = add(x, span.elemsize-size) 
   1262            .          .           	} 
   1263         20ms       20ms           	return x, span.elemsize 
   1264            .          .           } 

runtime.mallocgcSmallNoscan

/usr/lib/go/src/runtime/malloc.go

  Total:       140ms      2.09s (flat, cum)  4.38%
   1265            .          .            
   1266            .      1.68s           func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 
   1267            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1268            .          .           	mp := acquirem() 
   1269            .          .           	if doubleCheckMalloc { 
   1270            .          .           		if mp.mallocing != 0 { 
   1271            .          .           			throw("malloc deadlock") 
   1272            .          .           		} 
   1273            .          .           		if mp.gsignal == getg() { 
   1274            .          .           			throw("malloc during signal") 
   1275            .          .           		} 
   1276            .          .           		if typ != nil && typ.Pointers() { 
   1277            .          .           			throw("expected noscan type for noscan alloc") 
   1278            .          .           		} 
   1279            .          .           	} 
   1280            .          .           	mp.mallocing = 1 
   1281            .          .            
   1282            .          .           	checkGCTrigger := false 
   1283         20ms       20ms           	c := getMCache(mp) 
   1284            .          .           	var sizeclass uint8 
   1285            .          .           	if size <= gc.SmallSizeMax-8 { 
   1286            .          .           		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1287            .          .           	} else { 
   1288            .          .           		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 
   1289            .          .           	} 
   1290            .          .           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1291         10ms       10ms           	spc := makeSpanClass(sizeclass, true) 
   1292            .          .           	span := c.alloc[spc] 
   1293         90ms       90ms           	v := nextFreeFast(span) 
   1294         10ms       10ms           	if v == 0 { 
   1295            .      190ms           		v, span, checkGCTrigger = c.nextFree(spc) 
   1296            .          .           	} 
   1297            .          .           	x := unsafe.Pointer(v) 
   1298         10ms       10ms           	if needzero && span.needzero != 0 { 
   1299            .       80ms           		memclrNoHeapPointers(x, size) 
   1300            .          .           	} 
   1301            .          .            
   1302            .          .           	// Ensure that the stores above that initialize x to 
   1303            .          .           	// type-safe memory and set the heap bits occur before 
   1304            .          .           	// the caller can make x observable to the garbage 

runtime.mallocgcSmallNoscan

/usr/lib/go/src/runtime/malloc.go

  Total:        40ms       40ms (flat, cum) 0.084%
   1320            .          .           		// that are both free and recently-allocated. It's safe to do that 
   1321            .          .           		// because we allocate-black if the GC is enabled. The conservative 
   1322            .          .           		// scanner produces pointers out of thin air, so without additional 
   1323            .          .           		// synchronization it might otherwise observe a partially-initialized 
   1324            .          .           		// object, which could crash the program. 
   1325         10ms       10ms           		span.freeIndexForScan = span.freeindex 
   1326            .          .           	} 
   1327            .          .            
   1328            .          .           	// Note cache c only valid while m acquired; see #47302 
   1329            .          .           	// 
   1330            .          .           	// N.B. Use the full size because that matches how the GC 
   1331            .          .           	// will update the mem profile on the "free" side. 
   1332            .          .           	// 
   1333            .          .           	// TODO(mknyszek): We should really count the header as part 
   1334            .          .           	// of gc_sys or something. The code below just pretends it is 
   1335            .          .           	// internal fragmentation and matches the GC's accounting by 
   1336            .          .           	// using the whole allocation slot. 
   1337            .          .           	c.nextSample -= int64(size) 
   1338         10ms       10ms           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1339            .          .           		profilealloc(mp, x, size) 
   1340            .          .           	} 
   1341            .          .           	mp.mallocing = 0 
   1342            .          .           	releasem(mp) 
   1343            .          .            
   1344         10ms       10ms           	if checkGCTrigger { 
   1345            .          .           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1346            .          .           			gcStart(t) 
   1347            .          .           		} 
   1348            .          .           	} 
   1349         10ms       10ms           	return x, size 
   1350            .          .           } 

runtime.mallocgcSmallScanNoHeader

/usr/lib/go/src/runtime/malloc.go

  Total:       2.57s      5.93s (flat, cum) 12.41%
   1351            .          .            
   1352         90ms      220ms           func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1353            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1354        130ms      130ms           	mp := acquirem() 
   1355            .          .           	if doubleCheckMalloc { 
   1356            .          .           		if mp.mallocing != 0 { 
   1357            .          .           			throw("malloc deadlock") 
   1358            .          .           		} 
   1359            .          .           		if mp.gsignal == getg() { 
   1360            .          .           			throw("malloc during signal") 
   1361            .          .           		} 
   1362            .          .           		if typ == nil || !typ.Pointers() { 
   1363            .          .           			throw("noscan allocated in scan-only path") 
   1364            .          .           		} 
   1365            .          .           		if !heapBitsInSpan(size) { 
   1366            .          .           			throw("heap bits in not in span for non-header-only path") 
   1367            .          .           		} 
   1368            .          .           	} 
   1369            .          .           	mp.mallocing = 1 
   1370            .          .            
   1371            .          .           	checkGCTrigger := false 
   1372        230ms      230ms           	c := getMCache(mp) 
   1373         70ms       70ms           	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1374        150ms      150ms           	spc := makeSpanClass(sizeclass, false) 
   1375        120ms      120ms           	span := c.alloc[spc] 
   1376        810ms      810ms           	v := nextFreeFast(span) 
   1377         20ms       20ms           	if v == 0 { 
   1378            .      1.80s           		v, span, checkGCTrigger = c.nextFree(spc) 
   1379            .          .           	} 
   1380            .          .           	x := unsafe.Pointer(v) 
   1381         10ms       10ms           	if span.needzero != 0 { 
   1382         20ms      510ms           		memclrNoHeapPointers(x, size) 
   1383            .          .           	} 
   1384         50ms       50ms           	if goarch.PtrSize == 8 && sizeclass == 1 { 
   1385            .          .           		// initHeapBits already set the pointer bits for the 8-byte sizeclass 
   1386            .          .           		// on 64-bit platforms. 
   1387         20ms       20ms           		c.scanAlloc += 8 
   1388            .          .           	} else { 
   1389        190ms      1.03s           		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span)                                                               scanSize := span.writeHeapBitsSmall(x, dataSize, typ)        mbitmap.go:709

   1390            .          .           	} 
   1391        100ms      100ms           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1392            .          .            
   1393            .          .           	// Ensure that the stores above that initialize x to 
   1394            .          .           	// type-safe memory and set the heap bits occur before 
   1395            .          .           	// the caller can make x observable to the garbage 
   1396            .          .           	// collector. Otherwise, on weakly ordered machines, 
   1397            .          .           	// the garbage collector could follow a pointer to x, 
   1398            .          .           	// but see uninitialized memory or stale heap bits. 
   1399         50ms       50ms           	publicationBarrier() 
   1400            .          .            
   1401         30ms       30ms           	if writeBarrier.enabled { 
   1402            .          .           		// Allocate black during GC. 
   1403            .          .           		// All slots hold nil so no scanning is needed. 
   1404            .          .           		// This may be racing with GC so do it atomically if there can be 
   1405            .          .           		// a race marking the bit. 
   1406            .          .           		gcmarknewobject(span, uintptr(x)) 
   1407            .          .           	} else { 
   1408            .          .           		// Track the last free index before the mark phase. This field 
   1409            .          .           		// is only used by the garbage collector. During the mark phase 
   1410            .          .           		// this is used by the conservative scanner to filter out objects 
   1411            .          .           		// that are both free and recently-allocated. It's safe to do that 
   1412            .          .           		// because we allocate-black if the GC is enabled. The conservative 
   1413            .          .           		// scanner produces pointers out of thin air, so without additional 
   1414            .          .           		// synchronization it might otherwise observe a partially-initialized 
   1415            .          .           		// object, which could crash the program. 
   1416         80ms       80ms           		span.freeIndexForScan = span.freeindex 
   1417            .          .           	} 
   1418            .          .            
   1419            .          .           	// Note cache c only valid while m acquired; see #47302 
   1420            .          .           	// 
   1421            .          .           	// N.B. Use the full size because that matches how the GC 
   1422            .          .           	// will update the mem profile on the "free" side. 
   1423            .          .           	// 
   1424            .          .           	// TODO(mknyszek): We should really count the header as part 
   1425            .          .           	// of gc_sys or something. The code below just pretends it is 
   1426            .          .           	// internal fragmentation and matches the GC's accounting by 
   1427            .          .           	// using the whole allocation slot. 
   1428         20ms       20ms           	c.nextSample -= int64(size) 
   1429         20ms       20ms           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1430        100ms      150ms           		profilealloc(mp, x, size) 
   1431            .          .           	} 
   1432         30ms       30ms           	mp.mallocing = 0 
   1433         70ms       70ms           	releasem(mp)                                                       mp.locks--                                                           runtime1.go:637
                                                       if mp.locks == 0 && gp.preempt {                                     runtime1.go:638

   1434            .          .            
   1435        120ms      120ms           	if checkGCTrigger { 
   1436            .       50ms           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1437            .          .           			gcStart(t) 
   1438            .          .           		} 
   1439            .          .           	} 
   1440         40ms       40ms           	return x, size 
   1441            .          .           } 
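
Nearly all of nextFreeFast's 810ms above is the allocCache bitmap scan: a trailing-zeros instruction finds the lowest free slot, and a shift consumes it. A minimal user-space sketch of that scan (freeSlotCache and its fields are illustrative stand-ins for the runtime's mspan state, not the runtime's code):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // freeSlotCache is a toy analogue of an mspan's allocation state:
    // a 1 bit in cache means "the slot at freeindex+i is free".
    type freeSlotCache struct {
    	cache     uint64 // window of free/used bits, shifted as slots are consumed
    	freeindex uint16 // slot index corresponding to bit 0 of cache
    }

    // nextFree returns the next free slot index, or ok=false when the
    // 64-slot window is exhausted (the runtime would then refill the
    // cache or grab a new span via the nextFree slow path).
    func (c *freeSlotCache) nextFree() (idx uint16, ok bool) {
    	tz := bits.TrailingZeros64(c.cache) // position of the lowest free slot
    	if tz == 64 {
    		return 0, false
    	}
    	idx = c.freeindex + uint16(tz)
    	c.cache >>= uint(tz + 1) // drop the consumed slot and the used ones below it
    	c.freeindex = idx + 1
    	return idx, true
    }

    func main() {
    	c := freeSlotCache{cache: 0b1011_0100} // slots 2, 4, 5, 7 free
    	for {
    		idx, ok := c.nextFree()
    		if !ok {
    			break
    		}
    		fmt.Println("allocated slot", idx) // prints 2, 4, 5, 7
    	}
    }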

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1442            .          .            
   1443         10ms       10ms           func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1444            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1445            .          .           	mp := acquirem() 
   1446            .          .           	if doubleCheckMalloc { 
   1447            .          .           		if mp.mallocing != 0 { 
   1448            .          .           			throw("malloc deadlock") 

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        80ms      530ms (flat, cum)  1.11%
   1462            .          .           	checkGCTrigger := false 
   1463            .          .           	c := getMCache(mp) 
   1464            .          .           	size += gc.MallocHeaderSize 
   1465            .          .           	var sizeclass uint8 
   1466            .          .           	if size <= gc.SmallSizeMax-8 { 
   1467         10ms       10ms           		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1468            .          .           	} else { 
   1469            .          .           		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 
   1470            .          .           	} 
   1471            .          .           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1472         10ms       10ms           	spc := makeSpanClass(sizeclass, false) 
   1473            .          .           	span := c.alloc[spc] 
   1474         40ms       40ms           	v := nextFreeFast(span)                                                       theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? malloc.go:932

   1475            .          .           	if v == 0 { 
   1476            .      380ms           		v, span, checkGCTrigger = c.nextFree(spc) 
   1477            .          .           	} 
   1478            .          .           	x := unsafe.Pointer(v) 
   1479            .          .           	if span.needzero != 0 { 
   1480            .       70ms           		memclrNoHeapPointers(x, size) 
   1481            .          .           	} 
   1482            .          .           	header := (**_type)(x) 
   1483         20ms       20ms           	x = add(x, gc.MallocHeaderSize) 
   1484            .          .           	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span) 
   1485            .          .            
   1486            .          .           	// Ensure that the stores above that initialize x to 
   1487            .          .           	// type-safe memory and set the heap bits occur before 
   1488            .          .           	// the caller can make x observable to the garbage 

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        20ms       30ms (flat, cum) 0.063%
   1517            .          .           	// TODO(mknyszek): We should really count the header as part 
   1518            .          .           	// of gc_sys or something. The code below just pretends it is 
   1519            .          .           	// internal fragmentation and matches the GC's accounting by 
   1520            .          .           	// using the whole allocation slot. 
   1521            .          .           	c.nextSample -= int64(size) 
   1522         10ms       10ms           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1523            .       10ms           		profilealloc(mp, x, size) 
   1524            .          .           	} 
   1525            .          .           	mp.mallocing = 0 
   1526         10ms       10ms           	releasem(mp)                                                       if mp.locks == 0 && gp.preempt {                                     runtime1.go:638

   1527            .          .            
   1528            .          .           	if checkGCTrigger { 
   1529            .          .           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1530            .          .           			gcStart(t) 
   1531            .          .           		} 
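
The header variant above differs from the no-header path in one step: it reserves gc.MallocHeaderSize bytes at the front of the slot for a *_type pointer (header := (**_type)(x)) and hands the caller the shifted pointer, so the GC can recover the element type without per-object heap bitmap bits. A sketch of that layout, assuming the 8-byte header implied by the listing (the tag value and byte-slice backing are purely illustrative):

    package main

    import (
    	"fmt"
    	"unsafe"
    )

    const mallocHeaderSize = 8 // gc.MallocHeaderSize on 64-bit, per the listing

    // Layout of one "scan with header" allocation slot:
    //   [ *_type header | object data ... ]
    // The slot is sized by the size class; the header is carved off the
    // front and the caller only ever sees the shifted pointer.
    func main() {
    	slot := make([]byte, 48) // pretend this is the raw size-class slot
    	base := unsafe.Pointer(&slot[0])

    	// header := (**_type)(x); here we just store a tag in its place.
    	header := (*uintptr)(base)
    	*header = 0xdeadbeef

    	// x = add(x, gc.MallocHeaderSize): the object starts after the header.
    	obj := unsafe.Add(base, mallocHeaderSize)
    	fmt.Printf("slot=%p header=%p obj=%p (obj-slot=%d)\n",
    		base, header, obj, uintptr(obj)-uintptr(base))
    }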

runtime.newobject

/usr/lib/go/src/runtime/malloc.go

  Total:       330ms      5.37s (flat, cum) 11.24%
   1741            .          .           } 
   1742            .          .            
   1743            .          .           // implementation of new builtin 
   1744            .          .           // compiler (both frontend and SSA backend) knows the signature 
   1745            .          .           // of this function. 
   1746        100ms      110ms           func newobject(typ *_type) unsafe.Pointer { 
   1747        230ms      5.26s           	return mallocgc(typ.Size_, typ, true) 
   1748            .          .           } 
   1749            .          .            
   1750            .          .           //go:linkname maps_newobject internal/runtime/maps.newobject 
   1751            .          .           func maps_newobject(typ *_type) unsafe.Pointer { 
   1752            .          .           	return newobject(typ) 
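
newobject is the runtime half of the new builtin: the compiler knows its signature and lowers every new(T) (and escaping composite literals) to mallocgc(typ.Size_, typ, true), with needzero=true so the memory comes back zeroed. The 5.26s cumulative above is the aggregate of such call sites; for example:

    package main

    import "fmt"

    type point struct{ x, y int }

    func main() {
    	// The compiler lowers this to a runtime.newobject call with
    	// point's *_type descriptor.
    	p := new(point)
    	fmt.Println(p.x, p.y) // 0 0: mallocgc returned zeroed memory
    }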

runtime.newarray

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms      1.13s (flat, cum)  2.37%
   1786            .          .           // See go.dev/issue/67401. 
   1787            .          .           // 
   1788            .          .           //go:linkname newarray 
   1789            .          .           func newarray(typ *_type, n int) unsafe.Pointer { 
   1790            .          .           	if n == 1 { 
   1791         10ms      830ms           		return mallocgc(typ.Size_, typ, true) 
   1792            .          .           	} 
   1793            .          .           	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) 
   1794            .          .           	if overflow || mem > maxAlloc || n < 0 { 
   1795            .          .           		panic(plainError("runtime: allocation size out of range")) 
   1796            .          .           	} 
   1797            .      300ms           	return mallocgc(mem, typ, true) 
   1798            .          .           } 
   1799            .          .            
   1800            .          .           // reflect_unsafe_NewArray is meant for package reflect, 
   1801            .          .           // but widely used packages access it using linkname. 
   1802            .          .           // Notable members of the hall of shame include: 
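
The n != 1 path guards the element-count multiply with math.MulUintptr before allocating. A user-space equivalent of that overflow check, using math/bits instead of the runtime-internal helper (sketch; the constants assume a 64-bit uintptr):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // mulUintptr reports the product a*b and whether it overflowed,
    // mirroring the math.MulUintptr guard in newarray above.
    func mulUintptr(a, b uintptr) (uintptr, bool) {
    	hi, lo := bits.Mul(uint(a), uint(b))
    	return uintptr(lo), hi != 0
    }

    func main() {
    	mem, overflow := mulUintptr(1<<40, 1<<30) // 2^70 does not fit in 64 bits
    	fmt.Println(mem, overflow)                // 64 true (the wrapped product)
    }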

internal/runtime/maps.newarray

/usr/lib/go/src/runtime/malloc.go

  Total:        20ms      1.15s (flat, cum)  2.41%
   1815            .          .           func reflect_unsafe_NewArray(typ *_type, n int) unsafe.Pointer { 
   1816            .          .           	return newarray(typ, n) 
   1817            .          .           } 
   1818            .          .            
   1819            .          .           //go:linkname maps_newarray internal/runtime/maps.newarray 
   1820         20ms       20ms           func maps_newarray(typ *_type, n int) unsafe.Pointer { 
   1821            .      1.13s           	return newarray(typ, n) 
   1822            .          .           } 
   1823            .          .            
   1824            .          .           // profilealloc resets the current mcache's nextSample counter and 
   1825            .          .           // records a memory profile sample. 
   1826            .          .           // 

runtime.profilealloc

/usr/lib/go/src/runtime/malloc.go

  Total:           0       70ms (flat, cum)  0.15%
   1830            .          .           	if c == nil { 
   1831            .          .           		throw("profilealloc called without a P or outside bootstrapping") 
   1832            .          .           	} 
   1833            .          .           	c.memProfRate = MemProfileRate 
   1834            .          .           	c.nextSample = nextSample() 
   1835            .       70ms           	mProf_Malloc(mp, x, size) 
   1836            .          .           } 
   1837            .          .            
   1838            .          .           // nextSample returns the next sampling point for heap profiling. The goal is 
   1839            .          .           // to sample allocations on average every MemProfileRate bytes, but with a 
   1840            .          .           // completely random distribution over the allocation timeline; this 
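
The comment above describes the sampling scheme: the distance to the next sample is drawn from an exponential distribution with mean MemProfileRate bytes, which is what makes the profile unbiased with respect to allocation size and timing. A minimal sketch of that draw (not the runtime's exact code, which uses a fixed-point approximation to avoid floating point):

    package main

    import (
    	"fmt"
    	"math"
    	"math/rand/v2"
    )

    // nextSampleSketch returns a byte distance to the next heap-profile
    // sample, exponentially distributed with mean rate bytes.
    func nextSampleSketch(rate int64) int64 {
    	// rand.Float64 is in [0, 1); use 1-u so the log argument is in (0, 1].
    	u := 1 - rand.Float64()
    	return int64(-math.Log(u) * float64(rate))
    }

    func main() {
    	const rate = 512 * 1024 // the default runtime.MemProfileRate
    	var sum int64
    	const n = 100000
    	for i := 0; i < n; i++ {
    		sum += nextSampleSketch(rate)
    	}
    	fmt.Println("mean distance ≈", sum/n) // ≈ rate
    }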

runtime.(*moduledata).textAddr

/usr/lib/go/src/runtime/symtab.go

  Total:       260ms      260ms (flat, cum)  0.54%
    683            .          .           // relocated baseaddr to compute the function address. 
    684            .          .           // 
    685            .          .           // It is nosplit because it is part of the findfunc implementation. 
    686            .          .           // 
    687            .          .           //go:nosplit 
    688         20ms       20ms           func (md *moduledata) textAddr(off32 uint32) uintptr { 
    689         50ms       50ms           	off := uintptr(off32) 
    690         60ms       60ms           	res := md.text + off 
    691         40ms       40ms           	if len(md.textsectmap) > 1 { 
    692            .          .           		for i, sect := range md.textsectmap { 
    693            .          .           			// For the last section, include the end address (etext), as it is included in the functab. 
    694            .          .           			if off >= sect.vaddr && off < sect.end || (i == len(md.textsectmap)-1 && off == sect.end) { 
    695            .          .           				res = sect.baseaddr + off - sect.vaddr 
    696            .          .           				break 
    697            .          .           			} 
    698            .          .           		} 
    699            .          .           		if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory 
    700            .          .           			println("runtime: textAddr", hex(res), "out of range", hex(md.text), "-", hex(md.etext)) 
    701            .          .           			throw("runtime: text offset out of range") 
    702            .          .           		} 
    703            .          .           	} 
    704            .          .           	if GOARCH == "wasm" { 
    705            .          .           		// On Wasm, a text offset (e.g. in the method table) is function index, whereas 
    706            .          .           		// the "PC" is function index << 16 + block index. 
    707            .          .           		res <<= 16 
    708            .          .           	} 
    709         90ms       90ms           	return res 
    710            .          .           } 
    711            .          .            
    712            .          .           // textOff is the opposite of textAddr. It converts a PC to a (virtual) offset 
    713            .          .           // to md.text, and returns if the PC is in any Go text section. 
    714            .          .           // 
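
With a single text section the mapping above is just md.text+off; with multiple sections (the linker splits very large text), the offset is relocated through textsectmap. A sketch of that relocation with made-up section values (types and numbers are illustrative, paralleling the listing):

    package main

    import "fmt"

    // textsect mirrors the fields textAddr consults: the section's range
    // in "offset space" and its relocated base address.
    type textsect struct {
    	vaddr, end, baseaddr uintptr
    }

    // textAddrSketch resolves a 32-bit text offset to an address the way
    // the multi-section branch above does.
    func textAddrSketch(text uintptr, sects []textsect, off32 uint32) uintptr {
    	off := uintptr(off32)
    	res := text + off
    	if len(sects) > 1 {
    		for i, sect := range sects {
    			if off >= sect.vaddr && off < sect.end ||
    				(i == len(sects)-1 && off == sect.end) {
    				res = sect.baseaddr + off - sect.vaddr
    				break
    			}
    		}
    	}
    	return res
    }

    func main() {
    	sects := []textsect{
    		{vaddr: 0, end: 0x1000, baseaddr: 0x400000},
    		{vaddr: 0x1000, end: 0x2000, baseaddr: 0x500000},
    	}
    	fmt.Printf("%#x\n", textAddrSketch(0x400000, sects, 0x1800)) // 0x500800
    }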

runtime.(*moduledata).textOff

/usr/lib/go/src/runtime/symtab.go

  Total:        10ms       10ms (flat, cum) 0.021%
    721            .          .           		// On Wasm, the func table contains the function index, whereas 
    722            .          .           		// the "PC" is function index << 16 + block index. 
    723            .          .           		off >>= 16 
    724            .          .           	} 
    725            .          .           	res := uint32(off) 
    726         10ms       10ms           	if len(md.textsectmap) > 1 { 
    727            .          .           		if GOARCH == "wasm" { 
    728            .          .           			fatal("unexpected multiple text sections on Wasm") 
    729            .          .           		} 
    730            .          .           		for i, sect := range md.textsectmap { 
    731            .          .           			if sect.baseaddr > pc { 

runtime.(*moduledata).funcName

/usr/lib/go/src/runtime/symtab.go

  Total:           0       80ms (flat, cum)  0.17%
    749            .          .           // funcName returns the string at nameOff in the function name table. 
    750            .          .           func (md *moduledata) funcName(nameOff int32) string { 
    751            .          .           	if nameOff == 0 { 
    752            .          .           		return "" 
    753            .          .           	} 
    754            .       80ms           	return gostringnocopy(&md.funcnametab[nameOff])                                                       ss := stringStruct{str: unsafe.Pointer(str), len: findnull(str)}     string.go:538

    755            .          .           } 
    756            .          .            
    757            .          .           // Despite being an exported symbol, 
    758            .          .           // FuncForPC is linknamed by widely used packages. 
    759            .          .           // Notable members of the hall of shame include: 

runtime.findmoduledatap

/usr/lib/go/src/runtime/symtab.go

  Total:        40ms       40ms (flat, cum) 0.084%
    854            .          .           // implementation. 
    855            .          .           // 
    856            .          .           //go:nosplit 
    857            .          .           func findmoduledatap(pc uintptr) *moduledata { 
    858            .          .           	for datap := &firstmoduledata; datap != nil; datap = datap.next { 
    859         40ms       40ms           		if datap.minpc <= pc && pc < datap.maxpc { 
    860            .          .           			return datap 
    861            .          .           		} 
    862            .          .           	} 
    863            .          .           	return nil 
    864            .          .           } 

runtime.funcInfo.entry

/usr/lib/go/src/runtime/symtab.go

  Total:       230ms      490ms (flat, cum)  1.03%
    889            .          .           //   - github.com/phuslu/log 
    890            .          .           // 
    891            .          .           // Do not remove or change the type signature. 
    892            .          .           // See go.dev/issue/67401. 
    893            .          .           func (f funcInfo) entry() uintptr { 
    894        230ms      490ms           	return f.datap.textAddr(f.entryOff) 
    895            .          .           } 
    896            .          .            
    897            .          .           //go:linkname badFuncInfoEntry runtime.funcInfo.entry 
    898            .          .           func badFuncInfoEntry(funcInfo) uintptr 
    899            .          .            

runtime.findfunc

/usr/lib/go/src/runtime/symtab.go

  Total:       530ms      530ms (flat, cum)  1.11%
    911            .          .           // See go.dev/issue/67401. 
    912            .          .           // 
    913            .          .           //go:nosplit 
    914            .          .           //go:linkname findfunc 
    915            .          .           func findfunc(pc uintptr) funcInfo { 
    916         40ms       40ms           	datap := findmoduledatap(pc)                                                       if datap.minpc <= pc && pc < datap.maxpc {                           symtab.go:859

    917            .          .           	if datap == nil { 
    918            .          .           		return funcInfo{} 
    919            .          .           	} 
    920            .          .           	const nsub = uintptr(len(findfuncbucket{}.subbuckets)) 
    921            .          .            
    922         10ms       10ms           	pcOff, ok := datap.textOff(pc)                                                       if len(md.textsectmap) > 1 {                                         symtab.go:726

    923            .          .           	if !ok { 
    924            .          .           		return funcInfo{} 
    925            .          .           	} 
    926            .          .            
    927         40ms       40ms           	x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal? 
    928            .          .           	if GOARCH == "wasm" { 
    929            .          .           		// On Wasm, pcOff is the function index, whereas 
    930            .          .           		// the "PC" is function index << 16 + block index. 
    931            .          .           		x = uintptr(pcOff)<<16 + datap.text - datap.minpc 
    932            .          .           	} 
    933            .          .           	b := x / abi.FuncTabBucketSize 
    934            .          .           	i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub) 
    935            .          .            
    936            .          .           	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{}))) 
    937        190ms      190ms           	idx := ffb.idx + uint32(ffb.subbuckets[i]) 
    938            .          .            
    939            .          .           	// Find the ftab entry. 
    940        200ms      200ms           	for datap.ftab[idx+1].entryoff <= pcOff { 
    941            .          .           		idx++ 
    942            .          .           	} 
    943            .          .            
    944         50ms       50ms           	funcoff := datap.ftab[idx].funcoff 
    945            .          .           	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[funcoff])), datap} 
    946            .          .           } 
    947            .          .            
    948            .          .           // A srcFunc represents a logical function in the source code. This may 
    949            .          .           // correspond to an actual symbol in the binary text, or it may correspond to a 
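
findfunc turns a PC into a funcInfo via a two-level index: the text range is cut into fixed-size buckets (abi.FuncTabBucketSize bytes), each bucket stores a base index into ftab plus small per-subbucket deltas, and a short forward linear scan over entry offsets finishes the job — exactly the idx computation and loop profiled above. A sketch of that lookup over a simplified table (bucket sizes and field names are assumptions paralleling the listing):

    package main

    import "fmt"

    const (
    	bucketSize = 4096 // stand-in for abi.FuncTabBucketSize
    	nsub       = 16   // subbuckets per bucket, as in findfuncbucket
    	subSize    = bucketSize / nsub
    )

    type findfuncbucket struct {
    	idx        uint32
    	subbuckets [nsub]byte
    }

    // findIdx maps a text offset to an index into a sorted table of
    // function entry offsets. Each subbucket stores a lower bound, so the
    // final scan only ever moves forward.
    func findIdx(buckets []findfuncbucket, entryoff []uint32, x uint32) uint32 {
    	b := x / bucketSize
    	i := x % bucketSize / subSize
    	ffb := buckets[b]
    	idx := ffb.idx + uint32(ffb.subbuckets[i])
    	for entryoff[idx+1] <= x { // scan forward to the covering function
    		idx++
    	}
    	return idx
    }

    func main() {
    	// Functions starting at offsets 0, 100, 300; sentinel terminates the scan.
    	entryoff := []uint32{0, 100, 300, ^uint32(0)}
    	buckets := []findfuncbucket{
    		{idx: 0, subbuckets: [nsub]byte{0, 1, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2, 2}},
    	}
    	fmt.Println(findIdx(buckets, entryoff, 350)) // 2: the function starting at 300
    }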

runtime.pcvalue

/usr/lib/go/src/runtime/symtab.go

  Total:       1.47s      2.65s (flat, cum)  5.55%
   1000            .          .           func pcvalueCacheKey(targetpc uintptr) uintptr { 
   1001            .          .           	return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries)) 
   1002            .          .           } 
   1003            .          .            
   1004            .          .           // Returns the PCData value, and the PC where this value starts. 
   1005         20ms       20ms           func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr) { 
   1006            .          .           	// If true, when we get a cache hit, still look up the data and make sure it 
   1007            .          .           	// matches the cached contents. 
   1008            .          .           	const debugCheckCache = false 
   1009            .          .            
   1010            .          .           	// If true, skip checking the cache entirely. 
   1011            .          .           	const skipCache = false 
   1012            .          .            
   1013         20ms       20ms           	if off == 0 { 
   1014            .          .           		return -1, 0 
   1015            .          .           	} 
   1016            .          .            
   1017            .          .           	// Check the cache. This speeds up walks of deep stacks, which 
   1018            .          .           	// tend to have the same recursive functions over and over, 
   1019            .          .           	// or repetitive stacks between goroutines. 
   1020            .          .           	var checkVal int32 
   1021            .          .           	var checkPC uintptr 
   1022            .          .           	ck := pcvalueCacheKey(targetpc) 
   1023            .          .           	if !skipCache { 
   1024         50ms       50ms           		mp := acquirem()                                                               gp.m.locks++                                                 runtime1.go:630
   1025         10ms       10ms           		cache := &mp.pcvalueCache 
   1026            .          .           		// The cache can be used by the signal handler on this M. Avoid 
   1027            .          .           		// re-entrant use of the cache. The signal handler can also write inUse, 
   1028            .          .           		// but will always restore its value, so we can use a regular increment 
   1029            .          .           		// even if we get signaled in the middle of it. 
   1030         20ms       20ms           		cache.inUse++ 
   1031            .          .           		if cache.inUse == 1 { 
   1032        130ms      130ms           			for i := range cache.entries[ck] { 
   1033            .          .           				// We check off first because we're more 
   1034            .          .           				// likely to have multiple entries with 
   1035            .          .           				// different offsets for the same targetpc 
   1036            .          .           				// than the other way around, so we'll usually 
   1037            .          .           				// fail in the first clause. 
   1038         40ms       40ms           				ent := &cache.entries[ck][i] 
   1039        240ms      240ms           				if ent.off == off && ent.targetpc == targetpc { 
   1040         10ms       10ms           					val, pc := ent.val, ent.valPC 
   1041            .          .           					if debugCheckCache { 
   1042            .          .           						checkVal, checkPC = ent.val, ent.valPC 
   1043            .          .           						break 
   1044            .          .           					} else { 
   1045            .          .           						cache.inUse-- 
   1046            .          .           						releasem(mp) 
   1047            .          .           						return val, pc 
   1048            .          .           					} 
   1049            .          .           				} 
   1050            .          .           			} 
   1051            .          .           		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) { 
   1052            .          .           			// Catch accounting errors or deeply reentrant use. In principle 
   1053            .          .           			// "inUse" should never exceed 2. 
   1054            .          .           			throw("cache.inUse out of range") 
   1055            .          .           		} 
   1056            .          .           		cache.inUse-- 
   1057         10ms       10ms           		releasem(mp)                                                               mp.locks--                                                   runtime1.go:637

   1058            .          .           	} 
   1059            .          .            
   1060            .          .           	if !f.valid() { 
   1061            .          .           		if strict && panicking.Load() == 0 { 
   1062            .          .           			println("runtime: no module data for", hex(f.entry())) 
   1063            .          .           			throw("no module data") 
   1064            .          .           		} 
   1065            .          .           		return -1, 0 
   1066            .          .           	} 
   1067            .          .           	datap := f.datap 
   1068         50ms       50ms           	p := datap.pctab[off:] 
   1069         20ms       90ms           	pc := f.entry()                                                       return f.datap.textAddr(f.entryOff)                                  symtab.go:894

   1070            .          .           	prevpc := pc 
   1071            .          .           	val := int32(-1) 
   1072            .          .           	for { 
   1073            .          .           		var ok bool 
   1074        300ms      1.41s           		p, ok = step(p, &pc, &val, pc == f.entry())                                                               return f.datap.textAddr(f.entryOff)                          symtab.go:894

   1075         20ms       20ms           		if !ok { 
   1076            .          .           			break 
   1077            .          .           		} 
   1078        160ms      160ms           		if targetpc < pc { 
   1079            .          .           			// Replace a random entry in the cache. Random 
   1080            .          .           			// replacement prevents a performance cliff if 
   1081            .          .           			// a recursive stack's cycle is slightly 
   1082            .          .           			// larger than the cache. 
   1083            .          .           			// Put the new element at the beginning, 
   1084            .          .           			// since it is the most likely to be newly used. 
   1085            .          .           			if debugCheckCache && checkPC != 0 { 
   1086            .          .           				if checkVal != val || checkPC != prevpc { 
   1087            .          .           					print("runtime: table value ", val, "@", prevpc, " != cache value ", checkVal, "@", checkPC, " at PC ", targetpc, " off ", off, "\n") 
   1088            .          .           					throw("bad pcvalue cache") 
   1089            .          .           				} 
   1090            .          .           			} else { 
   1091         40ms       40ms           				mp := acquirem()                                                                               gp.m.locks++                                 runtime1.go:630
   1092            .          .           				cache := &mp.pcvalueCache 
   1093         30ms       30ms           				cache.inUse++ 
   1094            .          .           				if cache.inUse == 1 { 
   1095         10ms       10ms           					e := &cache.entries[ck] 
   1096         30ms       30ms           					ci := cheaprandn(uint32(len(cache.entries[ck])))                                                                                       return uint32((uint64(cheaprand()) * uint64(n)) >> 32) rand.go:293
                                                                                           mp.cheaprand += 0xa0761d6478bd642f                     rand.go:235
                                                                                           hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db) rand.go:236

   1097        180ms      180ms           					e[ci] = e[0] 
   1098            .          .           					e[0] = pcvalueCacheEnt{ 
   1099            .          .           						targetpc: targetpc, 
   1100            .          .           						off:      off, 
   1101            .          .           						val:      val, 
   1102            .          .           						valPC:    prevpc, 
   1103            .          .           					} 
   1104            .          .           				} 
   1105         10ms       10ms           				cache.inUse-- 
   1106         20ms       20ms           				releasem(mp)                                                                               if mp.locks == 0 && gp.preempt {             runtime1.go:638
   1107            .          .           			} 
   1108            .          .            
   1109         50ms       50ms           			return val, prevpc 
   1110            .          .           		} 
   1111            .          .           		prevpc = pc 
   1112            .          .           	} 
   1113            .          .            
   1114            .          .           	// If there was a table, it should have covered all program counters. 
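
Two details dominate pcvalue's profile: the per-M cache probe at the top (the 240ms compare at line 1039) and the table walk via step. On a miss, the cache uses random replacement, which the comment justifies as avoiding a performance cliff when a recursive cycle is slightly larger than the cache. A toy version of that policy (sketch; the real cache is per-M, signal-safe, and uses the runtime's cheaprand):

    package main

    import (
    	"fmt"
    	"math/rand/v2"
    )

    type cacheEnt struct {
    	targetpc, valPC uintptr
    	off             uint32
    	val             int32
    }

    // bucket is one associative set, like pcvalueCache's entries[ck].
    type bucket [8]cacheEnt

    // insert puts e at the front and evicts a random victim, mirroring
    // pcvalue's "e[ci] = e[0]; e[0] = new entry" replacement above.
    func (b *bucket) insert(e cacheEnt) {
    	ci := rand.Uint32N(uint32(len(b)))
    	b[ci] = b[0]
    	b[0] = e
    }

    func main() {
    	var b bucket
    	for pc := uintptr(0x1000); pc < 0x1010; pc++ {
    		b.insert(cacheEnt{targetpc: pc})
    	}
    	fmt.Printf("%#x\n", b[0].targetpc) // the most recent insert is always at the front
    }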

runtime.funcname

/usr/lib/go/src/runtime/symtab.go

  Total:           0       80ms (flat, cum)  0.17%
   1137            .          .            
   1138            .          .           func funcname(f funcInfo) string { 
   1139            .          .           	if !f.valid() { 
   1140            .          .           		return "" 
   1141            .          .           	} 
   1142            .       80ms           	return f.datap.funcName(f.nameOff) 
   1143            .          .           } 
   1144            .          .            
   1145            .          .           func funcpkgpath(f funcInfo) string { 
   1146            .          .           	name := funcNameForPrint(funcname(f)) 
   1147            .          .           	i := len(name) - 1 

runtime.funcspdelta

/usr/lib/go/src/runtime/symtab.go

  Total:           0      1.49s (flat, cum)  3.12%
   1198            .          .           func funcline(f funcInfo, targetpc uintptr) (file string, line int32) { 
   1199            .          .           	return funcline1(f, targetpc, true) 
   1200            .          .           } 
   1201            .          .            
   1202            .          .           func funcspdelta(f funcInfo, targetpc uintptr) int32 { 
   1203            .      1.49s           	x, _ := pcvalue(f, f.pcsp, targetpc, true) 
   1204            .          .           	if debugPcln && x&(goarch.PtrSize-1) != 0 { 
   1205            .          .           		print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n") 
   1206            .          .           		throw("bad spdelta") 
   1207            .          .           	} 
   1208            .          .           	return x 

runtime.funcMaxSPDelta

/usr/lib/go/src/runtime/symtab.go

  Total:       100ms      250ms (flat, cum)  0.52%
   1209            .          .           } 
   1210            .          .            
   1211            .          .           // funcMaxSPDelta returns the maximum spdelta at any point in f. 
   1212            .          .           func funcMaxSPDelta(f funcInfo) int32 { 
   1213            .          .           	datap := f.datap 
   1214         20ms       20ms           	p := datap.pctab[f.pcsp:] 
   1215            .          .           	pc := f.entry() 
   1216            .          .           	val := int32(-1) 
   1217            .          .           	most := int32(0) 
   1218            .          .           	for { 
   1219            .          .           		var ok bool 
   1220         80ms      230ms           		p, ok = step(p, &pc, &val, pc == f.entry())                                                               return f.datap.textAddr(f.entryOff)                          symtab.go:894

   1221            .          .           		if !ok { 
   1222            .          .           			return most 
   1223            .          .           		} 
   1224            .          .           		most = max(most, val) 

runtime.pcdatastart

/usr/lib/go/src/runtime/symtab.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1225            .          .           	} 
   1226            .          .           } 
   1227            .          .            
   1228            .          .           func pcdatastart(f funcInfo, table uint32) uint32 { 
   1229         20ms       20ms           	return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) 
   1230            .          .           } 

runtime.pcdatavalue

/usr/lib/go/src/runtime/symtab.go

  Total:        70ms      1.22s (flat, cum)  2.55%
   1231            .          .            
   1232         10ms       10ms           func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32 { 
   1233            .          .           	if table >= f.npcdata { 
   1234            .          .           		return -1 
   1235            .          .           	} 
   1236         60ms      1.21s           	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, true)             return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) symtab.go:1229
   1237            .          .           	return r 
   1238            .          .           } 
   1239            .          .            

runtime.pcdatavalue1

/usr/lib/go/src/runtime/symtab.go

  Total:           0       10ms (flat, cum) 0.021%
   1241            .          .           	if table >= f.npcdata { 
   1242            .          .           		return -1 
   1243            .          .           	} 
   1244            .       10ms           	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, strict) 
   1245            .          .           	return r 
   1246            .          .           } 
   1247            .          .            
   1248            .          .           // Like pcdatavalue, but also return the start PC of this PCData value. 
   1249            .          .           func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) { 

runtime.funcdata

/usr/lib/go/src/runtime/symtab.go

  Total:       100ms      100ms (flat, cum)  0.21%
   1254            .          .           } 
   1255            .          .            
   1256            .          .           // funcdata returns a pointer to the ith funcdata for f. 
   1257            .          .           // funcdata should be kept in sync with cmd/link:writeFuncs. 
   1258            .          .           func funcdata(f funcInfo, i uint8) unsafe.Pointer { 
   1259         30ms       30ms           	if i < 0 || i >= f.nfuncdata { 
   1260            .          .           		return nil 
   1261            .          .           	} 
   1262         20ms       20ms           	base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses 
   1263            .          .           	p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4 
   1264            .          .           	off := *(*uint32)(unsafe.Pointer(p)) 
   1265            .          .           	// Return off == ^uint32(0) ? 0 : f.datap.gofunc + uintptr(off), but without branches. 
   1266            .          .           	// The compiler calculates mask on most architectures using conditional assignment. 
   1267            .          .           	var mask uintptr 
   1268            .          .           	if off == ^uint32(0) { 
   1269            .          .           		mask = 1 
   1270            .          .           	} 
   1271            .          .           	mask-- 
   1272         50ms       50ms           	raw := base + uintptr(off) 
   1273            .          .           	return unsafe.Pointer(raw & mask) 
   1274            .          .           } 
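
The tail of funcdata is a branchless select: off == ^uint32(0) marks "no funcdata", and rather than branch on the result, the code builds an all-ones or all-zeros mask (the if compiles to a conditional assignment on most architectures, per the comment) and ANDs it in. Isolated, the trick looks like this (sketch):

    package main

    import "fmt"

    // selectOrZero returns base+off, or 0 when off is the ^uint32(0)
    // sentinel: mask underflows to all ones in the normal case and
    // becomes 0 (wiping the sum) in the sentinel case.
    func selectOrZero(base uintptr, off uint32) uintptr {
    	var mask uintptr
    	if off == ^uint32(0) {
    		mask = 1
    	}
    	mask-- // 1-- == 0 (sentinel); 0-- == all ones
    	return (base + uintptr(off)) & mask
    }

    func main() {
    	fmt.Printf("%#x\n", selectOrZero(0x1000, 8))          // 0x1008
    	fmt.Printf("%#x\n", selectOrZero(0x1000, ^uint32(0))) // 0x0
    }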

runtime.step

/usr/lib/go/src/runtime/symtab.go

  Total:       1.09s      1.09s (flat, cum)  2.28%
   1275            .          .            
   1276            .          .           // step advances to the next pc, value pair in the encoded table. 
   1277         70ms       70ms           func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) { 
   1278            .          .           	// For both uvdelta and pcdelta, the common case (~70%) 
   1279            .          .           	// is that they are a single byte. If so, avoid calling readvarint. 
   1280         10ms       10ms           	uvdelta := uint32(p[0]) 
   1281        560ms      560ms           	if uvdelta == 0 && !first { 
   1282            .          .           		return nil, false 
   1283            .          .           	} 
   1284            .          .           	n := uint32(1) 
   1285         10ms       10ms           	if uvdelta&0x80 != 0 { 
   1286         70ms       70ms           		n, uvdelta = readvarint(p)                                                               for {                                                        symtab.go:1304
                                                               b := p[n]                                                    symtab.go:1305
                                                               v |= uint32(b&0x7F) << (shift & 31)                          symtab.go:1307
                                                               shift += 7                                                   symtab.go:1311

   1287            .          .           	} 
   1288         30ms       30ms           	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1)) 
   1289         60ms       60ms           	p = p[n:] 
   1290            .          .            
   1291         80ms       80ms           	pcdelta := uint32(p[0]) 
   1292            .          .           	n = 1 
   1293         90ms       90ms           	if pcdelta&0x80 != 0 { 
   1294         10ms       10ms           		n, pcdelta = readvarint(p)                                                               for {                                                        symtab.go:1304

   1295            .          .           	} 
   1296         50ms       50ms           	p = p[n:] 
   1297         40ms       40ms           	*pc += uintptr(pcdelta * sys.PCQuantum) 
   1298         10ms       10ms           	return p, true 
   1299            .          .           } 
   1300            .          .            
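
step decodes one (value delta, pc delta) pair from the pcdata stream: the value delta is a zig-zag-encoded varint (so small negative deltas stay one byte), the pc delta is an unsigned varint scaled by sys.PCQuantum, and a zero value-delta byte terminates the table. A standalone decoder over a handcrafted table, reusing readvarint as listed below (sketch; pcQuantum assumed 4 as on arm64):

    package main

    import "fmt"

    const pcQuantum = 4 // sys.PCQuantum on arm64

    func readvarint(p []byte) (read, val uint32) {
    	var v, shift, n uint32
    	for {
    		b := p[n]
    		n++
    		v |= uint32(b&0x7F) << (shift & 31)
    		if b&0x80 == 0 {
    			return n, v
    		}
    		shift += 7
    	}
    }

    // step mirrors runtime.step: zig-zag decode the value delta, then
    // advance the pc by pcdelta*pcQuantum.
    func step(p []byte, pc *uintptr, val *int32, first bool) ([]byte, bool) {
    	n, uvdelta := readvarint(p)
    	if uvdelta == 0 && !first {
    		return nil, false
    	}
    	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1)) // zig-zag: 0,-1,1,-2,2,...
    	p = p[n:]
    	n, pcdelta := readvarint(p)
    	*pc += uintptr(pcdelta * pcQuantum)
    	return p[n:], true
    }

    func main() {
    	// (val += +8, pc += 3*4), (val += -2, pc += 5*4), end marker.
    	tab := []byte{16, 3, 3, 5, 0}
    	pc, val := uintptr(0x1000), int32(-1)
    	for p, first := tab, true; ; first = false {
    		var ok bool
    		p, ok = step(p, &pc, &val, first)
    		if !ok {
    			break
    		}
    		fmt.Printf("val=%d up to pc=%#x\n", val, pc) // val=7..0x100c, val=5..0x1020
    	}
    }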

runtime.readvarint

/usr/lib/go/src/runtime/symtab.go

  Total:        80ms       80ms (flat, cum)  0.17%
   1302            .          .           func readvarint(p []byte) (read uint32, val uint32) { 
   1303            .          .           	var v, shift, n uint32 
   1304         50ms       50ms           	for { 
   1305         10ms       10ms           		b := p[n] 
   1306            .          .           		n++ 
   1307         10ms       10ms           		v |= uint32(b&0x7F) << (shift & 31) 
   1308            .          .           		if b&0x80 == 0 { 
   1309            .          .           			break 
   1310            .          .           		} 
   1311         10ms       10ms           		shift += 7 
   1312            .          .           	} 
   1313            .          .           	return n, v 
   1314            .          .           } 
   1315            .          .            
   1316            .          .           type stackmap struct { 

runtime.stackmapdata

/usr/lib/go/src/runtime/symtab.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1325            .          .           	// The invariant is already checked by many of stackmapdata's callers, 
   1326            .          .           	// and disabling it by default allows stackmapdata to be inlined. 
   1327            .          .           	if stackDebug > 0 && (n < 0 || n >= stkmap.n) { 
   1328            .          .           		throw("stackmapdata: index out of range") 
   1329            .          .           	} 
   1330         20ms       20ms           	return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))} 
   1331            .          .           } 
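
stackmapdata is pure index arithmetic: each of the stkmap.n entries is a row of nbit bits, rounded up to whole bytes, so row n starts at byte n*((nbit+7)>>3). The same slicing on a plain byte slice (sketch):

    package main

    import "fmt"

    // row returns the n-th nbit-wide bitmap row out of a packed table,
    // using the same offset math as stackmapdata's addb call above.
    func row(bytedata []byte, nbit, n int) []byte {
    	stride := (nbit + 7) >> 3 // bytes per row, rounding up
    	return bytedata[n*stride : n*stride+stride]
    }

    func main() {
    	// Three rows of 10 bits each -> 2 bytes per row.
    	table := []byte{0xAA, 0x01, 0x55, 0x02, 0xFF, 0x03}
    	fmt.Printf("% x\n", row(table, 10, 1)) // 55 02
    }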

runtime.lock

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        10ms      1.15s (flat, cum)  2.41%
    147            .          .           func mutexContended(l *mutex) bool { 
    148            .          .           	return atomic.Loaduintptr(&l.key)&^mutexMMask != 0 
    149            .          .           } 
    150            .          .            
    151            .          .           func lock(l *mutex) { 
    152         10ms      1.15s           	lockWithRank(l, getLockRank(l))                                                       lock2(l)                                                             lockrank_off.go:24

    153            .          .           } 

runtime.lock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:       800ms      1.15s (flat, cum)  2.41%
    154            .          .            
    155         20ms       20ms           func lock2(l *mutex) { 
    156            .          .           	gp := getg() 
    157         10ms       10ms           	if gp.m.locks < 0 { 
    158            .          .           		throw("runtime·lock: lock count") 
    159            .          .           	} 
    160            .          .           	gp.m.locks++ 
    161            .          .            
    162            .          .           	k8 := key8(&l.key) 
    163            .          .            
    164            .          .           	// Speculative grab for lock. 
    165            .          .           	v8 := atomic.Xchg8(k8, mutexLocked) 
    166        590ms      590ms           	if v8&mutexLocked == 0 { 
    167         20ms       20ms           		if v8&mutexSleeping != 0 { 
    168            .          .           			atomic.Or8(k8, mutexSleeping) 
    169            .          .           		} 
    170         10ms       10ms           		return 
    171            .          .           	} 
    172            .          .           	semacreate(gp.m) 
    173            .          .            
    174            .          .           	var startTime int64 
    175            .          .           	// On uniprocessors, no point spinning. 
    176            .          .           	// On multiprocessors, spin for mutexActiveSpinCount attempts. 
    177            .          .           	spin := 0 
    178         10ms       10ms           	if numCPUStartup > 1 { 
    179            .          .           		spin = mutexActiveSpinCount 
    180            .          .           	} 
    181            .          .            
    182            .          .           	var weSpin, atTail, haveTimers bool 
    183            .          .           	v := atomic.Loaduintptr(&l.key) 
    184            .          .           tryAcquire: 
    185            .          .           	for i := 0; ; i++ { 
    186            .          .           		if v&mutexLocked == 0 { 
    187            .          .           			if weSpin { 
    188            .          .           				next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked 
    189            .          .           				if next&^mutexMMask == 0 { 
    190            .          .           					// The fast-path Xchg8 may have cleared mutexSleeping. Fix 
    191            .          .           					// the hint so unlock2 knows when to use its slow path. 
    192            .          .           					next = next &^ mutexSleeping 
    193            .          .           				} 
    194         50ms       50ms           				if atomic.Casuintptr(&l.key, v, next) { 
    195            .          .           					gp.m.mLockProfile.end(startTime) 
    196            .          .           					return 
    197            .          .           				} 
    198            .          .           			} else { 
    199            .          .           				prev8 := atomic.Xchg8(k8, mutexLocked|mutexSleeping) 
    200         10ms       10ms           				if prev8&mutexLocked == 0 { 
    201            .          .           					gp.m.mLockProfile.end(startTime) 
    202            .          .           					return 
    203            .          .           				} 
    204            .          .           			} 
    205            .          .           			v = atomic.Loaduintptr(&l.key) 
    206            .          .           			continue tryAcquire 
    207            .          .           		} 
    208            .          .            
    209         10ms       10ms           		if !weSpin && v&mutexSpinning == 0 && atomic.Casuintptr(&l.key, v, v|mutexSpinning) { 
    210            .          .           			v |= mutexSpinning 
    211            .          .           			weSpin = true 
    212            .          .           		} 
    213            .          .            
    214         10ms       10ms           		if weSpin || atTail || mutexPreferLowLatency(l) { 
    215         10ms       10ms           			if i < spin { 
    216            .       20ms           				procyield(mutexActiveSpinSize) 
    217            .          .           				v = atomic.Loaduintptr(&l.key) 
    218            .          .           				continue tryAcquire 
    219            .          .           			} else if i < spin+mutexPassiveSpinCount { 
    220            .      270ms           				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268. 
    221            .          .           				v = atomic.Loaduintptr(&l.key) 
    222            .          .           				continue tryAcquire 
    223            .          .           			} 
    224            .          .           		} 
    225            .          .            
    226            .          .           		// Go to sleep 
    227            .          .           		if v&mutexLocked == 0 { 
    228            .          .           			throw("runtime·lock: sleeping while lock is available") 
    229            .          .           		} 
    230            .          .            
    231            .          .           		// Collect times for mutex profile (seen in unlock2 only via mWaitList), 
    232            .          .           		// and for "/sync/mutex/wait/total:seconds" metric (to match). 
    233            .          .           		if !haveTimers { 
    234         10ms       10ms           			gp.m.mWaitList.startTicks = cputicks()                                                                       return nanotime()                                    os_linux_arm64.go:23
                                                                          return nanotime1()                               time_nofake.go:33

    235            .       10ms           			startTime = gp.m.mLockProfile.start() 
    236            .          .           			haveTimers = true 
    237            .          .           		} 
    238            .          .           		// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field 
    239            .          .           		gp.m.mWaitList.next = mutexWaitListHead(v) 
    240            .          .            
    241            .          .           		// Pack a (partial) pointer to this M with the current lock state bits 
    242            .          .           		next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping 
    243            .          .           		if weSpin { // If we were spinning, prepare to retire 
    244            .          .           			next = next &^ mutexSpinning 
    245            .          .           		} 
    246            .          .            
    247         20ms       20ms           		if atomic.Casuintptr(&l.key, v, next) { 
    248            .          .           			weSpin = false 
    249            .          .           			// We've pushed ourselves onto the stack of waiters. Wait. 
    250            .       50ms           			semasleep(-1) 
    251            .          .           			atTail = gp.m.mWaitList.next == 0 // we were at risk of starving 
    252            .          .           			i = 0 
    253            .          .           		} 
    254            .          .            
    255         10ms       10ms           		gp.m.mWaitList.next = 0 
    256         10ms       10ms           		v = atomic.Loaduintptr(&l.key) 
    257            .          .           	} 
    258            .          .           } 
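
Most of lock2's flat time (590ms) sits on the speculative Xchg8 grab at line 166: an uncontended lock is one byte swap and out. Only on contention does the code layer in bounded active spinning, an osyield pass, and finally sleeping with the M pushed onto a wait list packed into the lock word. A user-space analogue of that grab-then-spin-then-yield shape (sketch only: sync/atomic has no byte swap, so this uses a Uint32, and real code should simply use sync.Mutex):

    package main

    import (
    	"fmt"
    	"runtime"
    	"sync"
    	"sync/atomic"
    )

    type spinLock struct{ state atomic.Uint32 }

    const activeSpin = 4 // cf. mutexActiveSpinCount

    func (l *spinLock) lock() {
    	// Speculative grab, like the Xchg8(k8, mutexLocked) fast path.
    	if l.state.Swap(1) == 0 {
    		return
    	}
    	for i := 0; ; i++ {
    		if l.state.Load() == 0 && l.state.Swap(1) == 0 {
    			return
    		}
    		if i < activeSpin {
    			// procyield analogue: burn a few cycles without yielding.
    			for j := 0; j < 30; j++ {
    				_ = j
    			}
    		} else {
    			runtime.Gosched() // osyield analogue; the runtime would sleep next
    		}
    	}
    }

    func (l *spinLock) unlock() { l.state.Store(0) }

    func main() {
    	var l spinLock
    	var n int
    	var wg sync.WaitGroup
    	for g := 0; g < 8; g++ {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			for i := 0; i < 1000; i++ {
    				l.lock()
    				n++ // protected by the lock
    				l.unlock()
    			}
    		}()
    	}
    	wg.Wait()
    	fmt.Println(n) // 8000
    }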

runtime.unlock

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:           0         1s (flat, cum)  2.09%
    259            .          .            
    260            .          .           func unlock(l *mutex) { 
    261            .         1s           	unlockWithRank(l)                                                       unlock2(l)                                                           lockrank_off.go:35

    262            .          .           } 
    263            .          .            

runtime.unlock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:       120ms      120ms (flat, cum)  0.25%
    265            .          .           // 
    266            .          .           //go:nowritebarrier 
    267         10ms       10ms           func unlock2(l *mutex) { 
    268            .          .           	gp := getg() 
    269            .          .            
    270            .          .           	var prev8 uint8 
    271            .          .           	var haveStackLock bool 
    272            .          .           	var endTicks int64 
    273        110ms      110ms           	if !mutexSampleContention() {                                                       if rate := int64(atomic.Load64(&mutexprofilerate)); rate <= 0 {      lock_spinbit.go:330

    274            .          .           		// Not collecting a sample for the contention profile, do the quick release 
    275            .          .           		prev8 = atomic.Xchg8(key8(&l.key), 0) 
    276            .          .           	} else { 
    277            .          .           		// If there's contention, we'll sample it. Don't allow another 
    278            .          .           		// lock2/unlock2 pair to finish before us and take our blame. Prevent 

runtime.unlock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:       650ms      880ms (flat, cum)  1.84%
    309            .          .           	if prev8&mutexLocked == 0 { 
    310            .          .           		throw("unlock of unlocked lock") 
    311            .          .           	} 
    312            .          .            
    313            .          .           	if prev8&mutexSleeping != 0 { 
    314        530ms      760ms           		unlock2Wake(l, haveStackLock, endTicks) 
    315            .          .           	} 
    316            .          .            
    317         40ms       40ms           	gp.m.mLockProfile.store()                                                       if gp := getg(); gp.m.locks == 1 && gp.m.mLockProfile.haveStack {    mprof.go:756
    318            .          .           	gp.m.locks-- 
    319         50ms       50ms           	if gp.m.locks < 0 { 
    320            .          .           		throw("runtime·unlock: lock count") 
    321            .          .           	} 
    322         20ms       20ms           	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack 
    323            .          .           		gp.stackguard0 = stackPreempt 
    324            .          .           	} 
    325         10ms       10ms           } 
    326            .          .            
    327            .          .           // mutexSampleContention returns whether the current mutex operation should 

runtime.mutexSampleContention

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        90ms       90ms (flat, cum)  0.19%
    328            .          .           // report any contention it discovers. 
    329            .          .           func mutexSampleContention() bool { 
    330         90ms       90ms           	if rate := int64(atomic.Load64(&mutexprofilerate)); rate <= 0 { 
    331            .          .           		return false 
    332            .          .           	} else { 
    333            .          .           		// TODO: have SetMutexProfileFraction do the clamping 
    334            .          .           		rate32 := uint32(rate) 
    335            .          .           		if int64(rate32) != rate { 
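
mutexSampleContention gates on mutexprofilerate, the value installed by runtime.SetMutexProfileFraction: zero (the default) disables sampling and steers unlock2 to the quick Xchg8 release shown above. A self-contained way to turn the sampling on and write out the resulting profile (the file name is arbitrary):

package main

import (
	"os"
	"runtime"
	"runtime/pprof"
)

func main() {
	// Sample roughly 1 in 5 mutex contention events.
	// 1 records every event; 0 disables the profile.
	prev := runtime.SetMutexProfileFraction(5)
	defer runtime.SetMutexProfileFraction(prev)

	// ... run the contended workload here ...

	f, err := os.Create("mutex.pprof")
	if err != nil {
		panic(err)
	}
	defer f.Close()
	if err := pprof.Lookup("mutex").WriteTo(f, 0); err != nil {
		panic(err)
	}
}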

runtime.unlock2Wake

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        10ms       10ms (flat, cum) 0.021%
    340            .          .           } 
    341            .          .            
    342            .          .           // unlock2Wake updates the list of Ms waiting on l, waking an M if necessary. 
    343            .          .           // 
    344            .          .           //go:nowritebarrier 
    345         10ms       10ms           func unlock2Wake(l *mutex, haveStackLock bool, endTicks int64) { 
    346            .          .           	v := atomic.Loaduintptr(&l.key) 
    347            .          .            
    348            .          .           	// On occasion, seek out and wake the M at the bottom of the stack so it 
    349            .          .           	// doesn't starve. 
    350            .          .           	antiStarve := cheaprandn(mutexTailWakePeriod) == 0 
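
The anti-starvation check is a one-in-N coin flip: with probability 1/mutexTailWakePeriod the unlocker seeks out the oldest waiter instead of the newest, so no M waits forever behind a steady stream of newcomers. Outside the runtime the same flip can be written with math/rand/v2 (Go 1.22+; the period constant here is illustrative):

package main

import (
	"fmt"
	"math/rand/v2"
)

const tailWakePeriod = 16 // illustrative stand-in for mutexTailWakePeriod

func main() {
	// True on average once per tailWakePeriod calls; that's when
	// the slow wake-the-tail path would run.
	antiStarve := rand.IntN(tailWakePeriod) == 0
	fmt.Println(antiStarve)
}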

runtime.unlock2Wake

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        10ms       10ms (flat, cum) 0.021%
    418            .          .           	for { 
    419            .          .           		headM := v &^ mutexMMask 
    420            .          .           		flags := v & (mutexMMask &^ mutexStackLocked) // preserve low bits, but release stack lock 
    421            .          .            
    422            .          .           		mp := mutexWaitListHead(v).ptr() 
    423         10ms       10ms           		wakem := committed 
    424            .          .           		if committed == nil { 
    425            .          .           			if v&mutexSpinning == 0 || mutexPreferLowLatency(l) { 
    426            .          .           				wakem = mp 
    427            .          .           			} 
    428            .          .           			if antiStarve { 

runtime.unlock2Wake

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        40ms      210ms (flat, cum)  0.44%
    449            .          .           				} 
    450            .          .           			} 
    451            .          .           		} 
    452            .          .            
    453            .          .           		if wakem == mp { 
    454         20ms       20ms           			headM = uintptr(mp.mWaitList.next) &^ mutexMMask 
    455            .          .           		} 
    456            .          .            
    457            .          .           		next := headM | flags 
    458         20ms       20ms           		if atomic.Casuintptr(&l.key, v, next) { 
    459            .          .           			if wakem != nil { 
    460            .          .           				// Claimed an M. Wake it. 
    461            .      170ms           				semawakeup(wakem)                                                                               futexwakeup(&mp.waitsema, 1)                 lock_futex.go:161

    462            .          .           			} 
    463            .          .           			return 
    464            .          .           		} 
    465            .          .            
    466            .          .           		v = atomic.Loaduintptr(&l.key) 

internal/runtime/atomic.(*Int32).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
     16            .          .            
     17            .          .           // Load accesses and returns the value atomically. 
     18            .          .           // 
     19            .          .           //go:nosplit 
     20            .          .           func (i *Int32) Load() int32 { 
     21         10ms       10ms           	return Loadint32(&i.value) 
     22            .          .           } 
     23            .          .            
     24            .          .           // Store updates the value atomically. 
     25            .          .           // 
     26            .          .           //go:nosplit 

internal/runtime/atomic.(*Int32).CompareAndSwap

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
     32            .          .           // and if they're equal, swaps i's value with new. 
     33            .          .           // It reports whether the swap ran. 
     34            .          .           // 
     35            .          .           //go:nosplit 
     36            .          .           func (i *Int32) CompareAndSwap(old, new int32) bool { 
     37         10ms       10ms           	return Casint32(&i.value, old, new) 
     38            .          .           } 
     39            .          .            
     40            .          .           // Swap replaces i's value with new, returning 
     41            .          .           // i's value before the replacement. 
     42            .          .           // 

internal/runtime/atomic.(*Int32).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       180ms      180ms (flat, cum)  0.38%
     51            .          .           // This operation wraps around in the usual 
     52            .          .           // two's-complement way. 
     53            .          .           // 
     54            .          .           //go:nosplit 
     55            .          .           func (i *Int32) Add(delta int32) int32 { 
     56        180ms      180ms           	return Xaddint32(&i.value, delta) 
     57            .          .           } 
     58            .          .            
     59            .          .           // Int64 is an atomically accessed int64 value. 
     60            .          .           // 
     61            .          .           // 8-byte aligned on all platforms, unlike a regular int64. 

internal/runtime/atomic.(*Int64).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        70ms       70ms (flat, cum)  0.15%
     69            .          .            
     70            .          .           // Load accesses and returns the value atomically. 
     71            .          .           // 
     72            .          .           //go:nosplit 
     73            .          .           func (i *Int64) Load() int64 { 
     74         70ms       70ms           	return Loadint64(&i.value) 
     75            .          .           } 
     76            .          .            
     77            .          .           // Store updates the value atomically. 
     78            .          .           // 
     79            .          .           //go:nosplit 

internal/runtime/atomic.(*Uint8).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        60ms       60ms (flat, cum)  0.13%
    119            .          .            
    120            .          .           // Load accesses and returns the value atomically. 
    121            .          .           // 
    122            .          .           //go:nosplit 
    123            .          .           func (u *Uint8) Load() uint8 { 
    124         60ms       60ms           	return Load8(&u.value) 
    125            .          .           } 
    126            .          .            
    127            .          .           // Store updates the value atomically. 
    128            .          .           // 
    129            .          .           //go:nosplit 

internal/runtime/atomic.(*Bool).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       140ms      140ms (flat, cum)  0.29%
    163            .          .            
    164            .          .           // Load accesses and returns the value atomically. 
    165            .          .           // 
    166            .          .           //go:nosplit 
    167            .          .           func (b *Bool) Load() bool { 
    168        140ms      140ms           	return b.u.Load() != 0                                                       return Load8(&u.value)                                               types.go:124

    169            .          .           } 
    170            .          .            
    171            .          .           // Store updates the value atomically. 
    172            .          .           // 
    173            .          .           //go:nosplit 

internal/runtime/atomic.(*Uint32).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        70ms       70ms (flat, cum)  0.15%
    189            .          .            
    190            .          .           // Load accesses and returns the value atomically. 
    191            .          .           // 
    192            .          .           //go:nosplit 
    193            .          .           func (u *Uint32) Load() uint32 { 
    194         70ms       70ms           	return Load(&u.value) 
    195            .          .           } 
    196            .          .            
    197            .          .           // LoadAcquire is a partially unsynchronized version 
    198            .          .           // of Load that relaxes ordering constraints. Other threads 
    199            .          .           // may observe operations that precede this operation to 

internal/runtime/atomic.(*Uint32).CompareAndSwap

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       480ms      480ms (flat, cum)  1.00%
    231            .          .           // and if they're equal, swaps u's value with new. 
    232            .          .           // It reports whether the swap ran. 
    233            .          .           // 
    234            .          .           //go:nosplit 
    235            .          .           func (u *Uint32) CompareAndSwap(old, new uint32) bool { 
    236        480ms      480ms           	return Cas(&u.value, old, new) 
    237            .          .           } 
    238            .          .            
    239            .          .           // CompareAndSwapRelease is a partially unsynchronized version 
    240            .          .           // of Cas that relaxes ordering constraints. Other threads 
    241            .          .           // may observe operations that occur after this operation to 

internal/runtime/atomic.(*Uint32).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       100ms      100ms (flat, cum)  0.21%
    286            .          .           // This operation wraps around in the usual 
    287            .          .           // two's-complement way. 
    288            .          .           // 
    289            .          .           //go:nosplit 
    290            .          .           func (u *Uint32) Add(delta int32) uint32 { 
    291        100ms      100ms           	return Xadd(&u.value, delta) 
    292            .          .           } 
    293            .          .            
    294            .          .           // Uint64 is an atomically accessed uint64 value. 
    295            .          .           // 
    296            .          .           // 8-byte aligned on all platforms, unlike a regular uint64. 

internal/runtime/atomic.(*Uint64).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
    304            .          .            
    305            .          .           // Load accesses and returns the value atomically. 
    306            .          .           // 
    307            .          .           //go:nosplit 
    308            .          .           func (u *Uint64) Load() uint64 { 
    309         10ms       10ms           	return Load64(&u.value) 
    310            .          .           } 
    311            .          .            
    312            .          .           // Store updates the value atomically. 
    313            .          .           // 
    314            .          .           //go:nosplit 

internal/runtime/atomic.(*Uint64).CompareAndSwap

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        60ms       60ms (flat, cum)  0.13%
    320            .          .           // and if they're equal, swaps u's value with new. 
    321            .          .           // It reports whether the swap ran. 
    322            .          .           // 
    323            .          .           //go:nosplit 
    324            .          .           func (u *Uint64) CompareAndSwap(old, new uint64) bool { 
    325         60ms       60ms           	return Cas64(&u.value, old, new) 
    326            .          .           } 
    327            .          .            
    328            .          .           // Swap replaces u's value with new, returning 
    329            .          .           // u's value before the replacement. 
    330            .          .           // 

internal/runtime/atomic.(*Uint64).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       280ms      280ms (flat, cum)  0.59%
    339            .          .           // This operation wraps around in the usual 
    340            .          .           // two's-complement way. 
    341            .          .           // 
    342            .          .           //go:nosplit 
    343            .          .           func (u *Uint64) Add(delta int64) uint64 { 
    344        280ms      280ms           	return Xadd64(&u.value, delta) 
    345            .          .           } 
    346            .          .            
    347            .          .           // Uintptr is an atomically accessed uintptr value. 
    348            .          .           // 
    349            .          .           // A Uintptr must not be copied. 

internal/runtime/atomic.(*Uintptr).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        50ms       50ms (flat, cum)   0.1%
    415            .          .           // This operation wraps around in the usual 
    416            .          .           // two's-complement way. 
    417            .          .           // 
    418            .          .           //go:nosplit 
    419            .          .           func (u *Uintptr) Add(delta uintptr) uintptr { 
    420         50ms       50ms           	return Xadduintptr(&u.value, delta) 
    421            .          .           } 
    422            .          .            
    423            .          .           // Float64 is an atomically accessed float64 value. 
    424            .          .           // 
    425            .          .           // 8-byte aligned on all platforms, unlike a regular float64. 

internal/runtime/atomic.(*UnsafePointer).StoreNoWB

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
    474            .          .           // It is safe to use with values not found in the Go heap. 
    475            .          .           // Prefer Store instead. 
    476            .          .           // 
    477            .          .           //go:nosplit 
    478            .          .           func (u *UnsafePointer) StoreNoWB(value unsafe.Pointer) { 
    479         10ms       10ms           	StorepNoWB(unsafe.Pointer(&u.value), value) 
    480            .          .           } 
    481            .          .            
    482            .          .           // Store updates the value atomically. 
    483            .          .           func (u *UnsafePointer) Store(value unsafe.Pointer) { 
    484            .          .           	storePointer(&u.value, value) 
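
The internal/runtime/atomic types profiled above cannot be imported by user code, but since Go 1.19 sync/atomic exposes the same typed API (Int32, Uint32, Uint64, Uintptr, Bool, Pointer) with equivalent Load, Store, Add, Swap, and CompareAndSwap methods:

package main

import (
	"fmt"
	"sync/atomic"
)

func main() {
	var n atomic.Int32
	n.Add(5)                            // like the internal Xaddint32
	fmt.Println(n.Load())               // 5
	fmt.Println(n.CompareAndSwap(5, 7)) // true; n is now 7

	var flag atomic.Bool
	flag.Store(true)
	fmt.Println(flag.Load()) // true, read via a single atomic byte load

	var u atomic.Uint64
	u.Add(1 << 40)
	fmt.Println(u.Load())
}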

runtime.stackpoolalloc

/usr/lib/go/src/runtime/stack.go

  Total:        80ms       80ms (flat, cum)  0.17%
    217            .          .           				// stackalloc in order to avoid overlapping allocations and 
    218            .          .           				// uninitialized memory errors in valgrind. 
    219            .          .           				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr())) 
    220            .          .           			} 
    221            .          .           			x.ptr().next = s.manualFreeList 
    222         10ms       10ms           			s.manualFreeList = x 
    223            .          .           		} 
    224            .          .           		list.insert(s) 
    225            .          .           	} 
    226            .          .           	x := s.manualFreeList 
    227            .          .           	if x.ptr() == nil { 
    228            .          .           		throw("span has no free stacks") 
    229            .          .           	} 
    230         70ms       70ms           	s.manualFreeList = x.ptr().next 
    231            .          .           	s.allocCount++ 
    232            .          .           	if s.manualFreeList.ptr() == nil { 
    233            .          .           		// all stacks in s are allocated. 
    234            .          .           		list.remove(s) 
    235            .          .           	} 

runtime.stackpoolfree

/usr/lib/go/src/runtime/stack.go

  Total:        40ms       40ms (flat, cum) 0.084%
    236            .          .           	return x 
    237            .          .           } 
    238            .          .            
    239            .          .           // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held. 
    240            .          .           func stackpoolfree(x gclinkptr, order uint8) { 
    241         20ms       20ms           	s := spanOfUnchecked(uintptr(x))                                                       return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena] mheap.go:737
    242         20ms       20ms           	if s.state.get() != mSpanManual {                                                       return mSpanState(b.s.Load())                                        mheap.go:417
                                                          return Load8(&u.value)                                           types.go:124
    243            .          .           		throw("freeing stack not in a stack span") 
    244            .          .           	} 
    245            .          .           	if s.manualFreeList.ptr() == nil { 
    246            .          .           		// s will now have a free stack 
    247            .          .           		stackpool[order].item.span.insert(s) 

runtime.stackcacherefill

/usr/lib/go/src/runtime/stack.go

  Total:           0       80ms (flat, cum)  0.17%
    285            .          .           	// Grab half of the allowed capacity (to prevent thrashing). 
    286            .          .           	var list gclinkptr 
    287            .          .           	var size uintptr 
    288            .          .           	lock(&stackpool[order].item.mu) 
    289            .          .           	for size < _StackCacheSize/2 { 
    290            .       80ms           		x := stackpoolalloc(order) 
    291            .          .           		x.ptr().next = list 
    292            .          .           		list = x 
    293            .          .           		size += fixedStack << order 
    294            .          .           	} 
    295            .          .           	unlock(&stackpool[order].item.mu) 

runtime.stackcacherelease

/usr/lib/go/src/runtime/stack.go

  Total:        30ms       70ms (flat, cum)  0.15%
    300            .          .           //go:systemstack 
    301            .          .           func stackcacherelease(c *mcache, order uint8) { 
    302            .          .           	if stackDebug >= 1 { 
    303            .          .           		print("stackcacherelease order=", order, "\n") 
    304            .          .           	} 
    305         10ms       10ms           	x := c.stackcache[order].list 
    306            .          .           	size := c.stackcache[order].size 
    307            .          .           	lock(&stackpool[order].item.mu) 
    308            .          .           	for size > _StackCacheSize/2 { 
    309         20ms       20ms           		y := x.ptr().next 
    310            .       40ms           		stackpoolfree(x, order) 
    311            .          .           		x = y 
    312            .          .           		size -= fixedStack << order 
    313            .          .           	} 
    314            .          .           	unlock(&stackpool[order].item.mu) 
    315            .          .           	c.stackcache[order].list = x 
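
stackcacherefill and stackcacherelease form a two-level free list with hysteresis: each P serves stacks from a local cache and only touches the locked global pool in batches, refilling up to half capacity when empty and draining down to half when full, so a workload hovering near the boundary doesn't bounce the lock on every alloc/free. A freestanding sketch of that pattern (the element type and capacity are hypothetical):

package main

import (
	"fmt"
	"sync"
)

const cacheCap = 32 // hypothetical per-thread capacity

type pool struct {
	mu     sync.Mutex
	global []int // stands in for the locked global free list
}

type cache struct {
	local []int // per-thread; accessed without locking
}

// get refills the local cache to half capacity under a single lock
// acquisition, then serves from it without the lock.
func (c *cache) get(p *pool) int {
	if len(c.local) == 0 {
		p.mu.Lock()
		for len(c.local) < cacheCap/2 && len(p.global) > 0 {
			c.local = append(c.local, p.global[len(p.global)-1])
			p.global = p.global[:len(p.global)-1]
		}
		p.mu.Unlock()
		if len(c.local) == 0 {
			return -1 // pool exhausted; a real allocator would grow it
		}
	}
	x := c.local[len(c.local)-1]
	c.local = c.local[:len(c.local)-1]
	return x
}

// put returns the surplus above half capacity in one batch, mirroring
// the "size > _StackCacheSize/2" loop in stackcacherelease above.
func (c *cache) put(p *pool, x int) {
	c.local = append(c.local, x)
	if len(c.local) >= cacheCap {
		p.mu.Lock()
		for len(c.local) > cacheCap/2 {
			p.global = append(p.global, c.local[len(c.local)-1])
			c.local = c.local[:len(c.local)-1]
		}
		p.mu.Unlock()
	}
}

func main() {
	p := &pool{global: []int{1, 2, 3, 4, 5}}
	c := &cache{}
	x := c.get(p) // refills the local cache from the global pool
	c.put(p, x)
	fmt.Println(x)
}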

runtime.stackalloc

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
    339            .          .           // 
    340            .          .           // stackalloc must run on the system stack because it uses per-P 
    341            .          .           // resources and must not split the stack. 
    342            .          .           // 
    343            .          .           //go:systemstack 
    344         10ms       10ms           func stackalloc(n uint32) stack { 
    345            .          .           	// Stackalloc must be called on scheduler stack, so that we 
    346            .          .           	// never try to grow the stack during the code that stackalloc runs. 
    347            .          .           	// Doing so would cause a deadlock (issue 1547). 
    348            .          .           	thisg := getg() 
    349            .          .           	if thisg != thisg.m.g0 { 

runtime.stackalloc

/usr/lib/go/src/runtime/stack.go

  Total:        90ms      170ms (flat, cum)  0.36%
    370            .          .           	// a dedicated span. 
    371            .          .           	var v unsafe.Pointer 
    372            .          .           	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize { 
    373            .          .           		order := uint8(0) 
    374            .          .           		n2 := n 
    375         10ms       10ms           		for n2 > fixedStack { 
    376            .          .           			order++ 
    377            .          .           			n2 >>= 1 
    378            .          .           		} 
    379            .          .           		var x gclinkptr 
    380            .          .           		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" { 
    381            .          .           			// thisg.m.p == 0 can happen in the guts of exitsyscall 
    382            .          .           			// or procresize. Just get a stack from the global pool. 
    383            .          .           			// Also don't touch stackcache during gc 
    384            .          .           			// as it's flushed concurrently. 
    385            .          .           			lock(&stackpool[order].item.mu) 
    386            .          .           			x = stackpoolalloc(order) 
    387            .          .           			unlock(&stackpool[order].item.mu) 
    388            .          .           		} else { 
    389         10ms       10ms           			c := thisg.m.p.ptr().mcache 
    390         10ms       10ms           			x = c.stackcache[order].list 
    391         10ms       10ms           			if x.ptr() == nil {                                                                       return (*gclink)(unsafe.Pointer(p))                  mcache.go:76

    392            .       80ms           				stackcacherefill(c, order) 
    393            .          .           				x = c.stackcache[order].list 
    394            .          .           			} 
    395         50ms       50ms           			c.stackcache[order].list = x.ptr().next 
    396            .          .           			c.stackcache[order].size -= uintptr(n) 
    397            .          .           		} 
    398            .          .           		if valgrindenabled { 
    399            .          .           			// We're about to allocate the stack region starting at x.ptr(). 
    400            .          .           			// To prevent valgrind from complaining about overlapping allocations, 

runtime.stackalloc

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
    427            .          .           			s.elemsize = uintptr(n) 
    428            .          .           		} 
    429            .          .           		v = unsafe.Pointer(s.base()) 
    430            .          .           	} 
    431            .          .            
    432         10ms       10ms           	if traceAllocFreeEnabled() {                                                       return trace.enabledWithAllocFree                                    traceruntime.go:159

    433            .          .           		trace := traceAcquire() 
    434            .          .           		if trace.ok() { 
    435            .          .           			trace.GoroutineStackAlloc(uintptr(v), uintptr(n)) 
    436            .          .           			traceRelease(trace) 
    437            .          .           		} 

runtime.stackfree

/usr/lib/go/src/runtime/stack.go

  Total:        50ms       50ms (flat, cum)   0.1%
    458            .          .           // 
    459            .          .           // stackfree must run on the system stack because it uses per-P 
    460            .          .           // resources and must not split the stack. 
    461            .          .           // 
    462            .          .           //go:systemstack 
    463         20ms       20ms           func stackfree(stk stack) { 
    464         10ms       10ms           	gp := getg() 
    465         10ms       10ms           	v := unsafe.Pointer(stk.lo) 
    466         10ms       10ms           	n := stk.hi - stk.lo 
    467            .          .           	if n&(n-1) != 0 { 
    468            .          .           		throw("stack not a power of 2") 
    469            .          .           	} 
    470            .          .           	if stk.lo+n < stk.hi { 
    471            .          .           		throw("bad stack size") 

runtime.stackfree

/usr/lib/go/src/runtime/stack.go

  Total:        70ms      140ms (flat, cum)  0.29%
    500            .          .           	} 
    501            .          .           	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize { 
    502            .          .           		order := uint8(0) 
    503            .          .           		n2 := n 
    504            .          .           		for n2 > fixedStack { 
    505         10ms       10ms           			order++ 
    506            .          .           			n2 >>= 1 
    507            .          .           		} 
    508            .          .           		x := gclinkptr(v) 
    509         10ms       10ms           		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" { 
    510            .          .           			lock(&stackpool[order].item.mu) 
    511            .          .           			if valgrindenabled { 
    512            .          .           				// x.ptr() is the head of the list of free stacks, and will be used 
    513            .          .           				// when allocating a new stack, so it has to be marked allocated. 
    514            .          .           				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr())) 
    515            .          .           			} 
    516            .          .           			stackpoolfree(x, order) 
    517            .          .           			unlock(&stackpool[order].item.mu) 
    518            .          .           		} else { 
    519         10ms       10ms           			c := gp.m.p.ptr().mcache 
    520         30ms       30ms           			if c.stackcache[order].size >= _StackCacheSize { 
    521            .       70ms           				stackcacherelease(c, order) 
    522            .          .           			} 
    523            .          .           			if valgrindenabled { 
    524            .          .           				// x.ptr() is the head of the list of free stacks, and will 
    525            .          .           				// be used when allocating a new stack, so it has to be 
    526            .          .           				// marked allocated. 
    527            .          .           				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr())) 
    528            .          .           			} 
    529         10ms       10ms           			x.ptr().next = c.stackcache[order].list 
    530            .          .           			c.stackcache[order].list = x 
    531            .          .           			c.stackcache[order].size += n 
    532            .          .           		} 
    533            .          .           	} else { 
    534            .          .           		s := spanOfUnchecked(uintptr(v)) 

runtime.adjustpointer

/usr/lib/go/src/runtime/stack.go

  Total:        80ms       80ms (flat, cum)  0.17%
    607            .          .            
    608            .          .           // adjustpointer checks whether *vpp is in the old stack described by adjinfo. 
    609            .          .           // If so, it rewrites *vpp to point into the new stack. 
    610            .          .           func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) { 
    611            .          .           	pp := (*uintptr)(vpp) 
    612         20ms       20ms           	p := *pp 
    613            .          .           	if stackDebug >= 4 { 
    614            .          .           		print("        ", pp, ":", hex(p), "\n") 
    615            .          .           	} 
    616            .          .           	if valgrindenabled { 
    617            .          .           		// p is a pointer on a stack, it is inherently initialized, as 
    618            .          .           		// everything on the stack is, but valgrind for _some unknown reason_ 
    619            .          .           		// sometimes thinks it's uninitialized, and flags operations on p below 
    620            .          .           		// as uninitialized. We just initialize it if valgrind thinks it's 
    621            .          .           		// uninitialized. 
    622            .          .           		// 
    623            .          .           		// See go.dev/issues/73801. 
    624            .          .           		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p)) 
    625            .          .           	} 
    626         60ms       60ms           	if adjinfo.old.lo <= p && p < adjinfo.old.hi { 
    627            .          .           		*pp = p + adjinfo.delta 
    628            .          .           		if stackDebug >= 3 { 
    629            .          .           			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n") 
    630            .          .           		} 
    631            .          .           	} 
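
Stripped of the debug and valgrind hooks, adjustpointer is a half-open range test plus a constant slide: a value that points into the old stack's [lo, hi) interval is moved by delta, the distance between the new and old stacks. A sketch with plain uintptr values:

package main

import "fmt"

type stackRange struct{ lo, hi uintptr }

// adjust rewrites *p only if it points into old, adding delta
// (new base minus old base); other values are left alone.
func adjust(p *uintptr, old stackRange, delta uintptr) {
	if old.lo <= *p && *p < old.hi {
		*p += delta
	}
}

func main() {
	old := stackRange{lo: 0x1000, hi: 0x2000}
	delta := uintptr(0x8000) // the new stack sits 0x8000 bytes higher
	ptr := uintptr(0x1800)   // points into the old stack
	adjust(&ptr, old, delta)
	fmt.Printf("%#x\n", ptr) // 0x9800
}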

runtime.adjustpointers

/usr/lib/go/src/runtime/stack.go

  Total:       370ms      370ms (flat, cum)  0.77%
    647            .          .           	return (b >> (i % 8)) & 1 
    648            .          .           } 
    649            .          .            
    650            .          .           // bv describes the memory starting at address scanp. 
    651            .          .           // Adjust any pointers contained therein. 
    652         20ms       20ms           func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) { 
    653         10ms       10ms           	minp := adjinfo.old.lo 
    654            .          .           	maxp := adjinfo.old.hi 
    655         10ms       10ms           	delta := adjinfo.delta 
    656         20ms       20ms           	num := uintptr(bv.n) 
    657            .          .           	// If this frame might contain channel receive slots, use CAS 
    658            .          .           	// to adjust pointers. If the slot hasn't been received into 
    659            .          .           	// yet, it may contain stack pointers and a concurrent send 
    660            .          .           	// could race with adjusting those pointers. (The sent value 
    661            .          .           	// itself can never contain stack pointers.) 
    662            .          .           	useCAS := uintptr(scanp) < adjinfo.sghi 
    663         20ms       20ms           	for i := uintptr(0); i < num; i += 8 { 
    664            .          .           		if stackDebug >= 4 { 
    665            .          .           			for j := uintptr(0); j < 8; j++ { 
    666            .          .           				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n") 
    667            .          .           			} 
    668            .          .           		} 
    669         50ms       50ms           		b := *(addb(bv.bytedata, i/8))                                                               return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n)) mbitmap.go:1012

    670         70ms       70ms           		for b != 0 { 
    671         30ms       30ms           			j := uintptr(sys.TrailingZeros8(b)) 
    672            .          .           			b &= b - 1 
    673            .          .           			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize)) 
    674            .          .           		retry: 
    675         30ms       30ms           			p := *pp 
    676         60ms       60ms           			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 { 
    677            .          .           				// Looks like a junk value in a pointer slot. 
    678            .          .           				// Live analysis wrong? 
    679            .          .           				getg().m.traceback = 2 
    680            .          .           				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n") 
    681            .          .           				throw("invalid pointer found on stack") 
    682            .          .           			} 
    683         30ms       30ms           			if minp <= p && p < maxp { 
    684            .          .           				if stackDebug >= 3 { 
    685            .          .           					print("adjust ptr ", hex(p), " ", funcname(f), "\n") 
    686            .          .           				} 
    687            .          .           				if useCAS { 
    688            .          .           					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp)) 
    689            .          .           					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) { 
    690            .          .           						goto retry 
    691            .          .           					} 
    692            .          .           				} else { 
    693            .          .           					*pp = p + delta 
    694            .          .           				} 
    695            .          .           			} 
    696            .          .           		} 
    697            .          .           	} 
    698         20ms       20ms           } 
    699            .          .            
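
The inner loop of adjustpointers visits only the slots whose bitmap bit is set: it loads a byte of the pointer bitmap, finds the lowest set bit with TrailingZeros8, and clears it with b &= b - 1. The same bit-peeling idiom works outside the runtime with math/bits:

package main

import (
	"fmt"
	"math/bits"
)

func main() {
	bitmap := []byte{0b10100100, 0b00000001}
	for i, b := range bitmap {
		for b != 0 {
			j := bits.TrailingZeros8(b) // index of the lowest set bit
			b &= b - 1                  // clear that bit
			fmt.Println("pointer at slot", i*8+j)
		}
	}
	// Prints slots 2, 5, 7, 8.
}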

runtime.adjustframe

/usr/lib/go/src/runtime/stack.go

  Total:       370ms      2.79s (flat, cum)  5.84%
    700            .          .           // Note: the argument/return area is adjusted by the callee. 
    701         40ms       40ms           func adjustframe(frame *stkframe, adjinfo *adjustinfo) { 
    702         30ms       30ms           	if frame.continpc == 0 { 
    703            .          .           		// Frame is dead. 
    704            .          .           		return 
    705            .          .           	} 
    706            .          .           	f := frame.fn 
    707            .          .           	if stackDebug >= 2 { 
    708            .          .           		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n") 
    709            .          .           	} 
    710            .          .            
    711            .          .           	// Adjust saved frame pointer if there is one. 
    712         20ms       20ms           	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize { 
    713            .          .           		if stackDebug >= 3 { 
    714            .          .           			print("      saved bp\n") 
    715            .          .           		} 
    716            .          .           		if debugCheckBP { 
    717            .          .           			// Frame pointers should always point to the next higher frame on 
    718            .          .           			// the Go stack (or be nil, for the top frame on the stack). 
    719            .          .           			bp := *(*uintptr)(unsafe.Pointer(frame.varp)) 
    720            .          .           			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) { 
    721            .          .           				println("runtime: found invalid frame pointer") 
    722            .          .           				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n") 
    723            .          .           				throw("bad frame pointer") 
    724            .          .           			} 
    725            .          .           		} 
    726            .          .           		// On AMD64, this is the caller's frame pointer saved in the current 
    727            .          .           		// frame. 
    728            .          .           		// On ARM64, this is the frame pointer of the caller's caller saved 
    729            .          .           		// by the caller in its frame (one word below its SP). 
    730         40ms       40ms           		adjustpointer(adjinfo, unsafe.Pointer(frame.varp))                                                               p := *pp                                                     stack.go:612
                                                               if adjinfo.old.lo <= p && p < adjinfo.old.hi {               stack.go:626
    731            .          .           	} 
    732            .          .            
    733         20ms      2.07s           	locals, args, objs := frame.getStackMap(true) 
    734            .          .            
    735            .          .           	// Adjust local variables if stack frame has been allocated. 
    736         10ms       10ms           	if locals.n > 0 { 
    737            .          .           		size := uintptr(locals.n) * goarch.PtrSize 
    738         10ms      150ms           		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f) 
    739            .          .           	} 
    740            .          .            
    741            .          .           	// Adjust arguments. 
    742            .          .           	if args.n > 0 { 
    743            .          .           		if stackDebug >= 3 { 
    744            .          .           			print("      args\n") 
    745            .          .           		} 
    746         40ms      270ms           		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{}) 
    747            .          .           	} 
    748            .          .            
    749            .          .           	// Adjust pointers in all stack objects (whether they are live or not). 
    750            .          .           	// See comments in mgcmark.go:scanframeworker. 
    751         30ms       30ms           	if frame.varp != 0 { 
    752            .          .           		for i := range objs { 
    753            .          .           			obj := &objs[i] 
    754            .          .           			off := obj.off 
    755            .          .           			base := frame.varp // locals base pointer 
    756         20ms       20ms           			if off >= 0 { 
    757            .          .           				base = frame.argp // arguments and return values base pointer 
    758            .          .           			} 
    759            .          .           			p := base + uintptr(off) 
    760            .          .           			if p < frame.sp { 
    761            .          .           				// Object hasn't been allocated in the frame yet. 
    762            .          .           				// (Happens when the stack bounds check fails and 
    763            .          .           				// we call into morestack.) 
    764            .          .           				continue 
    765            .          .           			} 
    766         40ms       40ms           			ptrBytes, gcData := obj.gcdata()                                                                       if datap.gofunc <= ptr && ptr < datap.end {          stack.go:1344

    767         10ms       10ms           			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize { 
    768         20ms       20ms           				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 { 
    769         30ms       30ms           					adjustpointer(adjinfo, unsafe.Pointer(p+i))                                                                                       if adjinfo.old.lo <= p && p < adjinfo.old.hi { stack.go:626

    770            .          .           				} 
    771            .          .           			} 
    772            .          .           		} 
    773            .          .           	} 
    774         10ms       10ms           } 
    775            .          .            
    776            .          .           func adjustctxt(gp *g, adjinfo *adjustinfo) { 
    777            .          .           	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt)) 
    778            .          .           	if !framepointer_enabled { 
    779            .          .           		return 

runtime.adjustctxt

/usr/lib/go/src/runtime/stack.go

  Total:        20ms       20ms (flat, cum) 0.042%
    785            .          .           			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n") 
    786            .          .           			throw("bad top frame pointer") 
    787            .          .           		} 
    788            .          .           	} 
    789            .          .           	oldfp := gp.sched.bp 
    790         10ms       10ms           	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp))                                                       if adjinfo.old.lo <= p && p < adjinfo.old.hi {                       stack.go:626

    791            .          .           	if GOARCH == "arm64" { 
    792            .          .           		// On ARM64, the frame pointer is saved one word *below* the SP, 
    793            .          .           		// which is not copied or adjusted in any frame. Do it explicitly 
    794            .          .           		// here. 
    795            .          .           		if oldfp == gp.sched.sp-goarch.PtrSize { 
    796            .          .           			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize) 
    797         10ms       10ms           			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp))                                                                       p := *pp                                             stack.go:612

    798            .          .           		} 
    799            .          .           	} 
    800            .          .           } 
    801            .          .            

runtime.adjustdefers

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
    803            .          .           	// Adjust pointers in the Defer structs. 
    804            .          .           	// We need to do this first because we need to adjust the 
    805            .          .           	// defer.link fields so we always work on the new stack. 
    806            .          .           	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer)) 
    807         10ms       10ms           	for d := gp._defer; d != nil; d = d.link { 
    808            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.fn)) 
    809            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.sp)) 
    810            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.link)) 
    811            .          .           	} 
    812            .          .           } 

runtime.adjustsudogs

/usr/lib/go/src/runtime/stack.go

  Total:        30ms       30ms (flat, cum) 0.063%
    818            .          .           } 
    819            .          .            
    820            .          .           func adjustsudogs(gp *g, adjinfo *adjustinfo) { 
    821            .          .           	// the data elements pointed to by a SudoG structure 
    822            .          .           	// might be in the stack. 
    823         30ms       30ms           	for s := gp.waiting; s != nil; s = s.waitlink { 
    824            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&s.elem)) 
    825            .          .           	} 
    826            .          .           } 
    827            .          .            
    828            .          .           func fillstack(stk stack, b byte) { 

runtime.copystack

/usr/lib/go/src/runtime/stack.go

  Total:        70ms      140ms (flat, cum)  0.29%
    894            .          .           	return sgsize 
    895            .          .           } 
    896            .          .            
    897            .          .           // Copies gp's stack to a new stack of a different size. 
    898            .          .           // Caller must have changed gp status to Gcopystack. 
    899         20ms       20ms           func copystack(gp *g, newsize uintptr) { 
    900         10ms       10ms           	if gp.syscallsp != 0 { 
    901            .          .           		throw("stack growth not allowed in system call") 
    902            .          .           	} 
    903            .          .           	old := gp.stack 
    904         10ms       10ms           	if old.lo == 0 { 
    905            .          .           		throw("nil stackbase") 
    906            .          .           	} 
    907            .          .           	used := old.hi - gp.sched.sp 
    908            .          .           	// Add just the difference to gcController.addScannableStack. 
    909            .          .           	// g0 stacks never move, so this will never account for them. 
    910            .          .           	// It's also fine if we have no P, addScannableStack can deal with 
    911            .          .           	// that case. 
    912         20ms       20ms           	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo))                                                       if pp == nil {                                                       mgcpacer.go:918
                                                           c.maxStackScan.Add(pp.maxStackScanDelta)                             mgcpacer.go:924
                                                          return Xadd64(&u.value, delta)                                   types.go:344

    913            .          .            
    914            .          .           	// allocate new stack 
    915         10ms       80ms           	new := stackalloc(uint32(newsize)) 
    916            .          .           	if stackPoisonCopy != 0 { 
    917            .          .           		fillstack(new, 0xfd) 
    918            .          .           	} 
    919            .          .           	if stackDebug >= 1 { 
    920            .          .           		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n") 

runtime.copystack

/usr/lib/go/src/runtime/stack.go

  Total:        60ms      5.56s (flat, cum) 11.64%
    933            .          .           			// parking on a channel, but it is safe to grow since we do that 
    934            .          .           			// ourselves and explicitly don't want to synchronize with channels 
    935            .          .           			// since we could self-deadlock. 
    936            .          .           			throw("racy sudog adjustment due to parking on channel") 
    937            .          .           		} 
    938         30ms       30ms           		adjustsudogs(gp, &adjinfo)                                                               for s := gp.waiting; s != nil; s = s.waitlink {              stack.go:823

    939            .          .           	} else { 
    940            .          .           		// sudogs may be pointing in to the stack and gp has 
    941            .          .           		// released channel locks, so other goroutines could 
    942            .          .           		// be writing to gp's stack. Find the highest such 
    943            .          .           		// pointer so we can handle everything there and below 
    944            .          .           		// carefully. (This shouldn't be far from the bottom 
    945            .          .           		// of the stack, so there's little cost in handling 
    946            .          .           		// everything below it carefully.) 
    947            .          .           		adjinfo.sghi = findsghi(gp, old) 
    948            .          .            
    949            .          .           		// Synchronize with channel ops and copy the part of 
    950            .          .           		// the stack they may interact with. 
    951            .          .           		ncopy -= syncadjustsudogs(gp, used, &adjinfo) 
    952            .          .           	} 
    953            .          .            
    954            .          .           	// Copy the stack (or the rest of it) to the new location 
    955            .      110ms           	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy) 
    956            .          .            
    957            .          .           	// Adjust remaining structures that have pointers into stacks. 
    958            .          .           	// We have to do most of these before we traceback the new 
    959            .          .           	// stack because gentraceback uses them. 
    960            .       20ms           	adjustctxt(gp, &adjinfo) 
    961            .       10ms           	adjustdefers(gp, &adjinfo) 
    962            .          .           	adjustpanics(gp, &adjinfo) 
    963         10ms       10ms           	if adjinfo.sghi != 0 { 
    964            .          .           		adjinfo.sghi += adjinfo.delta 
    965            .          .           	} 
    966            .          .            
    967            .          .           	// Swap out old stack for new one 
    968         10ms       10ms           	gp.stack = new 
    969            .          .           	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request 
    970            .          .           	gp.sched.sp = new.hi - used 
    971            .          .           	gp.stktopsp += adjinfo.delta 
    972            .          .            
    973            .          .           	// Adjust pointers in the new stack. 
    974            .       10ms           	var u unwinder 
    975            .      2.43s           	for u.init(gp, 0); u.valid(); u.next() {             u.initAt(^uintptr(0), ^uintptr(0), ^uintptr(0), gp, flags)           traceback.go:129

    976         10ms      2.80s           		adjustframe(&u.frame, &adjinfo) 
    977            .          .           	} 
    978            .          .            
    979            .          .           	if valgrindenabled { 
    980            .          .           		if gp.valgrindStackID == 0 { 
    981            .          .           			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi)) 
    982            .          .           		} else { 
    983            .          .           			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi)) 
    984            .          .           		} 
    985            .          .           	} 
    986            .          .            
    987            .          .           	// free old stack 
    988            .          .           	if stackPoisonCopy != 0 { 
    989            .          .           		fillstack(old, 0xfc) 
    990            .          .           	} 
    991            .      130ms           	stackfree(old) 
    992            .          .           } 
    993            .          .            
    994            .          .           // round x up to a power of 2. 
    995            .          .           func round2(x int32) int32 { 
    996            .          .           	s := uint(0) 
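
copystack and its children account for 11.64% of this profile, most of it under the unwinder and adjustframe, and that cost recurs every time a goroutine outgrows its current stack and the runtime doubles it. Deep recursion with sizable frames reproduces it readily; a self-contained trigger (not part of the profiled test):

package main

import "fmt"

// deep forces repeated stack growth: each frame pins a 1 KiB local,
// so a few thousand frames drive several newstack/copystack cycles.
func deep(n int) int {
	var buf [1024]byte
	buf[0] = byte(n)
	if n == 0 {
		return int(buf[0])
	}
	return deep(n-1) + int(buf[0])
}

func main() {
	// Run under a CPU profile (e.g. runtime/pprof or `go test
	// -cpuprofile`) to see newstack, copystack, and adjustframe
	// show up as in the listing above.
	fmt.Println(deep(4096))
}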

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1010            .          .           // This must be nowritebarrierrec because it can be called as part of 
   1011            .          .           // stack growth from other nowritebarrierrec functions, but the 
   1012            .          .           // compiler doesn't check this. 
   1013            .          .           // 
   1014            .          .           //go:nowritebarrierrec 
   1015         10ms       10ms           func newstack() { 
   1016            .          .           	thisg := getg() 
   1017            .          .           	// TODO: double check all gp. shouldn't be getg(). 
   1018            .          .           	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork { 
   1019            .          .           		throw("stack growth after fork") 
   1020            .          .           	} 

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1055            .          .           	thisg.m.morebuf.g = 0 
   1056            .          .            
   1057            .          .           	// NOTE: stackguard0 may change underfoot, if another thread 
   1058            .          .           	// is about to try to preempt gp. Read it just once and use that same 
   1059            .          .           	// value now and below. 
   1060         10ms       10ms           	stackguard0 := atomic.Loaduintptr(&gp.stackguard0) 
   1061            .          .            
   1062            .          .           	// Be conservative about where we preempt. 
   1063            .          .           	// We are interested in preempting user Go code, not runtime code. 
   1064            .          .           	// If we're holding locks, mallocing, or preemption is disabled, don't 
   1065            .          .           	// preempt. 

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        30ms       30ms (flat, cum) 0.063%
   1079            .          .           			gp.stackguard0 = gp.stack.lo + stackGuard 
   1080            .          .           			gogo(&gp.sched) // never return 
   1081            .          .           		} 
   1082            .          .           	} 
   1083            .          .            
   1084         20ms       20ms           	if gp.stack.lo == 0 { 
   1085            .          .           		throw("missing stack in newstack") 
   1086            .          .           	} 
   1087            .          .           	sp := gp.sched.sp 
   1088            .          .           	if goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.I386 || goarch.ArchFamily == goarch.WASM { 
   1089            .          .           		// The call to morestack cost a word. 
   1090            .          .           		sp -= goarch.PtrSize 
   1091            .          .           	} 
   1092            .          .           	if stackDebug >= 1 || sp < gp.stack.lo { 
   1093            .          .           		print("runtime: newstack sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n", 
   1094            .          .           			"\tmorebuf={pc:", hex(morebuf.pc), " sp:", hex(morebuf.sp), " lr:", hex(morebuf.lr), "}\n", 
   1095            .          .           			"\tsched={pc:", hex(gp.sched.pc), " sp:", hex(gp.sched.sp), " lr:", hex(gp.sched.lr), " ctxt:", gp.sched.ctxt, "}\n") 
   1096            .          .           	} 
   1097            .          .           	if sp < gp.stack.lo { 
   1098            .          .           		print("runtime: gp=", gp, ", goid=", gp.goid, ", gp->status=", hex(readgstatus(gp)), "\n ") 
   1099            .          .           		print("runtime: split stack overflow: ", hex(sp), " < ", hex(gp.stack.lo), "\n") 
   1100            .          .           		throw("runtime: split stack overflow") 
   1101            .          .           	} 
   1102            .          .            
   1103         10ms       10ms           	if preempt { 
   1104            .          .           		if gp == thisg.m.g0 { 
   1105            .          .           			throw("runtime: preempt g0") 
   1106            .          .           		} 
   1107            .          .           		if thisg.m.p == 0 && thisg.m.locks == 0 { 
   1108            .          .           			throw("runtime: g is running but p is not") 

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        50ms      6.15s (flat, cum) 12.87%
   1131            .          .           	newsize := oldsize * 2 
   1132            .          .            
   1133            .          .           	// Make sure we grow at least as much as needed to fit the new frame. 
   1134            .          .           	// (This is just an optimization - the caller of morestack will 
   1135            .          .           	// recheck the bounds on return.) 
   1136            .       40ms           	if f := findfunc(gp.sched.pc); f.valid() { 
   1137            .      250ms           		max := uintptr(funcMaxSPDelta(f)) 
   1138            .          .           		needed := max + stackGuard 
   1139            .          .           		used := gp.stack.hi - gp.sched.sp 
   1140            .          .           		for newsize-used < needed { 
   1141            .          .           			newsize *= 2 
   1142            .          .           		} 
   1143            .          .           	} 
   1144            .          .            
   1145            .          .           	if stackguard0 == stackForceMove { 
   1146            .          .           		// Forced stack movement used for debugging. 
   1147            .          .           		// Don't double the stack (or we may quickly run out 
   1148            .          .           		// if this is done repeatedly). 
   1149            .          .           		newsize = oldsize 
   1150            .          .           	} 
   1151            .          .            
   1152         30ms       30ms           	if newsize > maxstacksize || newsize > maxstackceiling { 
   1153            .          .           		if maxstacksize < maxstackceiling { 
   1154            .          .           			print("runtime: goroutine stack exceeds ", maxstacksize, "-byte limit\n") 
   1155            .          .           		} else { 
   1156            .          .           			print("runtime: goroutine stack exceeds ", maxstackceiling, "-byte limit\n") 
   1157            .          .           		} 
   1158            .          .           		print("runtime: sp=", hex(sp), " stack=[", hex(gp.stack.lo), ", ", hex(gp.stack.hi), "]\n") 
   1159            .          .           		throw("stack overflow") 
   1160            .          .           	} 
   1161            .          .            
   1162            .          .           	// The goroutine must be executing in order to call newstack, 
   1163            .          .           	// so it must be Grunning (or Gscanrunning). 
   1164         10ms       30ms           	casgstatus(gp, _Grunning, _Gcopystack) 
   1165            .          .            
   1166            .          .           	// The concurrent GC will not scan the stack while we are doing the copy since 
   1167            .          .           	// the gp is in a Gcopystack status. 
   1168            .      5.70s           	copystack(gp, newsize) 
   1169            .          .           	if stackDebug >= 1 { 
   1170            .          .           		print("stack grow done\n") 
   1171            .          .           	} 
   1172         10ms      100ms           	casgstatus(gp, _Gcopystack, _Grunning) 
   1173            .          .           	gogo(&gp.sched) 
   1174            .          .           } 
   1175            .          .            
   1176            .          .           //go:nosplit 
   1177            .          .           func nilfunc() { 
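
The policy visible above is: double the old size, keep doubling until the pending frame fits, and throw past the limit. A standalone sketch of that sizing loop, with a hypothetical 1 GiB stand-in for maxstacksize:

    package main

    import "fmt"

    // Hypothetical stack-size cap standing in for maxstacksize above.
    const maxstacksize = 1 << 30

    // nextStackSize mirrors the sizing loop in newstack: double the old
    // size, then keep doubling until the pending frame fits.
    func nextStackSize(oldsize, used, needed uintptr) (uintptr, bool) {
    	newsize := oldsize * 2
    	for newsize-used < needed {
    		newsize *= 2
    	}
    	if newsize > maxstacksize {
    		return 0, false // newstack throws "stack overflow" here
    	}
    	return newsize, true
    }

    func main() {
    	fmt.Println(nextStackSize(8192, 7000, 16384)) // 32768 true
    }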

runtime.gostartcallfn

/usr/lib/go/src/runtime/stack.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1185            .          .           	if fv != nil { 
   1186            .          .           		fn = unsafe.Pointer(fv.fn) 
   1187            .          .           	} else { 
   1188            .          .           		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc)) 
   1189            .          .           	} 
   1190         10ms       10ms           	gostartcall(gobuf, fn, unsafe.Pointer(fv)) 
   1191         10ms       10ms           } 
   1192            .          .            
   1193            .          .           // isShrinkStackSafe returns whether it's safe to attempt to shrink 
   1194            .          .           // gp's stack. Shrinking the stack is only safe when we have precise 
   1195            .          .           // pointer maps for all frames on the stack. The caller must hold the 
   1196            .          .           // _Gscan bit for gp or must be running gp itself. 

runtime.(*stackObjectRecord).gcdata

/usr/lib/go/src/runtime/stack.go

  Total:        40ms       40ms (flat, cum) 0.084%
   1339            .          .           // Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes. 
   1340            .          .           func (r *stackObjectRecord) gcdata() (uintptr, *byte) { 
   1341            .          .           	ptr := uintptr(unsafe.Pointer(r)) 
   1342            .          .           	var mod *moduledata 
   1343            .          .           	for datap := &firstmoduledata; datap != nil; datap = datap.next { 
   1344         40ms       40ms           		if datap.gofunc <= ptr && ptr < datap.end { 
   1345            .          .           			mod = datap 
   1346            .          .           			break 
   1347            .          .           		} 
   1348            .          .           	} 
   1349            .          .           	// If you get a panic here due to a nil mod, 

runtime.heapBitsInSpan

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
     74            .          .           // 
     75            .          .           //go:nosplit 
     76            .          .           func heapBitsInSpan(userSize uintptr) bool { 
     77            .          .           	// N.B. gc.MinSizeForMallocHeader is an exclusive minimum so that this function is 
     78            .          .           	// invariant under size-class rounding on its input. 
     79         10ms       10ms           	return userSize <= gc.MinSizeForMallocHeader 
     80            .          .           } 
     81            .          .            
     82            .          .           // typePointers is an iterator over the pointers in a heap object. 
     83            .          .           // 
     84            .          .           // Iteration through this type implements the tiling algorithm described at the 
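
heapBitsInSpan is a pure threshold test; the comment's point is that rounding a size up to its size class cannot flip the answer, because the minimum is exclusive. A sketch with a hypothetical 512-byte stand-in for gc.MinSizeForMallocHeader:

    package main

    import "fmt"

    // Hypothetical stand-in for gc.MinSizeForMallocHeader.
    const minSizeForMallocHeader = 512

    func heapBitsInSpan(userSize uintptr) bool {
    	return userSize <= minSizeForMallocHeader
    }

    func main() {
    	// Rounding 496 up to a (hypothetical) 512-byte size class leaves
    	// the predicate unchanged, as the invariance comment promises.
    	fmt.Println(heapBitsInSpan(496), heapBitsInSpan(512)) // true true
    	fmt.Println(heapBitsInSpan(513))                      // false: header needed
    }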

runtime.bulkBarrierPreWrite

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    384            .          .           // 
    385            .          .           // Callers must perform cgo checks if goexperiment.CgoCheck2. 
    386            .          .           // 
    387            .          .           //go:nosplit 
    388            .          .           func bulkBarrierPreWrite(dst, src, size uintptr, typ *abi.Type) { 
    389         10ms       10ms           	if (dst|src|size)&(goarch.PtrSize-1) != 0 { 
    390            .          .           		throw("bulkBarrierPreWrite: unaligned arguments") 
    391            .          .           	} 
    392            .          .           	if !writeBarrier.enabled { 
    393            .          .           		return 
    394            .          .           	} 
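
The first line of bulkBarrierPreWrite checks three alignments with one mask: OR-ing dst, src, and size preserves any low bit set in any of them. Sketch:

    package main

    import "fmt"

    const ptrSize = 8 // goarch.PtrSize on 64-bit targets

    // allAligned reports whether dst, src, and size are all word-aligned,
    // using the same single-mask OR trick as bulkBarrierPreWrite.
    func allAligned(dst, src, size uintptr) bool {
    	return (dst|src|size)&(ptrSize-1) == 0
    }

    func main() {
    	fmt.Println(allAligned(0x1000, 0x2000, 64)) // true
    	fmt.Println(allAligned(0x1004, 0x2000, 64)) // false: dst is misaligned
    }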

runtime.(*mspan).initHeapBits

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms      230ms (flat, cum)  0.48%
    507            .          .           // initHeapBits initializes the heap bitmap for a span. 
    508            .          .           func (s *mspan) initHeapBits() { 
    509            .          .           	if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 { 
    510            .          .           		b := s.heapBits() 
    511            .          .           		for i := range b { 
    512         10ms       10ms           			b[i] = ^uintptr(0) 
    513            .          .           		} 
    514            .          .           	} else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk { 
    515            .          .           		b := s.heapBits() 
    516            .      210ms           		clear(b) 
    517            .          .           	} 
    518            .          .           	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) { 
    519            .       10ms           	s.initInlineMarkBits() 
    520            .          .           	} 
    521            .          .           } 
    522            .          .            
    523            .          .           // heapBits returns the heap ptr/scalar bits stored at the end of the span for 
    524            .          .           // small object spans and heap arena spans. 

runtime.spanHeapBitsRange

/usr/lib/go/src/runtime/mbitmap.go

  Total:        60ms       60ms (flat, cum)  0.13%
    568            .          .            
    569            .          .           //go:nosplit 
    570            .          .           func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) { 
    571            .          .           	size = spanSize / goarch.PtrSize / 8 
    572            .          .           	base = spanBase + spanSize - size 
    573         20ms       20ms           	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) { 
    574         40ms       40ms           		base -= unsafe.Sizeof(spanInlineMarkBits{}) 
    575            .          .           	} 
    576            .          .           	return 
    577            .          .           } 
    578            .          .            
    579            .          .           // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits. 
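
spanHeapBitsRange stores one ptr/scalar bit per word at the very top of the span, so the bitmap occupies spanSize/PtrSize/8 bytes. A worked sketch for an 8 KiB span (the GreenTea inline-mark-bits adjustment is left out):

    package main

    import "fmt"

    const ptrSize = 8 // goarch.PtrSize on 64-bit targets

    // spanHeapBitsRange mirrors the arithmetic above: one heap bit per
    // pointer-sized word, stored in the last bytes of the span.
    func spanHeapBitsRange(spanBase, spanSize uintptr) (base, size uintptr) {
    	size = spanSize / ptrSize / 8
    	base = spanBase + spanSize - size
    	return
    }

    func main() {
    	base, size := spanHeapBitsRange(0x400000, 8192)
    	fmt.Printf("bitmap at %#x, %d bytes\n", base, size) // bitmap at 0x401f80, 128 bytes
    }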

runtime.(*mspan).writeHeapBitsSmall

/usr/lib/go/src/runtime/mbitmap.go

  Total:       840ms      840ms (flat, cum)  1.76%
    619            .          .           // 
    620            .          .           // Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span. 
    621            .          .           // heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_. 
    622            .          .           // 
    623            .          .           //go:nosplit 
    624         10ms       10ms           func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) { 
    625            .          .           	// The objects here are always really small, so a single load is sufficient. 
    626        100ms      100ms           	src0 := readUintptr(getGCMask(typ)) 
    627            .          .            
    628            .          .           	// Create repetitions of the bitmap if we have a small slice backing store. 
    629            .          .           	scanSize = typ.PtrBytes 
    630            .          .           	src := src0 
    631        150ms      150ms           	if typ.Size_ == goarch.PtrSize { 
    632            .          .           		src = (1 << (dataSize / goarch.PtrSize)) - 1 
    633            .          .           	} else { 
    634            .          .           		// N.B. We rely on dataSize being an exact multiple of the type size. 
    635            .          .           		// The alternative is to be defensive and mask out src to the length 
    636            .          .           		// of dataSize. The purpose is to save on one additional masking operation. 
    637            .          .           		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { 
    638            .          .           			throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") 
    639            .          .           		} 
    640         40ms       40ms           		for i := typ.Size_; i < dataSize; i += typ.Size_ { 
    641         20ms       20ms           			src |= src0 << (i / goarch.PtrSize) 
    642            .          .           			scanSize += typ.Size_ 
    643            .          .           		} 
    644            .          .           		if asanenabled { 
    645            .          .           			// Mask src down to dataSize. dataSize is going to be a strange size because of 
    646            .          .           			// the redzone required for allocations when asan is enabled. 
    647            .          .           			src &= (1 << (dataSize / goarch.PtrSize)) - 1 
    648            .          .           		} 
    649            .          .           	} 
    650            .          .            
    651            .          .           	// Since we're never writing more than one uintptr's worth of bits, we're either going 
    652            .          .           	// to do one or two writes. 
    653        120ms      120ms           	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize) 
    654            .          .           	dst := unsafe.Pointer(dstBase) 
    655         10ms       10ms           	o := (x - span.base()) / goarch.PtrSize 
    656            .          .           	i := o / ptrBits 
    657            .          .           	j := o % ptrBits 
    658         50ms       50ms           	bits := span.elemsize / goarch.PtrSize 
    659         30ms       30ms           	if j+bits > ptrBits { 
    660            .          .           		// Two writes. 
    661            .          .           		bits0 := ptrBits - j 
    662            .          .           		bits1 := bits - bits0 
    663            .          .           		dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) 
    664         30ms       30ms           		dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) 
    665            .          .           		*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) 
    666         30ms       30ms           		*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) 
    667            .          .           	} else { 
    668            .          .           		// One write. 
    669        150ms      150ms           		dst := (*uintptr)(add(dst, i*goarch.PtrSize)) 
    670         10ms       10ms           		*dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j) 
    671            .          .           	} 
    672            .          .            
    673            .          .           	const doubleCheck = false 
    674            .          .           	if doubleCheck { 
    675            .          .           		srcRead := span.heapBitsSmallForAddr(x) 
    676            .          .           		if srcRead != src { 
    677            .          .           			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n") 
    678            .          .           			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n") 
    679            .          .           			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n") 
    680            .          .           			throw("bad pointer bits written for small object") 
    681            .          .           		} 
    682            .          .           	} 
    683         90ms       90ms           	return 
    684            .          .           } 
    685            .          .            
    686            .          .           // heapSetType* functions record that the new allocation [x, x+size) 
    687            .          .           // holds in [x, x+dataSize) one or more values of type typ. 
    688            .          .           // (The number of values is given by dataSize / typ.Size.) 
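
The bit packing above replicates a small element's pointer mask across a slice backing store, shifting it into place once per element. A sketch with a hypothetical 24-byte element whose words 0 and 2 are pointers:

    package main

    import "fmt"

    const ptrSize = 8

    // replicateMask repeats a per-element pointer mask across a small
    // allocation, like the src |= src0 << (i / goarch.PtrSize) loop above.
    func replicateMask(src0, elemSize, dataSize uintptr) uintptr {
    	src := src0
    	for i := elemSize; i < dataSize; i += elemSize {
    		src |= src0 << (i / ptrSize)
    	}
    	return src
    }

    func main() {
    	// Three 24-byte elements, pointers in words 0 and 2 of each (0b101):
    	fmt.Printf("%b\n", replicateMask(0b101, 24, 72)) // 101101101
    }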

runtime.heapSetTypeNoHeader

/usr/lib/go/src/runtime/mbitmap.go

  Total:        90ms      930ms (flat, cum)  1.95%
    704            .          .            
    705            .          .           func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr { 
    706            .          .           	if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) { 
    707            .          .           		throw("tried to write heap bits, but no heap bits in span") 
    708            .          .           	} 
    709         90ms      930ms           	scanSize := span.writeHeapBitsSmall(x, dataSize, typ) 
    710            .          .           	if doubleCheckHeapSetType { 
    711            .          .           		doubleCheckHeapType(x, dataSize, typ, nil, span) 
    712            .          .           	} 
    713            .          .           	return scanSize 
    714            .          .           } 

runtime.addb

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1007            .          .           //go:nosplit 
   1008            .          .           func addb(p *byte, n uintptr) *byte { 
   1009            .          .           	// Note: wrote out full expression instead of calling add(p, n) 
   1010            .          .           	// to reduce the number of temporaries generated by the 
   1011            .          .           	// compiler for this trivial expression during inlining. 
   1012         20ms       20ms           	return (*byte)(unsafe.Pointer(uintptr(unsafe.Pointer(p)) + n)) 
   1013            .          .           } 
   1014            .          .            
   1015            .          .           // subtractb returns the byte pointer p-n. 
   1016            .          .           // 
   1017            .          .           //go:nowritebarrier 

runtime.(*mspan).refillAllocCache

/usr/lib/go/src/runtime/mbitmap.go

  Total:       140ms      140ms (flat, cum)  0.29%
   1071            .          .           // refillAllocCache takes 8 bytes s.allocBits starting at whichByte 
   1072            .          .           // and negates them so that ctz (count trailing zeros) instructions 
   1073            .          .           // can be used. It then places these 8 bytes into the cached 64 bit 
   1074            .          .           // s.allocCache. 
   1075            .          .           func (s *mspan) refillAllocCache(whichByte uint16) { 
   1076        120ms      120ms           	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte)))) 
   1077            .          .           	aCache := uint64(0) 
   1078            .          .           	aCache |= uint64(bytes[0]) 
   1079            .          .           	aCache |= uint64(bytes[1]) << (1 * 8) 
   1080            .          .           	aCache |= uint64(bytes[2]) << (2 * 8) 
   1081            .          .           	aCache |= uint64(bytes[3]) << (3 * 8) 
   1082            .          .           	aCache |= uint64(bytes[4]) << (4 * 8) 
   1083            .          .           	aCache |= uint64(bytes[5]) << (5 * 8) 
   1084            .          .           	aCache |= uint64(bytes[6]) << (6 * 8) 
   1085            .          .           	aCache |= uint64(bytes[7]) << (7 * 8) 
   1086         20ms       20ms           	s.allocCache = ^aCache 
   1087            .          .           } 
   1088            .          .            
   1089            .          .           // nextFreeIndex returns the index of the next free object in s at 
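
refillAllocCache negates 8 bytes of allocBits so that a free slot reads as a 1 bit, which lets nextFreeIndex find it with a count-trailing-zeros instruction. A sketch of the refill plus the ctz probe (byte values are made up; the runtime unrolls the loop):

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    // refill assembles 8 alloc-bitmap bytes little-endian and negates
    // them, as refillAllocCache does: a 1 bit now means "free slot".
    func refill(b [8]uint8) uint64 {
    	var aCache uint64
    	for i, v := range b {
    		aCache |= uint64(v) << (uint(i) * 8)
    	}
    	return ^aCache
    }

    func main() {
    	// First 10 objects already allocated: bytes 0xff, 0x03, rest zero.
    	cache := refill([8]uint8{0xff, 0x03})
    	fmt.Println(bits.TrailingZeros64(cache)) // 10: next free object index
    }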

runtime.(*mspan).nextFreeIndex

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms      160ms (flat, cum)  0.33%
   1090            .          .           // or after s.freeindex. 
   1091            .          .           // There are hardware instructions that can be used to make this 
   1092            .          .           // faster if profiling warrants it. 
   1093            .      140ms           func (s *mspan) nextFreeIndex() uint16 { 
   1094            .          .           	sfreeindex := s.freeindex 
   1095            .          .           	snelems := s.nelems 
   1096         10ms       10ms           	if sfreeindex == snelems { 
   1097         10ms       10ms           		return sfreeindex 
   1098            .          .           	} 
   1099            .          .           	if sfreeindex > snelems { 
   1100            .          .           		throw("s.freeindex > s.nelems") 
   1101            .          .           	} 
   1102            .          .            

runtime.(*mspan).nextFreeIndex

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       70ms (flat, cum)  0.15%
   1122            .          .           	if result >= snelems { 
   1123            .          .           		s.freeindex = snelems 
   1124            .          .           		return snelems 
   1125            .          .           	} 
   1126            .          .            
   1127         10ms       10ms           	s.allocCache >>= uint(bitIndex + 1) 
   1128            .          .           	sfreeindex = result + 1 
   1129            .          .            
   1130            .          .           	if sfreeindex%64 == 0 && sfreeindex != snelems { 
   1131            .          .           		// We just incremented s.freeindex so it isn't 0. 
   1132            .          .           		// As each 1 in s.allocCache was encountered and used for allocation 
   1133            .          .           		// it was shifted away. At this point s.allocCache contains all 0s. 
   1134            .          .           		// Refill s.allocCache so that it corresponds 
   1135            .          .           		// to the bits at s.allocBits starting at s.freeindex. 
   1136            .          .           		whichByte := sfreeindex / 8 
   1137            .       60ms           		s.refillAllocCache(whichByte) 
   1138            .          .           	} 
   1139            .          .           	s.freeindex = sfreeindex 
   1140            .          .           	return result 
   1141            .          .           } 
   1142            .          .            

runtime.findObject

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1331            .          .           	} 
   1332            .          .           	// If p is a bad pointer, it may not be in s's bounds. 
   1333            .          .           	// 
   1334            .          .           	// Check s.state to synchronize with span initialization 
   1335            .          .           	// before checking other fields. See also spanOfHeap. 
   1336         20ms       20ms           	if state := s.state.get(); state != mSpanInUse || p < s.base() || p >= s.limit { 
   1337            .          .           		// Pointers into stacks are also ok, the runtime manages these explicitly. 
   1338            .          .           		if state == mSpanManual { 
   1339            .          .           			return 
   1340            .          .           		} 
   1341            .          .           		// The following ensures that we are rigorous about what data 

runtime.typeBitsBulkBarrier

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1424            .          .           	if typ.Size_ != size { 
   1425            .          .           		println("runtime: typeBitsBulkBarrier with type ", toRType(typ).string(), " of size ", typ.Size_, " but memory size", size) 
   1426            .          .           		throw("runtime: invalid typeBitsBulkBarrier") 
   1427            .          .           	} 
   1428            .          .           	if !writeBarrier.enabled { 
   1429         20ms       20ms           		return 
   1430            .          .           	} 
   1431            .          .           	ptrmask := getGCMask(typ) 
   1432            .          .           	buf := &getg().m.p.ptr().wbBuf 
   1433            .          .           	var bits uint32 
   1434            .          .           	for i := uintptr(0); i < typ.PtrBytes; i += goarch.PtrSize { 

runtime.(*mSpanStateBox).get

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
    412            .          .           // It is nosplit because it's called indirectly by typedmemclr, 
    413            .          .           // which must not be preempted. 
    414            .          .            
    415            .          .           //go:nosplit 
    416            .          .           func (b *mSpanStateBox) get() mSpanState { 
    417         20ms       20ms           	return mSpanState(b.s.Load()) 
    418            .          .           } 
    419            .          .            
    420            .          .           type mspan struct { 
    421            .          .           	_    sys.NotInHeap 
    422            .          .           	next *mspan     // next span in list, or nil if none 

runtime.(*mspan).base

/usr/lib/go/src/runtime/mheap.go

  Total:        80ms       80ms (flat, cum)  0.17%
    518            .          .           	userArenaChunkFree    addrRange     // interval for managing chunk allocation 
    519            .          .           	largeType             *_type        // malloc header for large objects. 
    520            .          .           } 
    521            .          .            
    522            .          .           func (s *mspan) base() uintptr { 
    523         80ms       80ms           	return s.startAddr 
    524            .          .           } 
    525            .          .            
    526            .          .           func (s *mspan) layout() (size, n, total uintptr) { 
    527            .          .           	total = s.npages << gc.PageShift 
    528            .          .           	size = s.elemsize 

runtime.makeSpanClass

/usr/lib/go/src/runtime/mheap.go

  Total:       160ms      160ms (flat, cum)  0.33%
    589            .          .           	numSpanClasses = gc.NumSizeClasses << 1 
    590            .          .           	tinySpanClass  = spanClass(tinySizeClass<<1 | 1) 
    591            .          .           ) 
    592            .          .            
    593            .          .           func makeSpanClass(sizeclass uint8, noscan bool) spanClass { 
    594        160ms      160ms           	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan)) 
    595            .          .           } 
    596            .          .            
    597            .          .           //go:nosplit 
    598            .          .           func (sc spanClass) sizeclass() int8 { 
    599            .          .           	return int8(sc >> 1) 
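
A spanClass packs the size class and the noscan flag into one byte: class in the high bits, noscan in bit 0. Sketch of the encode/decode pair (branching where the runtime uses bool2int):

    package main

    import "fmt"

    type spanClass uint8

    // makeSpanClass packs the size class and noscan flag into one byte.
    func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
    	sc := spanClass(sizeclass << 1)
    	if noscan {
    		sc |= 1
    	}
    	return sc
    }

    func (sc spanClass) sizeclass() int8 { return int8(sc >> 1) }
    func (sc spanClass) noscan() bool    { return sc&1 != 0 }

    func main() {
    	sc := makeSpanClass(5, true)
    	fmt.Println(sc.sizeclass(), sc.noscan()) // 5 true
    }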

runtime.spanOf

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
    717            .          .           	} 
    718            .          .           	l2 := mheap_.arenas[ri.l1()] 
    719            .          .           	if arenaL1Bits != 0 && l2 == nil { // Should never happen if there's no L1. 
    720            .          .           		return nil 
    721            .          .           	} 
    722         10ms       10ms           	ha := l2[ri.l2()] 
    723            .          .           	if ha == nil { 
    724            .          .           		return nil 
    725            .          .           	} 
    726         10ms       10ms           	return ha.spans[(p/pageSize)%pagesPerArena] 
    727            .          .           } 
    728            .          .            
    729            .          .           // spanOfUnchecked is equivalent to spanOf, but the caller must ensure 
    730            .          .           // that p points into an allocated heap arena. 
    731            .          .           // 

runtime.spanOfUnchecked

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
    732            .          .           // Must be nosplit because it has callers that are nosplit. 
    733            .          .           // 
    734            .          .           //go:nosplit 
    735            .          .           func spanOfUnchecked(p uintptr) *mspan { 
    736            .          .           	ai := arenaIndex(p) 
    737         20ms       20ms           	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena] 
    738            .          .           } 
    739            .          .            
    740            .          .           // spanOfHeap is like spanOf, but returns nil if p does not point to a 
    741            .          .           // heap object. 
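
spanOfUnchecked resolves a pointer through the two-level arenas map and the per-arena spans table. A flattened, single-level sketch of the index arithmetic, with hypothetical geometry (64 MiB arenas, 8 KiB pages; the real code also applies arenaBaseOffset and splits the arena index into L1/L2):

    package main

    import "fmt"

    // Hypothetical geometry; the real values are platform-dependent.
    const (
    	pageSize       = 8192
    	heapArenaBytes = 64 << 20
    	pagesPerArena  = heapArenaBytes / pageSize
    )

    // arenaAndSlot flattens the lookup above into one level: which arena
    // an address falls in, and which spans-table slot within that arena.
    func arenaAndSlot(p uintptr) (arena, slot uintptr) {
    	return p / heapArenaBytes, (p / pageSize) % pagesPerArena
    }

    func main() {
    	fmt.Println(arenaAndSlot(0x30002000)) // 12 1: arena 12, page slot 1
    }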

runtime.spanOfHeap

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    743            .          .           // Must be nosplit because it has callers that are nosplit. 
    744            .          .           // 
    745            .          .           //go:nosplit 
    746            .          .           func spanOfHeap(p uintptr) *mspan { 
    747         10ms       10ms           	s := spanOf(p) 
    748            .          .           	// s is nil if it's never been allocated. Otherwise, we check 
    749            .          .           	// its state first because we don't trust this pointer, so we 
    750            .          .           	// have to synchronize with span initialization. Then, it's 
    751            .          .           	// still possible we picked up a stale span pointer, so we 
    752            .          .           	// have to check the span's bounds. 

runtime.pageIndexOf

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    758            .          .            
    759            .          .           // pageIndexOf returns the arena, page index, and page mask for pointer p. 
    760            .          .           // The caller must ensure p is in the heap. 
    761            .          .           func pageIndexOf(p uintptr) (arena *heapArena, pageIdx uintptr, pageMask uint8) { 
    762            .          .           	ai := arenaIndex(p) 
    763         10ms       10ms           	arena = mheap_.arenas[ai.l1()][ai.l2()] 
    764            .          .           	pageIdx = ((p / pageSize) / 8) % uintptr(len(arena.pageInUse)) 
    765            .          .           	pageMask = byte(1 << ((p / pageSize) % 8)) 
    766            .          .           	return 
    767            .          .           } 
    768            .          .            
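
pageIndexOf turns an address into a byte index and bit mask within the one-bit-per-page pageInUse bitmap. Sketch with a hypothetical 8 KiB page size:

    package main

    import "fmt"

    const pageSize = 8192 // hypothetical page size

    // pageBit returns the byte index and bit mask for page p in a
    // one-bit-per-page bitmap, as pageIndexOf computes above.
    func pageBit(p uintptr) (idx uintptr, mask uint8) {
    	page := p / pageSize
    	return page / 8, 1 << (page % 8)
    }

    func main() {
    	idx, mask := pageBit(11 * pageSize)
    	fmt.Printf("byte %d, mask %b\n", idx, mask) // byte 1, mask 1000 (bit 3)
    }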

runtime.(*mheap).alloc

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1001            .          .           // 
   1002            .          .           // spanclass indicates the span's size class and scannability. 
   1003            .          .           // 
   1004            .          .           // Returns a span that has been fully initialized. span.needzero indicates 
   1005            .          .           // whether the span has been zeroed. Note that it may not be. 
   1006         10ms       10ms           func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan { 
   1007            .          .           	// Don't do any operations that lock the heap on the G stack. 
   1008            .          .           	// It might trigger stack growth, and the stack growth code needs 

runtime.(*mheap).alloc.func1

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms      1.49s (flat, cum)  3.12%
   1009            .          .           	// to be able to allocate heap. 
   1010            .          .           	var s *mspan 
   1011         10ms      760ms           	systemstack(func() { 
   1012            .          .           		// To prevent excessive heap growth, before allocating n pages 
   1013            .          .           		// we need to sweep and reclaim at least n pages. 
   1014         10ms       10ms           		if !isSweepDone() { 
   1015            .          .           			h.reclaim(npages) 
   1016            .          .           		} 
   1017            .      720ms           		s = h.allocSpan(npages, spanAllocHeap, spanclass) 
   1018            .          .           	}) 
   1019            .          .           	return s 
   1020            .          .           } 
   1021            .          .            
   1022            .          .           // allocManual allocates a manually-managed span of npage pages. 

runtime.(*mheap).allocNeedsZero

/usr/lib/go/src/runtime/mheap.go

  Total:       120ms      120ms (flat, cum)  0.25%
   1067            .          .           // allocator can otherwise prove the memory it's allocating is already zero because 
   1068            .          .           // they're fresh from the operating system. It updates heapArena metadata that is 
   1069            .          .           // critical for future page allocations. 
   1070            .          .           // 
   1071            .          .           // There are no locking constraints on this method. 
   1072         20ms       20ms           func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) { 
   1073            .          .           	for npage > 0 { 
   1074            .          .           		ai := arenaIndex(base) 
   1075         10ms       10ms           		ha := h.arenas[ai.l1()][ai.l2()] 
   1076            .          .            
   1077         10ms       10ms           		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase) 
   1078         80ms       80ms           		arenaBase := base % heapArenaBytes 
   1079            .          .           		if arenaBase < zeroedBase { 
   1080            .          .           			// We extended into the non-zeroed part of the 
   1081            .          .           			// arena, so this region needs to be zeroed before use. 
   1082            .          .           			// 
   1083            .          .           			// zeroedBase is monotonically increasing, so if we see this now then 
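
allocNeedsZero compares the allocation against the arena's zeroedBase water mark: anything below the mark has been handed out before and must be re-zeroed. A single-arena sketch with hypothetical arena geometry:

    package main

    import "fmt"

    const heapArenaBytes = 64 << 20 // hypothetical arena size

    // needsZero reports whether an allocation at base dips below the
    // arena's zeroedBase water mark, i.e. reuses memory that was handed
    // out before and so must be zeroed. One-arena view of allocNeedsZero.
    func needsZero(base, zeroedBase uintptr) bool {
    	arenaBase := base % heapArenaBytes
    	return arenaBase < zeroedBase
    }

    func main() {
    	// First 1 MiB of this arena has been used before:
    	fmt.Println(needsZero(0x40000000+512<<10, 1<<20)) // true: below the mark
    	fmt.Println(needsZero(0x40000000+2<<20, 1<<20))   // false: fresh memory
    }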

runtime.(*mheap).tryAllocMSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1141            .          .           	if pp == nil || pp.mspancache.len == 0 { 
   1142            .          .           		return nil 
   1143            .          .           	} 
   1144            .          .           	// Pull off the last entry in the cache. 
   1145            .          .           	s := pp.mspancache.buf[pp.mspancache.len-1] 
   1146         10ms       10ms           	pp.mspancache.len-- 
   1147            .          .           	return s 
   1148            .          .           } 
   1149            .          .            
   1150            .          .           // allocMSpanLocked allocates an mspan object. 
   1151            .          .           // 

runtime.(*mheap).allocMSpanLocked

/usr/lib/go/src/runtime/mheap.go

  Total:           0      110ms (flat, cum)  0.23%
   1167            .          .           	} 
   1168            .          .           	// Refill the cache if necessary. 
   1169            .          .           	if pp.mspancache.len == 0 { 
   1170            .          .           		const refillCount = len(pp.mspancache.buf) / 2 
   1171            .          .           		for i := 0; i < refillCount; i++ { 
   1172            .      110ms           			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc()) 
   1173            .          .           		} 
   1174            .          .           		pp.mspancache.len = refillCount 
   1175            .          .           	} 
   1176            .          .           	// Pull off the last entry in the cache. 
   1177            .          .           	s := pp.mspancache.buf[pp.mspancache.len-1] 

runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms      100ms (flat, cum)  0.21%
   1240            .          .           		c := &pp.pcache 
   1241            .          .            
   1242            .          .           		// If the cache is empty, refill it. 
   1243            .          .           		if c.empty() { 
   1244            .          .           			lock(&h.lock) 
   1245            .       40ms           			*c = h.pages.allocToCache() 
   1246            .          .           			unlock(&h.lock) 
   1247            .          .           		} 
   1248            .          .            
   1249            .          .           		// Try to allocate from the cache. 
   1250            .       50ms           		base, scav = c.alloc(npages) 
   1251            .          .           		if base != 0 { 
   1252         10ms       10ms           			s = h.tryAllocMSpan() 
   1253            .          .           			if s != nil { 
   1254            .          .           				goto HaveSpan 
   1255            .          .           			} 
   1256            .          .           			// We have a base but no mspan, so we need 
   1257            .          .           			// to lock the heap. 

runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      120ms (flat, cum)  0.25%
   1294            .          .           	if base == 0 { 
   1295            .          .           		// Try to acquire a base address. 
   1296            .          .           		base, scav = h.pages.alloc(npages) 
   1297            .          .           		if base == 0 { 
   1298            .          .           			var ok bool 
   1299            .       10ms           			growth, ok = h.grow(npages) 
   1300            .          .           			if !ok { 
   1301            .          .           				unlock(&h.lock) 
   1302            .          .           				return nil 
   1303            .          .           			} 
   1304            .          .           			base, scav = h.pages.alloc(npages) 
   1305            .          .           			if base == 0 { 
   1306            .          .           				throw("grew heap, but no adequate free space found") 
   1307            .          .           			} 
   1308            .          .           		} 
   1309            .          .           	} 
   1310            .          .           	if s == nil { 
   1311            .          .           		// We failed to get an mspan earlier, so grab 
   1312            .          .           		// one now that we have the heap lock. 
   1313            .      110ms           		s = h.allocMSpanLocked() 
   1314            .          .           	} 
   1315            .          .           	unlock(&h.lock) 
   1316            .          .            
   1317            .          .           HaveSpan: 
   1318            .          .           	// Decide if we need to scavenge in response to what we just allocated. 

runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        50ms      500ms (flat, cum)  1.05%
   1388            .          .           		} 
   1389            .          .           		scavenge.assistTime.Add(now - start) 
   1390            .          .           	} 
   1391            .          .            
   1392            .          .           	// Initialize the span. 
   1393            .      380ms           	h.initSpan(s, typ, spanclass, base, npages) 
   1394            .          .            
   1395            .          .           	if valgrindenabled { 
   1396            .          .           		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize) 
   1397            .          .           	} 
   1398            .          .            
   1399            .          .           	// Commit and account for any scavenged memory that the span now owns. 
   1400            .          .           	nbytes := npages * pageSize 
   1401            .          .           	if scav != 0 { 
   1402            .          .           		// sysUsed all the pages that are actually available 
   1403            .          .           		// in the span since some of them might be scavenged. 
   1404            .          .           		sysUsed(unsafe.Pointer(base), nbytes, scav) 
   1405            .       10ms           		gcController.heapReleased.add(-int64(scav)) 
   1406            .          .           	} 
   1407            .          .           	// Update stats. 
   1408            .       20ms           	gcController.heapFree.add(-int64(nbytes - scav)) 
   1409            .          .           	if typ == spanAllocHeap { 
   1410            .       30ms           		gcController.heapInUse.add(int64(nbytes)) 
   1411            .          .           	} 
   1412            .          .           	// Update consistent stats. 
   1413            .          .           	stats := memstats.heapStats.acquire() 
   1414         40ms       40ms           	atomic.Xaddint64(&stats.committed, int64(scav)) 
   1415         10ms       10ms           	atomic.Xaddint64(&stats.released, -int64(scav)) 
   1416            .          .           	switch typ { 
   1417            .          .           	case spanAllocHeap: 
   1418            .          .           		atomic.Xaddint64(&stats.inHeap, int64(nbytes)) 
   1419            .          .           	case spanAllocStack: 
   1420            .          .           		atomic.Xaddint64(&stats.inStacks, int64(nbytes)) 
   1421            .          .           	case spanAllocWorkBuf: 
   1422            .          .           		atomic.Xaddint64(&stats.inWorkBufs, int64(nbytes)) 
   1423            .          .           	} 
   1424            .       10ms           	memstats.heapStats.release() 
   1425            .          .            
   1426            .          .           	// Trace the span alloc. 
   1427            .          .           	if traceAllocFreeEnabled() { 
   1428            .          .           		trace := traceAcquire() 
   1429            .          .           		if trace.ok() { 

runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      240ms (flat, cum)   0.5%
   1437            .          .           // initSpan initializes a blank span s which will represent the range 
   1438            .          .           // [base, base+npages*pageSize). typ is the type of span being allocated. 
   1439            .          .           func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) { 
   1440            .          .           	// At this point, both s != nil and base != 0, and the heap 
   1441            .          .           	// lock is no longer held. Initialize the span. 
   1442            .      120ms           	s.init(base, npages) 
   1443            .      120ms           	if h.allocNeedsZero(base, npages) { 
   1444            .          .           		s.needzero = 1 
   1445            .          .           	} 
   1446            .          .           	nbytes := npages * pageSize 
   1447            .          .           	if typ.manual() { 
   1448            .          .           		s.manualFreeList = 0 

runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0       80ms (flat, cum)  0.17%
   1482            .          .            
   1483            .          .           		// Initialize mark and allocation structures. 
   1484            .          .           		s.freeindex = 0 
   1485            .          .           		s.freeIndexForScan = 0 
   1486            .          .           		s.allocCache = ^uint64(0) // all 1s indicating all free. 
   1487            .       80ms           		s.gcmarkBits = newMarkBits(uintptr(s.nelems)) 
   1488            .          .           		s.allocBits = newAllocBits(uintptr(s.nelems)) 
   1489            .          .            
   1490            .          .           		// Adjust s.limit down to the object-containing part of the span. 
   1491            .          .           		s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) 
   1492            .          .            

runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        60ms       60ms (flat, cum)  0.13%
   1521            .          .           		// Mark in-use span in arena page bitmap. 
   1522            .          .           		// 
   1523            .          .           		// This publishes the span to the page sweeper, so 
   1524            .          .           		// it's imperative that the span be completely initialized 
   1525            .          .           		// prior to this line. 
   1526         20ms       20ms           		arena, pageIdx, pageMask := pageIndexOf(s.base()) 
   1527         10ms       10ms           		atomic.Or8(&arena.pageInUse[pageIdx], pageMask) 
   1528            .          .            
   1529            .          .           		// Mark packed span. 
   1530         20ms       20ms           		if gcUsesSpanInlineMarkBits(s.elemsize) { 
   1531            .          .           			atomic.Or8(&arena.pageUseSpanInlineMarkBits[pageIdx], pageMask) 
   1532            .          .           		} 
   1533            .          .            
   1534            .          .           		// Update related page sweeper stats. 
   1535         10ms       10ms           		h.pagesInUse.Add(npages) 
   1536            .          .           	} 
   1537            .          .            
   1538            .          .           	// Make sure the newly allocated span will be observed 
   1539            .          .           	// by the GC before pointers into the span are published. 
   1540            .          .           	publicationBarrier() 

runtime.(*mheap).grow

/usr/lib/go/src/runtime/mheap.go

  Total:           0       10ms (flat, cum) 0.021%
   1611            .          .           	// Transition the space we're going to use from Reserved to Prepared. 
   1612            .          .           	// 
   1613            .          .           	// The allocation is always aligned to the heap arena 
   1614            .          .           	// size which is always > physPageSize, so its safe to 
   1615            .          .           	// just add directly to heapReleased. 
   1616            .       10ms           	sysMap(unsafe.Pointer(v), nBase-v, &gcController.heapReleased, "heap") 
   1617            .          .            
   1618            .          .           	// The memory just allocated counts as both released 
   1619            .          .           	// and idle, even though it's not yet backed by spans. 
   1620            .          .           	stats := memstats.heapStats.acquire() 
   1621            .          .           	atomic.Xaddint64(&stats.released, int64(nBase-v)) 

runtime.(*mheap).freeSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      420ms (flat, cum)  0.88%
   1628            .          .           	return totalGrowth, true 
   1629            .          .           } 
   1630            .          .            
   1631            .          .           // Free the span back into the heap. 
   1632            .          .           func (h *mheap) freeSpan(s *mspan) { 
   1633            .      420ms           	systemstack(func() { 
   1634            .          .           		// Trace the span free. 
   1635            .          .           		if traceAllocFreeEnabled() { 
   1636            .          .           			trace := traceAcquire() 
   1637            .          .           			if trace.ok() { 

runtime.(*sweepLocked).sweep.(*mheap).freeSpan.func2

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms      420ms (flat, cum)  0.88%
   1639            .          .           				traceRelease(trace) 
   1640            .          .           			} 
   1641            .          .           		} 
   1642            .          .            
   1643         10ms      120ms           		lock(&h.lock) 
   1644            .          .           		if msanenabled { 
   1645            .          .           			// Tell msan that this entire span is no longer in use. 
   1646            .          .           			base := unsafe.Pointer(s.base()) 
   1647            .          .           			bytes := s.npages << gc.PageShift 
   1648            .          .           			msanfree(base, bytes) 
   1649            .          .           		} 
   1650            .          .           		if asanenabled { 
   1651            .          .           			// Tell asan that this entire span is no longer in use. 
   1652            .          .           			base := unsafe.Pointer(s.base()) 
   1653            .          .           			bytes := s.npages << gc.PageShift 
   1654            .          .           			asanpoison(base, bytes) 
   1655            .          .           		} 
   1656            .          .           		if valgrindenabled { 
   1657            .          .           			base := s.base() 
   1658            .          .           			valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base)) 
   1659            .          .           		} 
   1660            .      260ms           		h.freeSpanLocked(s, spanAllocHeap) 
   1661            .       40ms           		unlock(&h.lock) 
   1662            .          .           	}) 
   1663            .          .           } 
   1664            .          .            
   1665            .          .           // freeManual frees a manually-managed span returned by allocManual. 
   1666            .          .           // typ must be the same as the spanAllocType passed to the allocManual that 

runtime.(*mheap).freeSpanLocked

/usr/lib/go/src/runtime/mheap.go

  Total:        80ms      260ms (flat, cum)  0.54%
   1694            .          .           } 
   1695            .          .            
   1696            .          .           func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) { 
   1697            .          .           	assertLockHeld(&h.lock) 
   1698            .          .            
   1699         10ms       10ms           	switch s.state.get() { 
   1700            .          .           	case mSpanManual: 
   1701            .          .           		if s.allocCount != 0 { 
   1702            .          .           			throw("mheap.freeSpanLocked - invalid stack free") 
   1703            .          .           		} 
   1704            .          .           	case mSpanInUse: 
   1705            .          .           		if s.isUserArenaChunk { 
   1706            .          .           			throw("mheap.freeSpanLocked - invalid free of user arena chunk") 
   1707            .          .           		} 
   1708            .          .           		if s.allocCount != 0 || s.sweepgen != h.sweepgen { 
   1709            .          .           			print("mheap.freeSpanLocked - span ", s, " ptr ", hex(s.base()), " allocCount ", s.allocCount, " sweepgen ", s.sweepgen, "/", h.sweepgen, "\n") 
   1710            .          .           			throw("mheap.freeSpanLocked - invalid free") 
   1711            .          .           		} 
   1712         30ms       30ms           		h.pagesInUse.Add(-s.npages) 
                                                           return Xadduintptr(&u.value, delta)                              types.go:420

   1713            .          .            
   1714            .          .           		// Clear in-use bit in arena page bitmap. 
   1715            .          .           		arena, pageIdx, pageMask := pageIndexOf(s.base()) 
   1716            .          .           		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask) 
   1717            .          .            
   1718            .          .           		// Clear small heap span bit if necessary. 
   1719         10ms       10ms           		if gcUsesSpanInlineMarkBits(s.elemsize) { 
   1720            .          .           			atomic.And8(&arena.pageUseSpanInlineMarkBits[pageIdx], ^pageMask) 
   1721            .          .           		} 
   1722            .          .           	default: 
   1723            .          .           		throw("mheap.freeSpanLocked - invalid span state") 
   1724            .          .           	} 
   1725            .          .            
   1726            .          .           	// Update stats. 
   1727            .          .           	// 
   1728            .          .           	// Mirrors the code in allocSpan. 
   1729            .          .           	nbytes := s.npages * pageSize 
   1730            .       10ms           	gcController.heapFree.add(int64(nbytes)) 
   1731            .          .           	if typ == spanAllocHeap { 
   1732            .       30ms           		gcController.heapInUse.add(-int64(nbytes)) 
   1733            .          .           	} 
   1734            .          .           	// Update consistent stats. 
   1735            .          .           	stats := memstats.heapStats.acquire() 
   1736            .          .           	switch typ { 
   1737            .          .           	case spanAllocHeap: 
   1738         30ms       30ms           		atomic.Xaddint64(&stats.inHeap, -int64(nbytes)) 
   1739            .          .           	case spanAllocStack: 
   1740            .          .           		atomic.Xaddint64(&stats.inStacks, -int64(nbytes)) 
   1741            .          .           	case spanAllocWorkBuf: 
   1742            .          .           		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes)) 
   1743            .          .           	} 
   1744            .       10ms           	memstats.heapStats.release() 
   1745            .          .            
   1746            .          .           	// Mark the space as free. 
   1747            .      130ms           	h.pages.free(s.base(), s.npages) 
   1748            .          .            
   1749            .          .           	// Free the span structure. We no longer have a use for it. 
   1750            .          .           	s.state.set(mSpanDead) 
   1751            .          .           	h.freeMSpanLocked(s) 
   1752            .          .           } 
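
freeSpanLocked clears the span's page bits with an atomic AND of an inverted mask (atomic.And8(&arena.pageInUse[pageIdx], ^pageMask) above). A user-level sketch of the same bit-clearing technique, assuming Go 1.23+ for the And/Or methods on sync/atomic types (the runtime uses its own internal 8-bit variant):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // pageBitmap mirrors the arena page bitmaps: one bit per page.
    // Hypothetical type for illustration, not the runtime's.
    type pageBitmap []atomic.Uint32

    func (b pageBitmap) set(i uint)   { b[i/32].Or(uint32(1) << (i % 32)) }
    func (b pageBitmap) clear(i uint) { b[i/32].And(^(uint32(1) << (i % 32))) } // AND with inverted mask, like ^pageMask

    func main() {
    	b := make(pageBitmap, 4) // 128 pages
    	b.set(70)
    	b.clear(70)
    	fmt.Println(b[2].Load()) // 0
    }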

runtime.(*mspan).init

/usr/lib/go/src/runtime/mheap.go

  Total:       120ms      120ms (flat, cum)  0.25%
   1782            .          .           } 
   1783            .          .            
   1784            .          .           // Initialize a new span with the given start and npages. 
   1785            .          .           func (span *mspan) init(base uintptr, npages uintptr) { 
   1786            .          .           	// span is *not* zeroed. 
   1787         20ms       20ms           	span.next = nil 
   1788        100ms      100ms           	span.prev = nil 
   1789            .          .           	span.list = nil 
   1790            .          .           	span.startAddr = base 
   1791            .          .           	span.npages = npages 
   1792            .          .           	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans 
   1793            .          .           	span.allocCount = 0 

runtime.spanHasNoSpecials

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   2031            .          .            
   2032            .          .           // spanHasNoSpecials marks a span as having no specials in the arena bitmap. 
   2033            .          .           func spanHasNoSpecials(s *mspan) { 
   2034            .          .           	arenaPage := (s.base() / pageSize) % pagesPerArena 
   2035            .          .           	ai := arenaIndex(s.base()) 
   2036         10ms       10ms           	ha := mheap_.arenas[ai.l1()][ai.l2()] 
   2037            .          .           	atomic.And8(&ha.pageSpecials[arenaPage/8], ^(uint8(1) << (arenaPage % 8))) 
   2038            .          .           } 
   2039            .          .            
   2040            .          .           // addspecial adds the special record s to the list of special records for 
   2041            .          .           // the object p. All fields of s should be filled in except for 

runtime.addspecial

/usr/lib/go/src/runtime/mheap.go

  Total:        30ms       60ms (flat, cum)  0.13%
   2042            .          .           // offset & next, which this routine will fill in. 
   2043            .          .           // Returns true if the special was successfully added, false otherwise. 
   2044            .          .           // (The add will fail only if a record with the same p and s->kind 
   2045            .          .           // already exists unless force is set to true.) 
   2046            .          .           func addspecial(p unsafe.Pointer, s *special, force bool) bool { 
   2047            .       10ms           	span := spanOfHeap(uintptr(p)) 
   2048            .          .           	if span == nil { 
   2049            .          .           		throw("addspecial on invalid pointer") 
   2050            .          .           	} 
   2051            .          .            
   2052            .          .           	// Ensure that the span is swept. 
   2053            .          .           	// Sweeping accesses the specials list w/o locks, so we have 
   2054            .          .           	// to synchronize with it. And it's just much safer. 
   2055            .          .           	mp := acquirem() 
   2056            .       10ms           	span.ensureSwept() 
   2057            .          .            
   2058            .          .           	offset := uintptr(p) - span.base() 
   2059            .          .           	kind := s.kind 
   2060            .          .            
   2061            .       10ms           	lock(&span.speciallock) 
                                                           lockWithRank(l, getLockRank(l))                                  lock_spinbit.go:152
                                                          lock2(l)                                                         lockrank_off.go:24

   2062            .          .            
   2063            .          .           	// Find splice point, check for existing record. 
   2064         10ms       10ms           	iter, exists := span.specialFindSplicePoint(offset, kind) 
                                                           if s == nil {                                                    mheap.go:2124

   2065            .          .           	if !exists || force { 
   2066            .          .           		// Splice in record, fill in offset. 
   2067            .          .           		s.offset = offset 
   2068            .          .           		s.next = *iter 
   2069            .          .           		*iter = s 
   2070            .          .           		spanHasSpecials(span) 
   2071            .          .           	} 
   2072            .          .            
   2073         20ms       20ms           	unlock(&span.speciallock) 
   2074            .          .           	releasem(mp) 
   2075            .          .           	// We're converting p to a uintptr and looking it up, and we 
   2076            .          .           	// don't want it to die and get swept while we're doing so. 
   2077            .          .           	KeepAlive(p) 
   2078            .          .           	return !exists || force // already exists or addition was forced 

runtime.removespecial

/usr/lib/go/src/runtime/mheap.go

  Total:        50ms       60ms (flat, cum)  0.13%
   2088            .          .           	} 
   2089            .          .            
   2090            .          .           	// Ensure that the span is swept. 
   2091            .          .           	// Sweeping accesses the specials list w/o locks, so we have 
   2092            .          .           	// to synchronize with it. And it's just much safer. 
   2093         10ms       10ms           	mp := acquirem() 
                                                           return gp.m                                                      runtime1.go:631

   2094            .       10ms           	span.ensureSwept() 
   2095            .          .            
   2096            .          .           	offset := uintptr(p) - span.base() 
   2097            .          .            
   2098            .          .           	var result *special 
   2099            .          .           	lock(&span.speciallock) 
   2100            .          .            
   2101         10ms       10ms           	iter, exists := span.specialFindSplicePoint(offset, kind) 
                                                           if offset == uintptr(s.offset) && kind == s.kind {               mheap.go:2127

   2102            .          .           	if exists { 
   2103            .          .           		s := *iter 
   2104            .          .           		*iter = s.next 
   2105            .          .           		result = s 
   2106            .          .           	} 
   2107            .          .           	if span.specials == nil { 
   2108         10ms       10ms           		spanHasNoSpecials(span) 
                                                           ha := mheap_.arenas[ai.l1()][ai.l2()]                            mheap.go:2036

   2109            .          .           	} 
   2110         10ms       10ms           	unlock(&span.speciallock) 
   2111         10ms       10ms           	releasem(mp) 
                                                           if mp.locks == 0 && gp.preempt {                                 runtime1.go:638

   2112            .          .           	return result 
   2113            .          .           } 
   2114            .          .            
   2115            .          .           // Find a splice point in the sorted list and check for an already existing 
   2116            .          .           // record. Returns a pointer to the next-reference in the list predecessor. 

runtime.(*mspan).specialFindSplicePoint

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   2119            .          .           	// Find splice point, check for existing record. 
   2120            .          .           	iter := &span.specials 
   2121            .          .           	found := false 
   2122            .          .           	for { 
   2123            .          .           		s := *iter 
   2124         10ms       10ms           		if s == nil { 
   2125            .          .           			break 
   2126            .          .           		} 
   2127         10ms       10ms           		if offset == uintptr(s.offset) && kind == s.kind { 
   2128            .          .           			found = true 
   2129            .          .           			break 
   2130            .          .           		} 
   2131            .          .           		if offset < uintptr(s.offset) || (offset == uintptr(s.offset) && kind < s.kind) { 
   2132            .          .           			break 
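
specialFindSplicePoint iterates with a pointer-to-next ("iter"), so callers like addspecial and removespecial can splice a record in or out without special-casing the list head. A sketch of that pattern with a hypothetical node type (not the runtime's):

    package main

    import "fmt"

    type node struct {
    	key  int
    	next *node
    }

    // findSplice returns the address of the next-pointer at which key
    // belongs, plus whether an equal key already exists, mirroring the
    // (iter, exists) result of specialFindSplicePoint.
    func findSplice(head **node, key int) (iter **node, exists bool) {
    	iter = head
    	for {
    		n := *iter
    		if n == nil || n.key >= key {
    			return iter, n != nil && n.key == key
    		}
    		iter = &n.next
    	}
    }

    func main() {
    	var head *node
    	for _, k := range []int{30, 10, 20} {
    		if iter, exists := findSplice(&head, k); !exists {
    			*iter = &node{key: k, next: *iter} // splice in, like *iter = s in addspecial
    		}
    	}
    	if iter, exists := findSplice(&head, 20); exists {
    		*iter = (*iter).next // splice out, like *iter = s.next in removespecial
    	}
    	for n := head; n != nil; n = n.next {
    		fmt.Print(n.key, " ") // 10 30
    	}
    	fmt.Println()
    }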

runtime.addfinalizer

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms      230ms (flat, cum)  0.48%
   2149            .          .           	ot      *ptrtype // May be a heap pointer, but always live. 
   2150            .          .           } 
   2151            .          .            
   2152            .          .           // Adds a finalizer to the object p. Returns true if it succeeded. 
   2153            .          .           func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool { 
   2154            .       30ms           	lock(&mheap_.speciallock) 
                                                           lockWithRank(l, getLockRank(l))                                  lock_spinbit.go:152
                                                          lock2(l)                                                         lockrank_off.go:24

   2155            .       40ms           	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc()) 
   2156            .      100ms           	unlock(&mheap_.speciallock) 
                                                           unlockWithRank(l)                                                lock_spinbit.go:261
                                                          unlock2(l)                                                       lockrank_off.go:35

   2157         10ms       10ms           	s.special.kind = _KindSpecialFinalizer 
   2158            .          .           	s.fn = f 
   2159            .          .           	s.nret = nret 
   2160            .          .           	s.fint = fint 
   2161            .          .           	s.ot = ot 
   2162            .       50ms           	if addspecial(p, &s.special, false) { 
   2163            .          .           		// This is responsible for maintaining the same 
   2164            .          .           		// GC-related invariants as markrootSpans in any 
   2165            .          .           		// situation where it's possible that markrootSpans 
   2166            .          .           		// has already run but mark termination hasn't yet. 
   2167            .          .           		if gcphase != _GCoff { 

runtime.removefinalizer

/usr/lib/go/src/runtime/mheap.go

  Total:           0      110ms (flat, cum)  0.23%
   2188            .          .           	return false 
   2189            .          .           } 
   2190            .          .            
   2191            .          .           // Removes the finalizer (if any) from the object p. 
   2192            .          .           func removefinalizer(p unsafe.Pointer) { 
   2193            .       60ms           	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer))) 
   2194            .          .           	if s == nil { 
   2195            .          .           		return // there wasn't a finalizer to remove 
   2196            .          .           	} 
   2197            .       10ms           	lock(&mheap_.speciallock) 
                                                           lockWithRank(l, getLockRank(l))                                  lock_spinbit.go:152
                                                          lock2(l)                                                         lockrank_off.go:24

   2198            .          .           	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) 
   2199            .       40ms           	unlock(&mheap_.speciallock) 
                                                           unlockWithRank(l)                                                lock_spinbit.go:261
                                                          unlock2(l)                                                       lockrank_off.go:35

   2200            .          .           } 
   2201            .          .            
   2202            .          .           // The described object has a cleanup set for it. 
   2203            .          .           type specialCleanup struct { 
   2204            .          .           	_       sys.NotInHeap 
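
addfinalizer and removefinalizer back the public runtime.SetFinalizer API: setting a finalizer allocates a specialfinalizer record and attaches it to the object's span, and SetFinalizer(p, nil) takes the removespecial path profiled above. A minimal usage example:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    type resource struct{ fd int }

    func main() {
    	r := &resource{fd: 3}
    	// Attaches a specialfinalizer record to r's span (the addfinalizer path).
    	runtime.SetFinalizer(r, func(r *resource) { fmt.Println("closing fd", r.fd) })
    	// Passing nil detaches and frees the record (the removefinalizer path).
    	runtime.SetFinalizer(r, nil)
    	runtime.GC() // prints nothing: the finalizer was removed
    }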

runtime.setprofilebucket

/usr/lib/go/src/runtime/mheap.go

  Total:           0       10ms (flat, cum) 0.021%
   2750            .          .           	lock(&mheap_.speciallock) 
   2751            .          .           	s := (*specialprofile)(mheap_.specialprofilealloc.alloc()) 
   2752            .          .           	unlock(&mheap_.speciallock) 
   2753            .          .           	s.special.kind = _KindSpecialProfile 
   2754            .          .           	s.b = b 
   2755            .       10ms           	if !addspecial(p, &s.special, false) { 
   2756            .          .           		throw("setprofilebucket: profile already set") 
   2757            .          .           	} 
   2758            .          .           } 
   2759            .          .            
   2760            .          .           // specialReachable tracks whether an object is reachable on the next 

runtime.(*gcBits).bytep

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   2865            .          .           	x uint8 
   2866            .          .           } 
   2867            .          .            
   2868            .          .           // bytep returns a pointer to the n'th byte of b. 
   2869            .          .           func (b *gcBits) bytep(n uintptr) *uint8 { 
   2870         10ms       10ms           	return addb(&b.x, n) 
   2871            .          .           } 
   2872            .          .            
   2873            .          .           // bitp returns a pointer to the byte containing bit n and a mask for 
   2874            .          .           // selecting that bit from *bytep. 
   2875            .          .           func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) { 

runtime.(*gcBitsArena).tryAlloc

/usr/lib/go/src/runtime/mheap.go

  Total:       110ms      110ms (flat, cum)  0.23%
   2901            .          .           } 
   2902            .          .            
   2903            .          .           // tryAlloc allocates from b or returns nil if b does not have enough room. 
   2904            .          .           // This is safe to call concurrently. 
   2905            .          .           func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits { 
   2906         30ms       30ms           	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) { 
   2907            .          .           		return nil 
   2908            .          .           	} 
   2909            .          .           	// Try to allocate from this block. 
   2910         70ms       70ms           	end := atomic.Xadduintptr(&b.free, bytes) 
   2911            .          .           	if end > uintptr(len(b.bits)) { 
   2912            .          .           		return nil 
   2913            .          .           	} 
   2914            .          .           	// There was enough room. 
   2915         10ms       10ms           	start := end - bytes 
   2916            .          .           	return &b.bits[start] 
   2917            .          .           } 
   2918            .          .            
   2919            .          .           // newMarkBits returns a pointer to 8 byte aligned bytes 
   2920            .          .           // to be used for a span's mark bits. 
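
tryAlloc is a lock-free bump allocator: an optimistic bounds check, then an atomic add that reserves the byte range [end-bytes, end); if the add overshoots the arena, the caller falls back to the slow path. A standalone sketch of the pattern using sync/atomic, with a hypothetical arena type:

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    // arena is a stand-in for gcBitsArena.
    type arena struct {
    	free atomic.Uintptr // next free offset, like gcBitsArena.free
    	buf  [1 << 16]byte
    }

    // tryAlloc reserves n bytes or returns nil; safe to call concurrently.
    func (a *arena) tryAlloc(n uintptr) []byte {
    	if a == nil || a.free.Load()+n > uintptr(len(a.buf)) {
    		return nil // optimistic check; the Add below is authoritative
    	}
    	end := a.free.Add(n) // atomically reserve [end-n, end)
    	if end > uintptr(len(a.buf)) {
    		return nil // lost a race and overshot; caller takes the slow path
    	}
    	return a.buf[end-n : end]
    }

    func main() {
    	var a arena
    	fmt.Println(len(a.tryAlloc(64)))      // 64
    	fmt.Println(a.tryAlloc(1<<17) == nil) // true: doesn't fit
    }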

runtime.newMarkBits

/usr/lib/go/src/runtime/mheap.go

  Total:       120ms      130ms (flat, cum)  0.27%
   2922            .          .           	blocksNeeded := (nelems + 63) / 64 
   2923            .          .           	bytesNeeded := blocksNeeded * 8 
   2924            .          .            
   2925            .          .           	// Try directly allocating from the current head arena. 
   2926            .          .           	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next))) 
   2927        110ms      110ms           	if p := head.tryAlloc(bytesNeeded); p != nil { 
                                                           if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) { mheap.go:2906
                                                           end := atomic.Xadduintptr(&b.free, bytes)                        mheap.go:2910
                                                           start := end - bytes                                             mheap.go:2915

   2928         10ms       10ms           		return p 
   2929            .          .           	} 
   2930            .          .            
   2931            .          .           	// There's not enough room in the head arena. We may need to 
   2932            .          .           	// allocate a new arena. 
   2933            .          .           	lock(&gcBitsArenas.lock) 
   2934            .          .           	// Try the head arena again, since it may have changed. Now 
   2935            .          .           	// that we hold the lock, the list head can't change, but its 
   2936            .          .           	// free position still can. 
   2937            .          .           	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { 
   2938            .          .           		unlock(&gcBitsArenas.lock) 
   2939            .          .           		return p 
   2940            .          .           	} 
   2941            .          .            
   2942            .          .           	// Allocate a new arena. This may temporarily drop the lock. 
   2943            .       10ms           	fresh := newArenaMayUnlock() 
   2944            .          .           	// If newArenaMayUnlock dropped the lock, another thread may 
   2945            .          .           	// have put a fresh arena on the "next" list. Try allocating 
   2946            .          .           	// from next again. 
   2947            .          .           	if p := gcBitsArenas.next.tryAlloc(bytesNeeded); p != nil { 
   2948            .          .           		// Put fresh back on the free list. 
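
newMarkBits is the classic optimistic/locked/fresh three-step: try the shared head without the lock, take the lock and retry because the head may have changed, and only then install a new arena, retrying once more since newArenaMayUnlock can drop the lock. A sketch of that shape with hypothetical names; the real fast path is the atomic tryAlloc above, but a plain version keeps the retry structure visible:

    package main

    import (
    	"fmt"
    	"sync"
    )

    type block struct {
    	free int
    	buf  [1024]byte
    }

    // tryAlloc stands in for the atomic fast path sketched earlier.
    func (b *block) tryAlloc(n int) []byte {
    	if b == nil || b.free+n > len(b.buf) {
    		return nil
    	}
    	b.free += n
    	return b.buf[b.free-n : b.free]
    }

    var (
    	mu   sync.Mutex
    	head *block
    )

    func alloc(n int) []byte {
    	if p := head.tryAlloc(n); p != nil {
    		return p // fast path, no lock
    	}
    	mu.Lock()
    	defer mu.Unlock()
    	if p := head.tryAlloc(n); p != nil {
    		return p // head changed while we waited for the lock
    	}
    	head = new(block) // slow path: install a fresh block, then allocate
    	return head.tryAlloc(n)
    }

    func main() { fmt.Println(len(alloc(64))) } // 64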

runtime.newArenaMayUnlock

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   3024            .          .           		} 
   3025            .          .           		lock(&gcBitsArenas.lock) 
   3026            .          .           	} else { 
   3027            .          .           		result = gcBitsArenas.free 
   3028            .          .           		gcBitsArenas.free = gcBitsArenas.free.next 
   3029         10ms       10ms           		memclrNoHeapPointers(unsafe.Pointer(result), gcBitsChunkBytes) 
   3030            .          .           	} 
   3031            .          .           	result.next = nil 
   3032            .          .           	// If result.bits is not 8 byte aligned adjust index so 
   3033            .          .           	// that &result.bits[result.free] is 8 byte aligned. 
   3034            .          .           	if unsafe.Offsetof(gcBitsArena{}.bits)&7 == 0 { 

runtime.memclrNoHeapPointers

/usr/lib/go/src/runtime/memclr_arm64.s

  Total:       350ms      350ms (flat, cum)  0.73%
     12            .          .           	CMP	$16, R1 
     13            .          .           	// If n is equal to 16 bytes, use zero_exact_16 to zero 
     14            .          .           	BEQ	zero_exact_16 
     15            .          .            
     16            .          .           	// If n is greater than 16 bytes, use zero_by_16 to zero 
     17         10ms       10ms           	BHI	zero_by_16 
     18            .          .            
     19            .          .           	// n is less than 16 bytes 
     20            .          .           	ADD	R1, R0, R7 
     21            .          .           	TBZ	$3, R1, less_than_8 
     22            .          .           	MOVD	ZR, (R0) 
     23         30ms       30ms           	MOVD	ZR, -8(R7) 
     24         10ms       10ms           	RET 
     25            .          .            
     26            .          .           less_than_8: 
     27            .          .           	TBZ	$2, R1, less_than_4 
     28            .          .           	MOVW	ZR, (R0) 
     29         10ms       10ms           	MOVW	ZR, -4(R7) 
     30            .          .           	RET 
     31            .          .            
     32            .          .           less_than_4: 
     33            .          .           	CBZ	R1, ending 
     34            .          .           	MOVB	ZR, (R0) 
     35            .          .           	TBZ	$1, R1, ending 
     36            .          .           	MOVH	ZR, -2(R7) 
     37            .          .            
     38            .          .           ending: 
     39            .          .           	RET 
     40            .          .            
     41            .          .           zero_exact_16: 
     42            .          .           	// n is exactly 16 bytes 
     43         10ms       10ms           	STP	(ZR, ZR), (R0) 
     44         10ms       10ms           	RET 
     45            .          .            
     46            .          .           zero_by_16: 
     47            .          .           	// n greater than 16 bytes, check if the start address is aligned 
     48            .          .           	NEG	R0, R4 
     49         10ms       10ms           	ANDS	$15, R4, R4 
     50            .          .           	// Try zeroing using zva if the start address is aligned with 16 
     51            .          .           	BEQ	try_zva 
     52            .          .            
     53            .          .           	// Non-aligned store 
     54            .          .           	STP	(ZR, ZR), (R0) 
     55            .          .           	// Make the destination aligned 
     56         10ms       10ms           	SUB	R4, R1, R1 
     57            .          .           	ADD	R4, R0, R0 
     58            .          .           	B	try_zva 
     59            .          .            
     60            .          .           tail_maybe_long: 
     61            .          .           	CMP	$64, R1 
     62            .          .           	BHS	no_zva 
     63            .          .            
     64            .          .           tail63: 
     65         10ms       10ms           	ANDS	$48, R1, R3 
     66            .          .           	BEQ	last16 
     67            .          .           	CMPW	$32, R3 
     68         10ms       10ms           	BEQ	last48 
     69            .          .           	BLT	last32 
     70         10ms       10ms           	STP.P	(ZR, ZR), 16(R0) 
     71            .          .           last48: 
     72         30ms       30ms           	STP.P	(ZR, ZR), 16(R0) 
     73            .          .           last32: 
     74         20ms       20ms           	STP.P	(ZR, ZR), 16(R0) 
     75            .          .           	// The last store length is at most 16, so it is safe to use 
     76            .          .           	// stp to write last 16 bytes 
     77            .          .           last16: 
     78         40ms       40ms           	ANDS	$15, R1, R1 
     79         20ms       20ms           	CBZ	R1, last_end 
     80         20ms       20ms           	ADD	R1, R0, R0 
     81            .          .           	STP	(ZR, ZR), -16(R0) 
     82            .          .           last_end: 
     83         10ms       10ms           	RET 
     84            .          .            
     85            .          .           	PCALIGN	$16 
     86            .          .           no_zva: 
     87            .          .           	SUB	$16, R0, R0 
     88            .          .           	SUB	$64, R1, R1 
     89            .          .            
     90            .          .           loop_64: 
     91         10ms       10ms           	STP	(ZR, ZR), 16(R0) 
     92         40ms       40ms           	STP	(ZR, ZR), 32(R0) 
     93            .          .           	STP	(ZR, ZR), 48(R0) 
     94            .          .           	STP.W	(ZR, ZR), 64(R0) 
     95            .          .           	SUBS	$64, R1, R1 
     96            .          .           	BGE	loop_64 
     97            .          .           	ANDS	$63, R1, ZR 
     98            .          .           	ADD	$16, R0, R0 
     99            .          .           	BNE	tail63 
    100            .          .           	RET 
    101            .          .            
    102            .          .           	PCALIGN	$16 
    103            .          .           try_zva: 
    104            .          .           	// Try using the ZVA feature to zero entire cache lines 
    105            .          .           	// It is not meaningful to use ZVA if the block size is less than 64, 
    106            .          .           	// so make sure that n is greater than or equal to 64 
    107         10ms       10ms           	CMP	$63, R1 
    108            .          .           	BLE	tail63 
    109            .          .            
    110            .          .           	CMP	$128, R1 
    111            .          .           	// Ensure n is at least 128 bytes, so that there is enough to copy after 
    112            .          .           	// alignment. 
    113         10ms       10ms           	BLT	no_zva 
    114            .          .           	// Check if ZVA is allowed from user code, and if so get the block size 
    115         10ms       10ms           	MOVW	block_size<>(SB), R5 
    116            .          .           	TBNZ	$31, R5, no_zva 
    117            .          .           	CBNZ	R5, zero_by_line 
    118            .          .           	// DCZID_EL0 bit assignments 
    119            .          .           	// [63:5] Reserved 
    120            .          .           	// [4]    DZP, if bit set DC ZVA instruction is prohibited, else permitted 

runtime.memclrNoHeapPointers

/usr/lib/go/src/runtime/memclr_arm64.s

  Total:       520ms      520ms (flat, cum)  1.09%
    139            .          .            
    140            .          .           	PCALIGN	$16 
    141            .          .           zero_by_line: 
    142            .          .           	CMP	R5, R1 
    143            .          .           	// Not enough memory to reach alignment 
    144         10ms       10ms           	BLO	no_zva 
    145            .          .           	SUB	$1, R5, R6 
    146            .          .           	NEG	R0, R4 
    147            .          .           	ANDS	R6, R4, R4 
    148            .          .           	// Already aligned 
    149            .          .           	BEQ	aligned 
    150            .          .            
    151            .          .           	// check there is enough to copy after alignment 
    152            .          .           	SUB	R4, R1, R3 
    153            .          .            
    154            .          .           	// Check that the remaining length to ZVA after alignment 
    155            .          .           	// is greater than 64. 
    156            .          .           	CMP	$64, R3 
    157            .          .           	CCMP	GE, R3, R5, $10  // condition code GE, NZCV=0b1010 
    158            .          .           	BLT	no_zva 
    159            .          .            
    160            .          .           	// We now have at least 64 bytes to zero, update n 
    161         10ms       10ms           	MOVD	R3, R1 
    162            .          .            
    163            .          .           loop_zva_prolog: 
    164            .          .           	STP	(ZR, ZR), (R0) 
    165         50ms       50ms           	STP	(ZR, ZR), 16(R0) 
    166            .          .           	STP	(ZR, ZR), 32(R0) 
    167            .          .           	SUBS	$64, R4, R4 
    168            .          .           	STP	(ZR, ZR), 48(R0) 
    169            .          .           	ADD	$64, R0, R0 
    170            .          .           	BGE	loop_zva_prolog 
    171            .          .            
    172            .          .           	ADD	R4, R0, R0 
    173            .          .            
    174            .          .           aligned: 
    175            .          .           	SUB	R5, R1, R1 
    176            .          .            
    177            .          .           	PCALIGN	$16 
    178            .          .           loop_zva: 
    179        320ms      320ms           	WORD	$0xd50b7420 // DC ZVA, R0 
    180        110ms      110ms           	ADD	R5, R0, R0 
    181            .          .           	SUBS	R5, R1, R1 
    182            .          .           	BHS	loop_zva 
    183            .          .           	ANDS	R6, R1, R1 
    184            .          .           	BNE	tail_maybe_long 
    185         20ms       20ms           	RET 
    186            .          .            
    187            .          .           GLOBL block_size<>(SB), NOPTR, $8 
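
Most of the time here is in the DC ZVA loop, which zeroes a full cache block per instruction. Ordinary Go code reaches memclrNoHeapPointers whenever the compiler recognizes the clear idiom; that lowering is a compiler implementation detail, but the shape below is the documented one:

    package main

    import "fmt"

    func main() {
    	buf := make([]byte, 1<<20)
    	// The compiler recognizes this loop shape (the "memclr idiom") and
    	// lowers it to a single runtime.memclrNoHeapPointers call, which on
    	// arm64 is the DC ZVA loop profiled above.
    	for i := range buf {
    		buf[i] = 0
    	}
    	fmt.Println(buf[0]) // 0
    }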

runtime.(*stkframe).argMapInternal

/usr/lib/go/src/runtime/stkframe.go

  Total:        50ms       50ms (flat, cum)   0.1%
     89            .          .           // this if non-nil, and otherwise fetch the argument map using the 
     90            .          .           // current PC. 
     91            .          .           // 
     92            .          .           // hasReflectStackObj indicates that this frame also has a reflect 
     93            .          .           // function stack object, which the caller must synthesize. 
     94         20ms       20ms           func (frame *stkframe) argMapInternal() (argMap bitvector, hasReflectStackObj bool) { 
     95            .          .           	f := frame.fn 
     96            .          .           	if f.args != abi.ArgsSizeUnknown { 
     97         10ms       10ms           		argMap.n = f.args / goarch.PtrSize 
     98         20ms       20ms           		return 
     99            .          .           	} 
    100            .          .           	// Extract argument bitmaps for reflect stubs from the calls they made to reflect. 
    101            .          .           	switch funcname(f) { 
    102            .          .           	case "reflect.makeFuncStub", "reflect.methodValueCall": 
    103            .          .           		// These take a *reflect.methodValue as their 

runtime.(*stkframe).getStackMap

/usr/lib/go/src/runtime/stkframe.go

  Total:       760ms      2.05s (flat, cum)  4.29%
    152            .          .           	return 
    153            .          .           } 
    154            .          .            
    155            .          .           // getStackMap returns the locals and arguments live pointer maps, and 
    156            .          .           // stack object list for frame. 
    157         20ms       20ms           func (frame *stkframe) getStackMap(debug bool) (locals, args bitvector, objs []stackObjectRecord) { 
    158            .          .           	targetpc := frame.continpc 
    159         10ms       10ms           	if targetpc == 0 { 
    160            .          .           		// Frame is dead. Return empty bitvectors. 
    161            .          .           		return 
    162            .          .           	} 
    163            .          .            
    164            .          .           	f := frame.fn 
    165            .          .           	pcdata := int32(-1) 
    166         50ms       70ms           	if targetpc != f.entry() { 
                                                           return f.datap.textAddr(f.entryOff)                              symtab.go:894
    167            .          .           		// Back up to the CALL. If we're at the function entry 
    168            .          .           		// point, we want to use the entry map (-1), even if 
    169            .          .           		// the first instruction of the function changes the 
    170            .          .           		// stack map. 
    171            .          .           		targetpc-- 
    172            .      1.22s           		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, targetpc) 
    173            .          .           	} 
    174            .          .           	if pcdata == -1 { 
    175            .          .           		// We do not have a valid pcdata value but there might be a 
    176            .          .           		// stackmap for this function. It is likely that we are looking 
    177            .          .           		// at the function prologue, assume so and hope for the best. 
    178            .          .           		pcdata = 0 
    179            .          .           	} 
    180            .          .            
    181            .          .           	// Local variables. 
    182         50ms       50ms           	size := frame.varp - frame.sp 
    183            .          .           	var minsize uintptr 
    184            .          .           	switch goarch.ArchFamily { 
    185            .          .           	case goarch.ARM64: 
    186            .          .           		minsize = sys.StackAlign 
    187            .          .           	default: 
    188            .          .           		minsize = sys.MinFrameSize 
    189            .          .           	} 
    190            .          .           	if size > minsize { 
    191            .          .           		stackid := pcdata 
    192         50ms       50ms           		stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps)) 
                                                           raw := base + uintptr(off)                                       symtab.go:1272
                                                           if i < 0 || i >= f.nfuncdata {                                   symtab.go:1259

    193        210ms      210ms           		if stkmap == nil || stkmap.n <= 0 { 
    194            .          .           			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n") 
    195            .          .           			throw("missing stackmap") 
    196            .          .           		} 
    197            .          .           		// If nbit == 0, there's no work to do. 
    198         50ms       50ms           		if stkmap.nbit > 0 { 
    199         10ms       10ms           			if stackid < 0 || stackid >= stkmap.n { 
    200            .          .           				// don't know where we are 
    201            .          .           				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n") 
    202            .          .           				throw("bad symbol table") 
    203            .          .           			} 
    204         10ms       10ms           			locals = stackmapdata(stkmap, stackid) 
                                                           return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))} symtab.go:1330

    205            .          .           			if stackDebug >= 3 && debug { 
    206            .          .           				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n") 
    207            .          .           			} 
    208            .          .           		} else if stackDebug >= 3 && debug { 
    209            .          .           			print("      no locals to adjust\n") 
    210            .          .           		} 
    211            .          .           	} 
    212            .          .            
    213            .          .           	// Arguments. First fetch frame size and special-case argument maps. 
    214            .          .           	var isReflect bool 
    215            .       50ms           	args, isReflect = frame.argMapInternal() 
    216         10ms       10ms           	if args.n > 0 && args.bytedata == nil { 
    217            .          .           		// Non-empty argument frame, but not a special map. 
    218            .          .           		// Fetch the argument map at pcdata. 
    219         60ms       60ms           		stackmap := (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps)) 
                                                           base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses symtab.go:1262
    220        100ms      100ms           		if stackmap == nil || stackmap.n <= 0 { 
    221            .          .           			print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n") 
    222            .          .           			throw("missing stackmap") 
    223            .          .           		} 
    224            .          .           		if pcdata < 0 || pcdata >= stackmap.n { 
    225            .          .           			// don't know where we are 
    226            .          .           			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n") 
    227            .          .           			throw("bad symbol table") 
    228            .          .           		} 
    229            .          .           		if stackmap.nbit == 0 { 
    230            .          .           			args.n = 0 
    231            .          .           		} else { 
    232         30ms       30ms           			args = stackmapdata(stackmap, pcdata) 
                                                           return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))} symtab.go:1330

    233            .          .           		} 
    234            .          .           	} 
    235            .          .            
    236            .          .           	// stack objects. 
    237            .          .           	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") && 
    238            .          .           		unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect { 
    239            .          .           		// For reflect.makeFuncStub and reflect.methodValueCall, 
    240            .          .           		// we need to fake the stack object record. 
    241            .          .           		// These frames contain an internal/abi.RegArgs at a hard-coded offset. 
    242            .          .           		// This offset matches the assembly code on amd64 and arm64. 
    243            .          .           		objs = methodValueCallFrameObjs[:] 
    244            .          .           	} else { 
    245         20ms       20ms           		p := funcdata(f, abi.FUNCDATA_StackObjects) 
                                                           if i < 0 || i >= f.nfuncdata {                                   symtab.go:1259

    246            .          .           		if p != nil { 
    247            .          .           			n := *(*uintptr)(p) 
    248            .          .           			p = add(p, goarch.PtrSize) 
    249            .          .           			r0 := (*stackObjectRecord)(noescape(p)) 
    250            .          .           			objs = unsafe.Slice(r0, int(n)) 
    251            .          .           			// Note: the noescape above is needed to keep 
    252            .          .           			// getStackMap from "leaking param content: 
    253            .          .           			// frame".  That leak propagates up to getgcmask, then 
    254            .          .           			// GCMask, then verifyGCInfo, which converts the stack 
    255            .          .           			// gcinfo tests into heap gcinfo tests :( 
    256            .          .           		} 
    257            .          .           	} 
    258            .          .            
    259         80ms       80ms           	return 
    260            .          .           } 
    261            .          .            
    262            .          .           var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjectinit 
    263            .          .            
    264            .          .           func stkobjinit() { 
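
The locals and args maps returned here are bitvectors: one bit per pointer-sized word of the frame, a set bit meaning "live pointer here". A sketch of how such a map is indexed, mirroring the runtime's bitvector/ptrbit shape but using a plain byte slice (hypothetical type, not the runtime's):

    package main

    import "fmt"

    // bitvector mirrors the runtime's shape: n bits backed by bytes.
    type bitvector struct {
    	n        int32
    	bytedata []byte
    }

    // ptrbit reports whether word i of the frame holds a live pointer.
    func (bv bitvector) ptrbit(i int32) bool {
    	return bv.bytedata[i/8]>>(uint(i)%8)&1 != 0
    }

    func main() {
    	// 10 words; words 1 and 9 hold pointers, so bits 1 and 9 are set.
    	bv := bitvector{n: 10, bytedata: []byte{0b0000_0010, 0b0000_0010}}
    	for i := int32(0); i < bv.n; i++ {
    		if bv.ptrbit(i) {
    			fmt.Println("word", i, "is a pointer slot")
    		}
    	}
    }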

runtime.sellock

/usr/lib/go/src/runtime/select.go

  Total:        30ms      260ms (flat, cum)  0.54%
     29            .          .            
     30            .          .           func selectsetpc(pc *uintptr) { 
     31            .          .           	*pc = sys.GetCallerPC() 
     32            .          .           } 
     33            .          .            
     34         10ms       10ms           func sellock(scases []scase, lockorder []uint16) { 
     35            .          .           	var c *hchan 
     36            .          .           	for _, o := range lockorder { 
     37            .          .           		c0 := scases[o].c 
     38         10ms       10ms           		if c0 != c { 
     39            .          .           			c = c0 
     40         10ms      240ms           			lock(&c.lock) 
                                                           lockWithRank(l, getLockRank(l))                                  lock_spinbit.go:152
                                                                          lock2(l)                                         lockrank_off.go:24
     41            .          .           		} 
     42            .          .           	} 
     43            .          .           } 
     44            .          .            
     45            .          .           func selunlock(scases []scase, lockorder []uint16) { 
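
sellock takes every channel lock in one global order (by channel address, established by the heap sort in selectgo below), locking duplicates only once, so two selects over overlapping channel sets cannot deadlock. The same discipline sketched with mutexes; sorting by object address relies on Go's current non-moving collector and is for illustration only:

    package main

    import (
    	"fmt"
    	"sort"
    	"sync"
    	"unsafe"
    )

    // lockAll locks every mutex in address order, skipping duplicates,
    // like sellock; unlockAll is its selunlock counterpart. One global
    // order means goroutines locking overlapping sets cannot deadlock.
    func lockAll(ms []*sync.Mutex) {
    	sort.Slice(ms, func(i, j int) bool {
    		return uintptr(unsafe.Pointer(ms[i])) < uintptr(unsafe.Pointer(ms[j]))
    	})
    	var last *sync.Mutex
    	for _, m := range ms {
    		if m != last { // the same lock may appear in several cases
    			m.Lock()
    			last = m
    		}
    	}
    }

    func unlockAll(ms []*sync.Mutex) { // assumes ms was sorted by lockAll
    	var last *sync.Mutex
    	for _, m := range ms {
    		if m != last {
    			m.Unlock()
    			last = m
    		}
    	}
    }

    func main() {
    	var a, b sync.Mutex
    	ms := []*sync.Mutex{&b, &a, &b} // duplicates allowed, like repeated channels
    	lockAll(ms)
    	fmt.Println("all locked, each exactly once")
    	unlockAll(ms)
    }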

runtime.selunlock

/usr/lib/go/src/runtime/select.go

  Total:        10ms      230ms (flat, cum)  0.48%
     54            .          .           	for i := len(lockorder) - 1; i >= 0; i-- { 
     55            .          .           		c := scases[lockorder[i]].c 
     56            .          .           		if i > 0 && c == scases[lockorder[i-1]].c { 
     57            .          .           			continue // will unlock it on the next iteration 
     58            .          .           		} 
     59         10ms      230ms           		unlock(&c.lock) 
                                                           unlockWithRank(l)                                                lock_spinbit.go:261
                                                                  unlock2(l)                                               lockrank_off.go:35
     60            .          .           	} 
     61            .          .           } 
     62            .          .            
     63            .          .           func selparkcommit(gp *g, _ unsafe.Pointer) bool { 
     64            .          .           	// There are unlocked sudogs that point into gp's stack. Stack 

runtime.selparkcommit

/usr/lib/go/src/runtime/select.go

  Total:           0       50ms (flat, cum)   0.1%
     88            .          .           			// any sudog with that channel may change, 
     89            .          .           			// including c and waitlink. Since multiple 
     90            .          .           			// sudogs may have the same channel, we unlock 
     91            .          .           			// only after we've passed the last instance 
     92            .          .           			// of a channel. 
     93            .       50ms           			unlock(&lastc.lock) 
                                                           unlockWithRank(l)                                                lock_spinbit.go:261
                                                                          unlock2(l)                                       lockrank_off.go:35

     94            .          .           		} 
     95            .          .           		lastc = sg.c 
     96            .          .           	} 
     97            .          .           	if lastc != nil { 
     98            .          .           		unlock(&lastc.lock) 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:        20ms       20ms (flat, cum) 0.042%
    117            .          .           // 
    118            .          .           // selectgo returns the index of the chosen scase, which matches the 
    119            .          .           // ordinal position of its respective select{recv,send,default} call. 
    120            .          .           // Also, if the chosen scase was a receive operation, it reports whether 
    121            .          .           // a value was received. 
    122         10ms       10ms           func selectgo(cas0 *scase, order0 *uint16, pc0 *uintptr, nsends, nrecvs int, block bool) (int, bool) { 
    123            .          .           	gp := getg() 
    124            .          .           	if debugSelect { 
    125            .          .           		print("select: cas0=", cas0, "\n") 
    126            .          .           	} 
    127            .          .            
    128            .          .           	// NOTE: In order to maintain a lean stack size, the number of scases 
    129            .          .           	// is capped at 65536. 
    130            .          .           	cas1 := (*[1 << 16]scase)(unsafe.Pointer(cas0)) 
    131            .          .           	order1 := (*[1 << 17]uint16)(unsafe.Pointer(order0)) 
    132            .          .            
    133         10ms       10ms           	ncases := nsends + nrecvs 
    134            .          .           	scases := cas1[:ncases:ncases] 
    135            .          .           	pollorder := order1[:ncases:ncases] 
    136            .          .           	lockorder := order1[ncases:][:ncases:ncases] 
    137            .          .           	// NOTE: pollorder/lockorder's underlying array was not zero-initialized by compiler. 
    138            .          .            

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:       390ms      600ms (flat, cum)  1.26%
    150            .          .           		} 
    151            .          .           		return pcs[casi] 
    152            .          .           	} 
    153            .          .            
    154            .          .           	var t0 int64 
    155         10ms       10ms           	if blockprofilerate > 0 { 
    156            .          .           		t0 = cputicks() 
    157            .          .           	} 
    158            .          .            
    159            .          .           	// The compiler rewrites selects that statically have 
    160            .          .           	// only 0 or 1 cases plus default into simpler constructs. 
    161            .          .           	// The only way we can end up with such small sel.ncase 
    162            .          .           	// values here is for a larger select in which most channels 
    163            .          .           	// have been nilled out. The general code handles those 
    164            .          .           	// cases correctly, and they are rare enough not to bother 
    165            .          .           	// optimizing (and needing to test). 
    166            .          .            
    167            .          .           	// generate permuted order 
    168            .          .           	norder := 0 
    169            .          .           	allSynctest := true 
    170            .          .           	for i := range scases { 
    171            .          .           		cas := &scases[i] 
    172            .          .            
    173            .          .           		// Omit cases without channels from the poll and lock orders. 
    174            .          .           		if cas.c == nil { 
    175         10ms       10ms           			cas.elem = nil // allow GC 
    176            .          .           			continue 
    177            .          .           		} 
    178            .          .            
    179         40ms       40ms           		if cas.c.bubble != nil { 
    180            .          .           			if getg().bubble != cas.c.bubble { 
    181            .          .           				fatal("select on synctest channel from outside bubble") 
    182            .          .           			} 
    183            .          .           		} else { 
    184            .          .           			allSynctest = false 
    185            .          .           		} 
    186            .          .            
    187         20ms       20ms           		if cas.c.timer != nil { 
    188            .          .           			cas.c.timer.maybeRunChan(cas.c) 
    189            .          .           		} 
    190            .          .            
    191         10ms       10ms           		j := cheaprandn(uint32(norder + 1)) 
                                                           return uint32((uint64(cheaprand()) * uint64(n)) >> 32)           rand.go:293

    192         30ms       30ms           		pollorder[norder] = pollorder[j] 
    193         10ms       10ms           		pollorder[j] = uint16(i) 
    194            .          .           		norder++ 
    195            .          .           	} 
    196            .          .           	pollorder = pollorder[:norder] 
    197            .          .           	lockorder = lockorder[:norder] 
    198            .          .            
    199            .          .           	waitReason := waitReasonSelect 
    200            .          .           	if gp.bubble != nil && allSynctest { 
    201            .          .           		// Every channel selected on is in a synctest bubble, 
    202            .          .           		// so this goroutine will count as idle while selecting. 
    203            .          .           		waitReason = waitReasonSynctestSelect 
    204            .          .           	} 
    205            .          .            
    206            .          .           	// sort the cases by Hchan address to get the locking order. 
    207            .          .           	// simple heap sort, to guarantee n log n time and constant stack footprint. 
    208            .          .           	for i := range lockorder { 
    209            .          .           		j := i 
    210            .          .           		// Start with the pollorder to permute cases on the same channel. 
    211            .          .           		c := scases[pollorder[i]].c 
    212         30ms       30ms           		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { 
                                                           return uintptr(unsafe.Pointer(c))                                select.go:546

    213            .          .           			k := (j - 1) / 2 
    214         20ms       20ms           			lockorder[j] = lockorder[k] 
    215            .          .           			j = k 
    216            .          .           		} 
    217         20ms       20ms           		lockorder[j] = pollorder[i] 
    218            .          .           	} 
    219         10ms       10ms           	for i := len(lockorder) - 1; i >= 0; i-- { 
    220            .          .           		o := lockorder[i] 
    221         10ms       10ms           		c := scases[o].c 
    222            .          .           		lockorder[i] = lockorder[0] 
    223            .          .           		j := 0 
    224            .          .           		for { 
    225         30ms       30ms           			k := j*2 + 1 
    226            .          .           			if k >= i { 
    227            .          .           				break 
    228            .          .           			} 
    229         30ms       30ms           			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { 
    230            .          .           				k++ 
    231            .          .           			} 
    232         60ms       60ms           			if c.sortkey() < scases[lockorder[k]].c.sortkey() { 
    233            .          .           				lockorder[j] = lockorder[k] 
    234            .          .           				j = k 
    235            .          .           				continue 
    236            .          .           			} 
    237            .          .           			break 
    238            .          .           		} 
    239         10ms       10ms           		lockorder[j] = o 
    240            .          .           	} 
    241            .          .            
    242            .          .           	if debugSelect { 
    243            .          .           		for i := 0; i+1 < len(lockorder); i++ { 
    244            .          .           			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { 
    245            .          .           				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") 
    246            .          .           				throw("select: broken sort") 
    247            .          .           			} 
    248            .          .           		} 
    249            .          .           	} 
    250            .          .            
    251            .          .           	// lock all the channels involved in the select 
    252            .      210ms           	sellock(scases, lockorder) 
    253            .          .            
    254            .          .           	var ( 
    255            .          .           		sg     *sudog 
    256            .          .           		c      *hchan 
    257            .          .           		k      *scase 
    258            .          .           		sglist *sudog 
    259            .          .           		sgnext *sudog 
    260            .          .           		qp     unsafe.Pointer 
    261            .          .           		nextp  **sudog 
    262            .          .           	) 
    263            .          .            
    264            .          .           	// pass 1 - look for something already waiting 
    265            .          .           	var casi int 
    266            .          .           	var cas *scase 
    267            .          .           	var caseSuccess bool 
    268            .          .           	var caseReleaseTime int64 = -1 
    269            .          .           	var recvOK bool 
    270         10ms       10ms           	for _, casei := range pollorder { 
    271            .          .           		casi = int(casei) 
    272            .          .           		cas = &scases[casi] 
    273            .          .           		c = cas.c 
    274            .          .            
    275            .          .           		if casi >= nsends { 
    276         30ms       30ms           			sg = c.sendq.dequeue() 
    277            .          .           			if sg != nil { 
    278            .          .           				goto recv 
    279            .          .           			} 
    280            .          .           			if c.qcount > 0 { 
    281            .          .           				goto bufrecv 
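
Note: selectgo first randomizes the polling order (cheaprandn maps a 32-bit random into [0, n) with a multiply-and-shift, rand.go:293) and then sorts the cases by channel address, so every select locks its channels in one global order, which rules out deadlock between selects that share channels. A minimal sketch of both techniques outside the runtime; shuffledPollOrder and lockOrder are made-up names, and sort.Slice stands in for the runtime's constant-stack heap sort:

    package main

    import (
        "fmt"
        "math/rand/v2"
        "sort"
    )

    // shuffledPollOrder returns indices 0..n-1 in random order, built
    // incrementally the way selectgo builds pollorder: each new index
    // is swapped with a uniformly chosen earlier slot.
    func shuffledPollOrder(n int) []int {
        order := make([]int, n)
        for i := 0; i < n; i++ {
            j := rand.IntN(i + 1)
            order[i] = order[j]
            order[j] = i
        }
        return order
    }

    // lockOrder sorts case indices by an address-like key, so that any
    // two selects over the same channels lock them in the same sequence.
    func lockOrder(keys []uintptr) []int {
        order := make([]int, len(keys))
        for i := range order {
            order[i] = i
        }
        sort.Slice(order, func(a, b int) bool { return keys[order[a]] < keys[order[b]] })
        return order
    }

    func main() {
        fmt.Println(shuffledPollOrder(5))
        fmt.Println(lockOrder([]uintptr{0x30, 0x10, 0x20})) // [1 2 0]
    }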

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:       170ms      290ms (flat, cum)  0.61%
    309            .          .           	// pass 2 - enqueue on all chans 
    310            .          .           	if gp.waiting != nil { 
    311            .          .           		throw("gp.waiting != nil") 
    312            .          .           	} 
    313            .          .           	nextp = &gp.waiting 
    314         10ms       10ms           	for _, casei := range lockorder { 
    315            .          .           		casi = int(casei) 
    316            .          .           		cas = &scases[casi] 
    317            .          .           		c = cas.c 
    318            .       70ms           		sg := acquireSudog() 
    319            .          .           		sg.g = gp 
    320            .          .           		sg.isSelect = true 
    321            .          .           		// No stack splits between assigning elem and enqueuing 
    322            .          .           		// sg on gp.waiting where copystack can find it. 
    323            .          .           		sg.elem = cas.elem 
    324            .          .           		sg.releasetime = 0 
    325            .          .           		if t0 != 0 { 
    326            .          .           			sg.releasetime = -1 
    327            .          .           		} 
    328            .          .           		sg.c = c 
    329            .          .           		// Construct waiting list in lock order. 
    330            .          .           		*nextp = sg 
    331            .          .           		nextp = &sg.waitlink 
    332            .          .            
    333            .          .           		if casi < nsends { 
    334            .          .           			c.sendq.enqueue(sg) 
    335            .          .           		} else { 
    336         20ms       20ms           			c.recvq.enqueue(sg) 
    337            .          .           		} 
    338            .          .            
    339            .          .           		if c.timer != nil { 
    340            .          .           			blockTimerChan(c) 
    341            .          .           		} 
    342            .          .           	} 
    343            .          .            
    344            .          .           	// wait for someone to wake us up 
    345            .          .           	gp.param = nil 
    346            .          .           	// Signal to anyone trying to shrink our stack that we're about 
    347            .          .           	// to park on a channel. The window between when this G's status 
    348            .          .           	// changes and when we set gp.activeStackChans is not safe for 
    349            .          .           	// stack shrinking. 
    350            .          .           	gp.parkingOnChan.Store(true) 
    351            .          .           	gopark(selparkcommit, nil, waitReason, traceBlockSelect, 1) 
    352         20ms       20ms           	gp.activeStackChans = false 
    353            .          .            
    354            .       50ms           	sellock(scases, lockorder) 
    355            .          .            
    356            .          .           	gp.selectDone.Store(0) 
    357            .          .           	sg = (*sudog)(gp.param) 
    358            .          .           	gp.param = nil 
    359            .          .            
    360            .          .           	// pass 3 - dequeue from unsuccessful chans 
    361            .          .           	// otherwise they stack up on quiet channels 
    362            .          .           	// record the successful case, if any. 
    363            .          .           	// We singly-linked up the SudoGs in lock order. 
    364            .          .           	casi = -1 
    365            .          .           	cas = nil 
    366            .          .           	caseSuccess = false 
    367            .          .           	sglist = gp.waiting 
    368            .          .           	// Clear all elem before unlinking from gp.waiting. 
    369         90ms       90ms           	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { 
    370            .          .           		sg1.isSelect = false 
    371         30ms       30ms           		sg1.elem = nil 
    372            .          .           		sg1.c = nil 
    373            .          .           	} 
    374            .          .           	gp.waiting = nil 
    375            .          .            
    376            .          .           	for _, casei := range lockorder { 
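
Note: pass 2 links one sudog per case onto gp.waiting through nextp = &sg.waitlink, the pointer-to-pointer idiom for building a singly linked list in order without a tail pointer or an empty-list special case. A minimal standalone sketch of the idiom (node is a hypothetical type):

    package main

    import "fmt"

    type node struct {
        val  int
        next *node
    }

    func main() {
        var head *node
        nextp := &head // always points at the link to fill in next
        for _, v := range []int{1, 2, 3} {
            n := &node{val: v}
            *nextp = n      // append in order, no empty-list special case
            nextp = &n.next // advance to the new tail link
        }
        for n := head; n != nil; n = n.next {
            fmt.Println(n.val)
        }
    }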

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:        30ms       80ms (flat, cum)  0.17%
    389            .          .           		} else { 
    390            .          .           			c = k.c 
    391            .          .           			if int(casei) < nsends { 
    392            .          .           				c.sendq.dequeueSudoG(sglist) 
    393            .          .           			} else { 
    394         20ms       20ms           				c.recvq.dequeueSudoG(sglist) 
    395            .          .           			} 
    396            .          .           		} 
    397            .          .           		sgnext = sglist.waitlink 
    398            .          .           		sglist.waitlink = nil 
    399            .       50ms           		releaseSudog(sglist) 
    400            .          .           		sglist = sgnext 
    401            .          .           	} 
    402            .          .            
    403         10ms       10ms           	if cas == nil { 
    404            .          .           		throw("selectgo: bad wakeup") 
    405            .          .           	} 
    406            .          .            
    407            .          .           	c = cas.c 
    408            .          .            

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:           0      260ms (flat, cum)  0.54%
    438            .          .           		} else if cas.elem != nil { 
    439            .          .           			asanwrite(cas.elem, c.elemtype.Size_) 
    440            .          .           		} 
    441            .          .           	} 
    442            .          .            
    443            .       60ms           	selunlock(scases, lockorder) 
    444            .          .           	goto retc 
    445            .          .            
    446            .          .           bufrecv: 
    447            .          .           	// can receive from buffer 
    448            .          .           	if raceenabled { 
    449            .          .           		if cas.elem != nil { 
    450            .          .           			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) 
    451            .          .           		} 
    452            .          .           		racenotify(c, c.recvx, nil) 
    453            .          .           	} 
    454            .          .           	if msanenabled && cas.elem != nil { 
    455            .          .           		msanwrite(cas.elem, c.elemtype.Size_) 
    456            .          .           	} 
    457            .          .           	if asanenabled && cas.elem != nil { 
    458            .          .           		asanwrite(cas.elem, c.elemtype.Size_) 
    459            .          .           	} 
    460            .          .           	recvOK = true 
    461            .          .           	qp = chanbuf(c, c.recvx) 
    462            .          .           	if cas.elem != nil { 
    463            .       40ms           		typedmemmove(c.elemtype, cas.elem, qp) 
    464            .          .           	} 
    465            .          .           	typedmemclr(c.elemtype, qp) 
    466            .          .           	c.recvx++ 
    467            .          .           	if c.recvx == c.dataqsiz { 
    468            .          .           		c.recvx = 0 
    469            .          .           	} 
    470            .          .           	c.qcount-- 
    471            .      160ms           	selunlock(scases, lockorder) 
    472            .          .           	goto retc 
    473            .          .            
    474            .          .           bufsend: 
    475            .          .           	// can send to buffer 
    476            .          .           	if raceenabled { 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:           0       50ms (flat, cum)   0.1%
    492            .          .           	selunlock(scases, lockorder) 
    493            .          .           	goto retc 
    494            .          .            
    495            .          .           recv: 
    496            .          .           	// can receive from sleeping sender (sg) 
    497            .       30ms           	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) 
    498            .          .           	if debugSelect { 
    499            .          .           		print("syncrecv: cas0=", cas0, " c=", c, "\n") 
    500            .          .           	} 
    501            .          .           	recvOK = true 
    502            .          .           	goto retc 
    503            .          .            
    504            .          .           rclose: 
    505            .          .           	// read at end of closed channel 
    506            .       10ms           	selunlock(scases, lockorder) 
    507            .          .           	recvOK = false 
    508            .          .           	if cas.elem != nil { 
    509            .       10ms           		typedmemclr(c.elemtype, cas.elem) 
    510            .          .           	} 
    511            .          .           	if raceenabled { 
    512            .          .           		raceacquire(c.raceaddr()) 
    513            .          .           	} 
    514            .          .           	goto retc 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:        10ms       10ms (flat, cum) 0.021%
    530            .          .           	} 
    531            .          .           	goto retc 
    532            .          .            
    533            .          .           retc: 
    534            .          .           	if caseReleaseTime > 0 { 
    535         10ms       10ms           		blockevent(caseReleaseTime-t0, 1) 
    536            .          .           	} 
    537            .          .           	return casi, recvOK 
    538            .          .            
    539            .          .           sclose: 
    540            .          .           	// send on closed channel 

runtime.(*hchan).sortkey

/usr/lib/go/src/runtime/select.go

  Total:        10ms       10ms (flat, cum) 0.021%
    541            .          .           	selunlock(scases, lockorder) 
    542            .          .           	panic(plainError("send on closed channel")) 
    543            .          .           } 
    544            .          .            
    545            .          .           func (c *hchan) sortkey() uintptr { 
    546         10ms       10ms           	return uintptr(unsafe.Pointer(c)) 
    547            .          .           } 
    548            .          .            
    549            .          .           // A runtimeSelect is a single case passed to rselect. 
    550            .          .           // This must match ../reflect/value.go:/runtimeSelect 
    551            .          .           type runtimeSelect struct { 

runtime.(*waitq).dequeueSudoG

/usr/lib/go/src/runtime/select.go

  Total:        10ms       10ms (flat, cum) 0.021%
    623            .          .           	} 
    624            .          .           	return chosen, recvOK 
    625            .          .           } 
    626            .          .            
    627            .          .           func (q *waitq) dequeueSudoG(sgp *sudog) { 
    628         10ms       10ms           	x := sgp.prev 
    629            .          .           	y := sgp.next 
    630            .          .           	if x != nil { 
    631            .          .           		if y != nil { 
    632            .          .           			// middle of queue 
    633            .          .           			x.next = y 

runtime.(*waitq).dequeueSudoG

/usr/lib/go/src/runtime/select.go

  Total:        10ms       10ms (flat, cum) 0.021%
    650            .          .           		return 
    651            .          .           	} 
    652            .          .            
    653            .          .           	// x==y==nil. Either sgp is the only element in the queue, 
    654            .          .           	// or it has already been removed. Use q.first to disambiguate. 
    655         10ms       10ms           	if q.first == sgp { 
    656            .          .           		q.first = nil 
    657            .          .           		q.last = nil 
    658            .          .           	} 
    659            .          .           } 
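
Note: dequeueSudoG (spread across the two fragments above) unlinks a node from a doubly linked wait queue by case analysis on its prev/next pointers; when both are nil it consults q.first to distinguish "only element" from "already removed". A sketch of the same removal logic on a hypothetical queue type:

    package main

    import "fmt"

    type elem struct {
        prev, next *elem
        val        int
    }

    type queue struct{ first, last *elem }

    func (q *queue) remove(e *elem) {
        x, y := e.prev, e.next
        switch {
        case x != nil && y != nil: // middle of queue
            x.next, y.prev = y, x
            e.prev, e.next = nil, nil
        case x != nil: // end of queue
            x.next = nil
            q.last = x
            e.prev = nil
        case y != nil: // start of queue
            y.prev = nil
            q.first = y
            e.next = nil
        case q.first == e: // x==y==nil: only element, q.first disambiguates
            q.first, q.last = nil, nil
        }
        // otherwise e was already removed: nothing to do
    }

    func main() {
        a, b := &elem{val: 1}, &elem{val: 2}
        a.next, b.prev = b, a
        q := &queue{first: a, last: b}
        q.remove(a)
        fmt.Println(q.first.val) // 2
    }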

runtime.(*unwinder).init

/usr/lib/go/src/runtime/traceback.go

  Total:           0      350ms (flat, cum)  0.73%
    124            .          .           	// provide a "valid" method. Alternatively, this could start in a "before 
    125            .          .           	// the first frame" state and "next" could return whether it was able to 
    126            .          .           	// move to the next frame, but that's both more awkward to use in a "for" 
    127            .          .           	// loop and is harder to implement because we have to do things differently 
    128            .          .           	// for the first frame. 
    129            .      350ms           	u.initAt(^uintptr(0), ^uintptr(0), ^uintptr(0), gp, flags) 
    130            .          .           } 
    131            .          .            
    132            .          .           func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) { 
    133            .          .           	// Don't call this "g"; it's too easy get "g" and "gp" confused. 
    134            .          .           	if ourg := getg(); ourg == gp && ourg == ourg.m.curg { 

runtime.(*unwinder).initAt

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       10ms (flat, cum) 0.021%
    162            .          .           				lr0 = gp.sched.lr 
    163            .          .           			} 
    164            .          .           		} 
    165            .          .           	} 
    166            .          .            
    167         10ms       10ms           	var frame stkframe 
    168            .          .           	frame.pc = pc0 
    169            .          .           	frame.sp = sp0 
    170            .          .           	if usesLR { 
    171            .          .           		frame.lr = lr0 
    172            .          .           	} 

runtime.(*unwinder).initAt

/usr/lib/go/src/runtime/traceback.go

  Total:        40ms      340ms (flat, cum)  0.71%
    195            .          .           		// LR are not touched. 
    196            .          .           		frame.pc = frame.lr 
    197            .          .           		frame.lr = 0 
    198            .          .           	} 
    199            .          .            
    200            .       20ms           	f := findfunc(frame.pc) 
    201            .          .           	if !f.valid() { 
    202            .          .           		if flags&unwindSilentErrors == 0 { 
    203            .          .           			print("runtime: g ", gp.goid, " gp=", gp, ": unknown pc ", hex(frame.pc), "\n") 
    204            .          .           			tracebackHexdump(gp.stack, &frame, 0) 
    205            .          .           		} 
    206            .          .           		if flags&(unwindPrintErrors|unwindSilentErrors) == 0 { 
    207            .          .           			throw("unknown pc") 
    208            .          .           		} 
    209            .          .           		*u = unwinder{} 
    210            .          .           		return 
    211            .          .           	} 
    212            .          .           	frame.fn = f 
    213            .          .            
    214            .          .           	// Populate the unwinder. 
    215            .       20ms           	*u = unwinder{ 
    216            .          .           		frame:        frame, 
    217            .          .           		g:            gp.guintptr(), 
    218         40ms       40ms           		cgoCtxt:      len(gp.cgoCtxt) - 1, 
    219            .          .           		calleeFuncID: abi.FuncIDNormal, 
    220            .          .           		flags:        flags, 
    221            .          .           	} 
    222            .          .            
    223            .          .           	isSyscall := frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.syscallpc && sp0 == gp.syscallsp 
    224            .      260ms           	u.resolveInternal(true, isSyscall) 
    225            .          .           } 
    226            .          .            
    227            .          .           func (u *unwinder) valid() bool { 
    228            .          .           	return u.frame.pc != 0 
    229            .          .           } 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:       370ms      370ms (flat, cum)  0.77%
    252            .          .           func (u *unwinder) resolveInternal(innermost, isSyscall bool) { 
    253            .          .           	frame := &u.frame 
    254            .          .           	gp := u.g.ptr() 
    255            .          .            
    256            .          .           	f := frame.fn 
    257        310ms      310ms           	if f.pcsp == 0 { 
    258            .          .           		// No frame information, must be external function, like race support. 
    259            .          .           		// See golang.org/issue/13568. 
    260            .          .           		u.finishInternal() 
    261            .          .           		return 
    262            .          .           	} 
    263            .          .            
    264            .          .           	// Compute function info flags. 
    265            .          .           	flag := f.flag 
    266         50ms       50ms           	if f.funcID == abi.FuncID_cgocallback { 
    267            .          .           		// cgocallback does write SP to switch from the g0 to the curg stack, 
    268            .          .           		// but it carefully arranges that during the transition BOTH stacks 
    269            .          .           		// have cgocallback frame valid for unwinding through. 
    270            .          .           		// So we don't need to exclude it with the other SP-writing functions. 
    271            .          .           		flag &^= abi.FuncFlagSPWrite 
    272            .          .           	} 
    273         10ms       10ms           	if isSyscall { 
    274            .          .           		// Some Syscall functions write to SP, but they do so only after 
    275            .          .           		// saving the entry PC/SP using entersyscall. 
    276            .          .           		// Since we are using the entry PC/SP, the later SP write doesn't matter. 
    277            .          .           		flag &^= abi.FuncFlagSPWrite 
    278            .          .           	} 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        20ms      1.51s (flat, cum)  3.16%
    318            .          .           				} 
    319            .          .           				gp = gp.m.curg 
    320            .          .           				u.g.set(gp) 
    321            .          .           				frame.sp = gp.sched.sp 
    322            .          .           				u.cgoCtxt = len(gp.cgoCtxt) - 1 
    323         10ms       10ms           				flag &^= abi.FuncFlagSPWrite 
    324            .          .           			} 
    325            .          .           		} 
    326         10ms      1.50s           		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc)) 
    327            .          .           		if !usesLR { 
    328            .          .           			// On x86, call instruction pushes return PC before entering new function. 
    329            .          .           			frame.fp += goarch.PtrSize 
    330            .          .           		} 
    331            .          .           	} 
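
Note: nearly all of the 1.50s on line 326 is funcspdelta, which consults the function's PC-value table (pcvalue, symtab.go:1203) to recover the SP delta in effect at a given PC. The real table is a compact varint-encoded delta stream; purely as a simplified model, a lookup over a hypothetical pre-expanded table could look like:

    package main

    import (
        "fmt"
        "sort"
    )

    // pcSP maps a PC offset (relative to function entry) to the stack
    // pointer delta in effect from that offset onward. Hypothetical,
    // pre-expanded stand-in for Go's varint-encoded PC-value tables.
    type pcSP struct {
        pcOff   uintptr
        spDelta int32
    }

    // spDeltaAt returns the SP delta for the entry whose range covers pc.
    // Entries must be sorted by pcOff, with entry 0 at offset 0.
    func spDeltaAt(table []pcSP, pc uintptr) int32 {
        i := sort.Search(len(table), func(i int) bool { return table[i].pcOff > pc })
        return table[i-1].spDelta
    }

    func main() {
        // prologue grows the frame, epilogue shrinks it
        table := []pcSP{{0, 0}, {4, 32}, {60, 0}}
        fmt.Println(spDeltaAt(table, 0))  // 0
        fmt.Println(spDeltaAt(table, 16)) // 32
    }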

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       10ms (flat, cum) 0.021%
    366            .          .           		} 
    367            .          .           		frame.lr = 0 
    368            .          .           	} else { 
    369            .          .           		var lrPtr uintptr 
    370            .          .           		if usesLR { 
    371         10ms       10ms           			if innermost && frame.sp < frame.fp || frame.lr == 0 { 
    372            .          .           				lrPtr = frame.sp 
    373            .          .           				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr)) 
    374            .          .           			} 
    375            .          .           		} else { 
    376            .          .           			if frame.lr == 0 { 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       10ms (flat, cum) 0.021%
    405            .          .           	// Other architectures may make different decisions. 
    406            .          .           	if frame.varp > frame.sp && framepointer_enabled { 
    407            .          .           		frame.varp -= goarch.PtrSize 
    408            .          .           	} 
    409            .          .            
    410         10ms       10ms           	frame.argp = frame.fp + sys.MinFrameSize 
    411            .          .            
    412            .          .           	// Determine frame's 'continuation PC', where it can continue. 
    413            .          .           	// Normally this is the return address on the stack, but if sigpanic 
    414            .          .           	// is immediately below this function on the stack, then the frame 
    415            .          .           	// stopped executing due to a trap, and frame.pc is probably not 

runtime.(*unwinder).next

/usr/lib/go/src/runtime/traceback.go

  Total:        40ms      430ms (flat, cum)   0.9%
    435            .          .           			frame.continpc = 0 
    436            .          .           		} 
    437            .          .           	} 
    438            .          .           } 
    439            .          .            
    440         40ms       40ms           func (u *unwinder) next() { 
    441            .          .           	frame := &u.frame 
    442            .          .           	f := frame.fn 
    443            .          .           	gp := u.g.ptr() 
    444            .          .            
    445            .          .           	// Do not unwind past the bottom of the stack. 
    446            .          .           	if frame.lr == 0 { 
    447            .       20ms           		u.finishInternal() 
    448            .          .           		return 
    449            .          .           	} 
    450            .      370ms           	flr := findfunc(frame.lr) 
    451            .          .           	if !flr.valid() { 
    452            .          .           		// This happens if you get a profiling interrupt at just the wrong time. 
    453            .          .           		// In that context it is okay to stop early. 
    454            .          .           		// But if no error flags are set, we're doing a garbage collection and must 
    455            .          .           		// get everything, so crash loudly. 

runtime.(*unwinder).next

/usr/lib/go/src/runtime/traceback.go

  Total:        30ms      1.67s (flat, cum)  3.50%
    488            .          .           		u.flags &^= unwindTrap 
    489            .          .           	} 
    490            .          .            
    491            .          .           	// Unwind to next frame. 
    492            .          .           	u.calleeFuncID = f.funcID 
    493         10ms       10ms           	frame.fn = flr 
    494            .          .           	frame.pc = frame.lr 
    495            .          .           	frame.lr = 0 
    496            .          .           	frame.sp = frame.fp 
    497            .          .           	frame.fp = 0 
    498            .          .            
    499            .          .           	// On link register architectures, sighandler saves the LR on stack 
    500            .          .           	// before faking a call. 
    501            .          .           	if usesLR && injectedCall { 
    502            .          .           		x := *(*uintptr)(unsafe.Pointer(frame.sp)) 
    503            .          .           		frame.sp += alignUp(sys.MinFrameSize, sys.StackAlign) 
    504            .          .           		f = findfunc(frame.pc) 
    505            .          .           		frame.fn = f 
    506            .          .           		if !f.valid() { 
    507            .          .           			frame.pc = x 
    508            .          .           		} else if funcspdelta(f, frame.pc) == 0 { 
    509            .          .           			frame.lr = x 
    510            .          .           		} 
    511            .          .           	} 
    512            .          .            
    513         10ms      1.65s           	u.resolveInternal(false, false) 
    514         10ms       10ms           } 
    515            .          .            
    516            .          .           // finishInternal is an unwinder-internal helper called after the stack has been 

runtime.(*unwinder).finishInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       10ms (flat, cum) 0.021%
    517            .          .           // exhausted. It sets the unwinder to an invalid state and checks that it 
    518            .          .           // successfully unwound the entire stack. 
    519         10ms       10ms           func (u *unwinder) finishInternal() { 
    520            .          .           	u.frame.pc = 0 
    521            .          .            
    522            .          .           	// Note that panic != nil is okay here: there can be leftover panics, 
    523            .          .           	// because the defers on the panic stack do not nest in frame order as 
    524            .          .           	// they do on the defer stack. If you have: 

runtime.(*unwinder).finishInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       10ms (flat, cum) 0.021%
    558            .          .           	// callbacks only happen when everything is stopped nicely. 
    559            .          .           	// At other times, such as when gathering a stack for a profiling signal 
    560            .          .           	// or when printing a traceback during a crash, everything may not be 
    561            .          .           	// stopped nicely, and the stack walk may not be able to complete. 
    562            .          .           	gp := u.g.ptr() 
    563         10ms       10ms           	if u.flags&(unwindPrintErrors|unwindSilentErrors) == 0 && u.frame.sp != gp.stktopsp { 
    564            .          .           		print("runtime: g", gp.goid, ": frame.sp=", hex(u.frame.sp), " top=", hex(gp.stktopsp), "\n") 
    565            .          .           		print("\tstack=[", hex(gp.stack.lo), "-", hex(gp.stack.hi), "\n") 
    566            .          .           		throw("traceback did not unwind completely") 
    567            .          .           	} 
    568            .          .           } 

runtime.tracebackPCs

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms       50ms (flat, cum)   0.1%
    618            .          .           // 
    619            .          .           // Callers should set the unwindSilentErrors flag on u. 
    620            .          .           func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int { 
    621            .          .           	var cgoBuf [32]uintptr 
    622            .          .           	n := 0 
    623            .       20ms           	for ; n < len(pcBuf) && u.valid(); u.next() { 
    624            .          .           		f := u.frame.fn 
    625            .          .           		cgoN := u.cgoCallers(cgoBuf[:]) 
    626            .          .            
    627            .          .           		// TODO: Why does &u.cache cause u to escape? (Same in traceback2) 
    628            .       20ms           		for iu, uf := newInlineUnwinder(f, u.symPC()); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) { 
    629         10ms       10ms           			sf := iu.srcFunc(uf) 
    630            .          .           			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(u.calleeFuncID) { 
    631            .          .           				// ignore wrappers 
    632            .          .           			} else if skip > 0 { 
    633            .          .           				skip-- 
    634            .          .           			} else { 

runtime.callers

/usr/lib/go/src/runtime/traceback.go

  Total:           0       50ms (flat, cum)   0.1%
   1092            .          .           func callers(skip int, pcbuf []uintptr) int { 
   1093            .          .           	sp := sys.GetCallerSP() 
   1094            .          .           	pc := sys.GetCallerPC() 
   1095            .          .           	gp := getg() 
   1096            .          .           	var n int 
   1097            .       50ms           	systemstack(func() { 
   1098            .          .           		var u unwinder 

runtime.callers.func1

/usr/lib/go/src/runtime/traceback.go

  Total:           0       50ms (flat, cum)   0.1%
   1099            .          .           		u.initAt(pc, sp, 0, gp, unwindSilentErrors) 
   1100            .       50ms           		n = tracebackPCs(&u, skip, pcbuf) 
   1101            .          .           	}) 
   1102            .          .           	return n 
   1103            .          .           } 
   1104            .          .            
   1105            .          .           func gcallers(gp *g, skip int, pcbuf []uintptr) int { 
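
Note: callers is the runtime half of the public runtime.Callers API: capture raw PCs cheaply on the system stack, symbolize later. The standard user-level capture-then-resolve pattern looks like this:

    package main

    import (
        "fmt"
        "runtime"
    )

    func trace() {
        pcs := make([]uintptr, 32)
        n := runtime.Callers(2, pcs) // skip runtime.Callers and trace itself
        frames := runtime.CallersFrames(pcs[:n])
        for {
            f, more := frames.Next()
            fmt.Printf("%s\n\t%s:%d\n", f.Function, f.File, f.Line)
            if !more {
                break
            }
        }
    }

    func main() { trace() }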

runtime.isSystemGoroutine

/usr/lib/go/src/runtime/traceback.go

  Total:        80ms      260ms (flat, cum)  0.54%
   1364            .          .           // If fixed is true, any goroutine that can vary between user and 
   1365            .          .           // system (that is, the finalizer goroutine) is considered a user 
   1366            .          .           // goroutine. 
   1367            .          .           func isSystemGoroutine(gp *g, fixed bool) bool { 
   1368            .          .           	// Keep this in sync with internal/trace.IsSystemGoroutine. 
   1369         20ms      120ms           	f := findfunc(gp.startpc) 
   1370            .          .           	if !f.valid() { 
   1371            .          .           		return false 
   1372            .          .           	} 
   1373         30ms       30ms           	if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_corostart || f.funcID == abi.FuncID_handleAsyncEvent { 
   1374            .          .           		return false 
   1375            .          .           	} 
   1376            .          .           	if f.funcID == abi.FuncID_runFinalizers { 
   1377            .          .           		// We include the finalizer goroutine if it's calling 
   1378            .          .           		// back into user code. 
   1379            .          .           		if fixed { 
   1380            .          .           			// This goroutine can vary. In fixed mode, 
   1381            .          .           			// always consider it a user goroutine. 
   1382            .          .           			return false 
   1383            .          .           		} 
   1384            .          .           		return fingStatus.Load()&fingRunningFinalizer == 0 
   1385            .          .           	} 
   1386         10ms       10ms           	if f.funcID == abi.FuncID_runCleanups { 
   1387            .          .           		// We include the cleanup goroutines if they're calling 
   1388            .          .           		// back into user code. 
   1389            .          .           		if fixed { 
   1390            .          .           			// This goroutine can vary. In fixed mode, 
   1391            .          .           			// always consider it a user goroutine. 
   1392            .          .           			return false 
   1393            .          .           		} 
   1394            .          .           		return !gp.runningCleanups.Load() 
   1395            .          .           	} 
   1396         20ms      100ms           	return stringslite.HasPrefix(funcname(f), "runtime.") 
   1397            .          .           } 
   1398            .          .            
   1399            .          .           // SetCgoTraceback records three C functions to use to gather 
   1400            .          .           // traceback information from C code and to convert that traceback 
   1401            .          .           // information into symbolic information. These are used when printing 

internal/runtime/maps.(*Map).getWithoutKeySmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        70ms      150ms (flat, cum)  0.31%
     12            .          .           	"internal/race" 
     13            .          .           	"internal/runtime/sys" 
     14            .          .           	"unsafe" 
     15            .          .           ) 
     16            .          .            
     17         10ms       90ms           func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer { 
     18            .          .           	g := groupReference{ 
     19         10ms       10ms           		data: m.dirPtr, 
     20            .          .           	} 
     21            .          .            
     22            .          .           	ctrls := *g.ctrls() 
     23            .          .           	slotKey := g.key(typ, 0) 
     24            .          .           	slotSize := typ.SlotSize 
     25            .          .            
     26            .          .           	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight, 
     27            .          .           	// where there are 8 keys to check, all of which don't quick-match the lookup key. 
     28            .          .           	// In that case, we can save hashing the lookup key. That savings is worth this extra code 
     29            .          .           	// for strings that are long enough that hashing is expensive. 
     30         50ms       50ms           	if len(key) > 64 { 
     31            .          .           		// String hashing and equality might be expensive. Do a quick check first. 
     32            .          .           		j := abi.SwissMapGroupSlots 
     33            .          .           		for i := range abi.SwissMapGroupSlots { 
     34            .          .           			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) { 
     35            .          .           				if j < abi.SwissMapGroupSlots { 

internal/runtime/maps.(*Map).getWithoutKeySmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       190ms      430ms (flat, cum)   0.9%
     54            .          .           		return nil 
     55            .          .           	} 
     56            .          .            
     57            .          .           dohash: 
     58            .          .           	// This path will cost 1 hash and 1+ε comparisons. 
     59         10ms      150ms           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed) 
     60            .          .           	h2 := uint8(h2(hash)) 
     61         10ms       10ms           	ctrls = *g.ctrls() 
     62            .          .           	slotKey = g.key(typ, 0) 
     63            .          .            
     64         10ms       10ms           	for range abi.SwissMapGroupSlots { 
     65        110ms      210ms           		if uint8(ctrls) == h2 && key == *(*string)(slotKey) { 
     66            .          .           			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) 
     67            .          .           		} 
     68         30ms       30ms           		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) 
     69            .          .           		ctrls >>= 8 
     70            .          .           	} 
     71         20ms       20ms           	return nil 
     72            .          .           } 
     73            .          .            
     74            .          .           // Returns true if a and b might be equal. 
     75            .          .           // Returns false if a and b are definitely not equal. 
     76            .          .           // Requires len(a)>=8. 
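
Note: the len(key) > 64 fast path relies on longStringQuickEqualityTest, whose contract the comment above states: false positives are allowed, false negatives are not, so the map can skip hashing long keys that obviously differ. A hedged sketch of such a test (the real code compares raw 8-byte loads; this slicing version is illustrative only):

    package main

    import "fmt"

    // mightBeEqual reports whether a and b could be the same string,
    // comparing only the length and the first and last 8 bytes.
    // False positives are allowed; false negatives are not.
    // Requires len(a) >= 8. Hypothetical stand-in for
    // longStringQuickEqualityTest.
    func mightBeEqual(a, b string) bool {
        if len(a) != len(b) {
            return false
        }
        return a[:8] == b[:8] && a[len(a)-8:] == b[len(b)-8:]
    }

    func main() {
        fmt.Println(mightBeEqual("abcdefgh12345678", "abcdefgh12345678")) // true
        fmt.Println(mightBeEqual("abcdefgh12345678", "abcdefgh12345679")) // false
        fmt.Println(mightBeEqual("abcdefghXX345678", "abcdefghYY345678")) // true (false positive, middle differs)
    }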

runtime.mapaccess1_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        30ms      140ms (flat, cum)  0.29%
    105            .          .           		callerpc := sys.GetCallerPC() 
    106            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess1_faststr) 
    107            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
    108            .          .           	} 
    109            .          .            
    110         10ms       10ms           	if m == nil || m.Used() == 0 { 
    111            .          .           		return unsafe.Pointer(&zeroVal[0]) 
    112            .          .           	} 
    113            .          .            
    114            .          .           	if m.writing != 0 { 
    115            .          .           		fatal("concurrent map read and map write") 
    116            .          .           		return nil 
    117            .          .           	} 
    118            .          .            
    119            .          .           	if m.dirLen <= 0 { 
    120            .      110ms           		elem := m.getWithoutKeySmallFastStr(typ, key) 
    121         10ms       10ms           		if elem == nil { 
    122            .          .           			return unsafe.Pointer(&zeroVal[0]) 
    123            .          .           		} 
    124         10ms       10ms           		return elem 
    125            .          .           	} 
    126            .          .            
    127            .          .           	k := key 
    128            .          .           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) 
    129            .          .            

runtime.mapaccess2_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       110ms      750ms (flat, cum)  1.57%
    157            .          .           		} 
    158            .          .           	} 
    159            .          .           } 
    160            .          .            
    161            .          .           //go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr 
    162         10ms      150ms           func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) { 
    163            .          .           	if race.Enabled && m != nil { 
    164            .          .           		callerpc := sys.GetCallerPC() 
    165            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr) 
    166            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
    167            .          .           	} 
    168            .          .            
    169         70ms       70ms           	if m == nil || m.Used() == 0 { 
    170            .          .           		return unsafe.Pointer(&zeroVal[0]), false 
    171            .          .           	} 
    172            .          .            
    173            .          .           	if m.writing != 0 { 
    174            .          .           		fatal("concurrent map read and map write") 
    175            .          .           		return nil, false 
    176            .          .           	} 
    177            .          .            
    178            .          .           	if m.dirLen <= 0 { 
    179            .      470ms           		elem := m.getWithoutKeySmallFastStr(typ, key) 
    180            .          .           		if elem == nil { 
    181            .          .           			return unsafe.Pointer(&zeroVal[0]), false 
    182            .          .           		} 
    183            .          .           		return elem, true 
    184            .          .           	} 
    185            .          .            
    186            .          .           	k := key 
    187            .       10ms           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) 
    188            .          .            
    189            .          .           	// Select table. 
    190            .          .           	idx := m.directoryIndex(hash) 
    191            .          .           	t := m.directoryAt(idx) 
    192            .          .            
    193            .          .           	// Probe table. 
    194            .          .           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    195            .          .           	for ; ; seq = seq.next() { 
    196            .          .           		g := t.groups.group(typ, seq.offset) 
    197            .          .            
    198         20ms       20ms           		match := g.ctrls().matchH2(h2(hash)) 
    199            .          .            
    200            .          .           		for match != 0 { 
    201            .          .           			i := match.first() 
    202            .          .            
    203            .          .           			slotKey := g.key(typ, i) 
    204            .       20ms           			if key == *(*string)(slotKey) { 
    205            .          .           				slotElem := unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) 
    206         10ms       10ms           				return slotElem, true 
    207            .          .           			} 
    208            .          .           			match = match.removeFirst() 
    209            .          .           		} 
    210            .          .            
    211            .          .           		match = g.ctrls().matchEmpty() 
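
Note: matchH2 compares all eight control bytes of a group against h2 at once via ctrlGroupMatchH2 (group.go), a SWAR byte match. A standalone sketch of the trick; as in the runtime, it can produce rare false positives, which are harmless because every candidate slot is confirmed by a full key comparison:

    package main

    import "fmt"

    const (
        lsb = 0x0101010101010101 // low bit of each byte
        msb = 0x8080808080808080 // high bit of each byte
    )

    // matchByte returns a bitset with the high bit set in (at least)
    // every byte of w equal to b. May set extra bits in rare cases;
    // callers must verify candidates.
    func matchByte(w uint64, b uint8) uint64 {
        v := w ^ (lsb * uint64(b)) // bytes equal to b become zero
        return (v - lsb) &^ v & msb
    }

    func main() {
        group := uint64(0x17_00_42_17_00_00_00_17) // eight control bytes
        fmt.Printf("%016x\n", matchByte(group, 0x17)) // 8000008000000080
    }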

internal/runtime/maps.(*Map).putSlotSmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       100ms      100ms (flat, cum)  0.21%
    215            .          .           			return unsafe.Pointer(&zeroVal[0]), false 
    216            .          .           		} 
    217            .          .           	} 
    218            .          .           } 
    219            .          .            
    220         20ms       20ms           func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer { 
    221            .          .           	g := groupReference{ 
    222            .          .           		data: m.dirPtr, 
    223            .          .           	} 
    224            .          .            
    225         20ms       20ms           	match := g.ctrls().matchH2(h2(hash)) 
    226            .          .            
    227            .          .           	// Look for an existing slot containing this key. 
    228            .          .           	for match != 0 { 
    229            .          .           		i := match.first() 
    230            .          .            
    231            .          .           		slotKey := g.key(typ, i) 
    232            .          .           		if key == *(*string)(slotKey) { 
    233            .          .           			// Key needs update, as the backing storage may differ. 
    234            .          .           			*(*string)(slotKey) = key 
    235            .          .           			slotElem := g.elem(typ, i) 
    236            .          .           			return slotElem 
    237            .          .           		} 
    238            .          .           		match = match.removeFirst() 
    239            .          .           	} 
    240            .          .            
    241            .          .           	// There can't be deleted slots, small maps can't have them 
    242            .          .           	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit 
    243            .          .           	// more efficient than matchEmpty. 
    244         10ms       10ms           	match = g.ctrls().matchEmptyOrDeleted() 
    245            .          .           	if match == 0 { 
    246            .          .           		fatal("small map with no empty slot (concurrent map writes?)") 
    247            .          .           	} 
    248            .          .            
    249         10ms       10ms           	i := match.first() 
    250            .          .            
    251         10ms       10ms           	slotKey := g.key(typ, i) 
    252         20ms       20ms           	*(*string)(slotKey) = key 
    253            .          .            
    254         10ms       10ms           	slotElem := g.elem(typ, i) 
    255            .          .            
    256            .          .           	g.ctrls().set(i, ctrl(h2(hash))) 
    257            .          .           	m.used++ 
    258            .          .            
    259            .          .           	return slotElem 

runtime.mapassign_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        90ms      870ms (flat, cum)  1.82%
    267            .          .           	if race.Enabled { 
    268            .          .           		callerpc := sys.GetCallerPC() 
    269            .          .           		pc := abi.FuncPCABIInternal(runtime_mapassign_faststr) 
    270            .          .           		race.WritePC(unsafe.Pointer(m), callerpc, pc) 
    271            .          .           	} 
    272         20ms       20ms           	if m.writing != 0 { 
    273            .          .           		fatal("concurrent map writes") 
    274            .          .           	} 
    275            .          .            
    276            .          .           	k := key 
    277         20ms      120ms           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) 
    278            .          .            
    279            .          .           	// Set writing after calling Hasher, since Hasher may panic, in which 
    280            .          .           	// case we have not actually done a write. 
    281            .          .           	m.writing ^= 1 // toggle, see comment on writing 
    282            .          .            
    283         10ms       10ms           	if m.dirPtr == nil { 
    284            .      560ms           		m.growToSmall(typ) 
    285            .          .           	} 
    286            .          .            
    287         10ms       10ms           	if m.dirLen == 0 { 
    288            .          .           		if m.used < abi.SwissMapGroupSlots { 
    289            .      100ms           			elem := m.putSlotSmallFastStr(typ, hash, key) 
    290            .          .            
    291         10ms       10ms           			if m.writing == 0 { 
    292            .          .           				fatal("concurrent map writes") 
    293            .          .           			} 
    294         20ms       20ms           			m.writing ^= 1 
    295            .          .            
    296            .          .           			return elem 
    297            .          .           		} 
    298            .          .            
    299            .          .           		// Can't fit another entry, grow to full size map. 
    300            .       20ms           		m.growToTable(typ) 
    301            .          .           	} 
    302            .          .            
    303            .          .           	var slotElem unsafe.Pointer 
    304            .          .           outer: 
    305            .          .           	for { 

runtime.mapassign_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        10ms       10ms (flat, cum) 0.021%
    368            .          .           				t.pruneTombstones(typ, m) 
    369            .          .           			} 
    370            .          .            
    371            .          .           			// If there is room left to grow, just insert the new entry. 
    372            .          .           			if t.growthLeft > 0 { 
    373         10ms       10ms           				slotKey := g.key(typ, i) 
    374            .          .           				*(*string)(slotKey) = key 
    375            .          .            
    376            .          .           				slotElem = g.elem(typ, i) 
    377            .          .            
    378            .          .           				g.ctrls().set(i, ctrl(h2(hash))) 

runtime.mapdelete_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        10ms       50ms (flat, cum)   0.1%
    407            .          .            
    408            .          .           	if m == nil || m.Used() == 0 { 
    409            .          .           		return 
    410            .          .           	} 
    411            .          .            
    412            .       40ms           	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) 
    413         10ms       10ms           } 

git.urbach.dev/cli/q/src/ssa.(*IR).AddBlock

/home/user/q/src/ssa/IR.go

  Total:           0       50ms (flat, cum)   0.1%
      5            .          .           	Blocks []*Block 
      6            .          .           } 
      7            .          .            
      8            .          .           // AddBlock adds a new block to the function. 
      9            .          .           func (ir *IR) AddBlock(block *Block) { 
     10            .       50ms           	ir.Blocks = append(ir.Blocks, block) 
     11            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*IR).Append

/home/user/q/src/ssa/IR.go

  Total:       150ms      600ms (flat, cum)  1.26%
     13            .          .           // Append adds a new value to the last block. 
     14         40ms       40ms           func (ir *IR) Append(instr Value) Value { 
     15            .      420ms           	existing := ir.Block().FindExisting(instr) 
     16            .          .            
     17            .          .           	if existing != nil { 
     18            .          .           		return existing 
     19            .          .           	} 
     20            .          .            
     21         90ms      120ms           	ir.Block().Append(instr) 
     22         20ms       20ms           	return instr 
     23            .          .           } 
     24            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).Block

/home/user/q/src/ssa/IR.go

  Total:        60ms       60ms (flat, cum)  0.13%
     25            .          .           // Block returns the last block. 
     26            .          .           func (ir *IR) Block() *Block { 
     27         60ms       60ms           	return ir.Blocks[len(ir.Blocks)-1] 
     28            .          .           } 
     29            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).ComputeUsers

/home/user/q/src/ssa/IR.go

  Total:       270ms      1.06s (flat, cum)  2.22%
     31            .          .           func (ir *IR) ComputeUsers() { 
     32            .          .           	for _, block := range ir.Blocks { 
     33         30ms       30ms           		for _, value := range block.Instructions { 
     34        100ms      380ms           			for _, input := range value.Inputs() { 
     35        140ms      650ms           				input.AddUser(value) 
     36            .          .           			} 
     37            .          .           		} 
     38            .          .           	} 
     39            .          .           } 
     40            .          .            
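
ComputeUsers materializes the reverse edges of the SSA graph: every value already knows its inputs, and this single pass tells every input who consumes it, which is what later passes (ReplaceAll, dead-code removal) rely on. A self-contained sketch of the same idea, with a deliberately minimal stand-in for the q Value interface (the node type here is hypothetical):

    package main

    import "fmt"

    // node is a toy SSA value: it knows its operands (inputs) and,
    // after the pass runs, the values that consume it (users).
    type node struct {
    	name   string
    	inputs []*node
    	users  []*node
    }

    func computeUsers(blocks [][]*node) {
    	for _, instrs := range blocks {
    		for _, v := range instrs {
    			for _, in := range v.inputs {
    				in.users = append(in.users, v) // reverse def->use edge
    			}
    		}
    	}
    }

    func main() {
    	a := &node{name: "a"}
    	b := &node{name: "b", inputs: []*node{a}}
    	c := &node{name: "c", inputs: []*node{a, b}}
    	computeUsers([][]*node{{a, b, c}})
    	fmt.Println(len(a.users), len(b.users)) // 2 1
    }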

git.urbach.dev/cli/q/src/ssa.(*IR).CountValues

/home/user/q/src/ssa/IR.go

  Total:        20ms       20ms (flat, cum) 0.042%
     41            .          .           // CountValues returns the total number of values. 
     42            .          .           func (ir *IR) CountValues() int { 
     43            .          .           	count := 0 
     44            .          .            
     45            .          .           	for _, block := range ir.Blocks { 
     46         20ms       20ms           		count += len(block.Instructions) 
     47            .          .           	} 
     48            .          .            
     49            .          .           	return count 

git.urbach.dev/cli/q/src/ssa.(*IR).ExitBlocks

/home/user/q/src/ssa/IR.go

  Total:        20ms      180ms (flat, cum)  0.38%
     51            .          .            
     52            .          .           // ExitBlocks is an iterator for all exit blocks. 
     53            .          .           func (ir *IR) ExitBlocks(yield func(*Block) bool) { 
     54         10ms       10ms           	for _, block := range ir.Blocks { 
     55            .          .           		_, returns := block.Last().(*Return) 
     56            .          .            
     57            .          .           		if !returns { 
     58            .          .           			continue 
     59            .          .           		} 
     60            .          .            
     61         10ms      170ms           		if !yield(block) { 
     62            .          .           			return 
     63            .          .           		} 
     64            .          .           	} 

git.urbach.dev/cli/q/src/ssa.(*IR).IsIdentified

/home/user/q/src/ssa/IR.go

  Total:        20ms       90ms (flat, cum)  0.19%
     66            .          .            
     67            .          .           // IsIdentified returns true if the value can be obtained from one of the identifiers. 
     68            .          .           func (ir *IR) IsIdentified(value Value) bool { 
     69         10ms       10ms           	for _, block := range ir.Blocks { 
     70         10ms       80ms           		if block.IsIdentified(value) { 
     71            .          .           			return true 
     72            .          .           		} 
     73            .          .           	} 
     74            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).ReplaceAll

/home/user/q/src/ssa/IR.go

  Total:        70ms       70ms (flat, cum)  0.15%
     76            .          .           } 
     77            .          .            
     78            .          .           // ReplaceAll replaces all occurrences of the given `old` value with the `new` value. 
     79            .          .           func (ir *IR) ReplaceAll(old Value, new Value) { 
     80         30ms       30ms           	for _, block := range ir.Blocks { 
     81            .          .           		for _, value := range block.Instructions { 
     82         40ms       40ms           			value.Replace(old, new) 
     83            .          .           		} 
     84            .          .           	} 
     85            .          .           } 

runtime.makeslicecopy

/usr/lib/go/src/runtime/slice.go

  Total:        30ms      1.92s (flat, cum)  4.02%
     33            .          .           	panic(errorString("makeslice: cap out of range")) 
     34            .          .           } 
     35            .          .            
     36            .          .           // makeslicecopy allocates a slice of "tolen" elements of type "et", 
     37            .          .           // then copies "fromlen" elements of type "et" into that new allocation from "from". 
     38         20ms      170ms           func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer { 
     39            .          .           	var tomem, copymem uintptr 
     40            .          .           	if uintptr(tolen) > uintptr(fromlen) { 
     41            .          .           		var overflow bool 
     42            .          .           		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen)) 
     43            .          .           		if overflow || tomem > maxAlloc || tolen < 0 { 
     44            .          .           			panicmakeslicelen() 
     45            .          .           		} 
     46            .          .           		copymem = et.Size_ * uintptr(fromlen) 
     47            .          .           	} else { 
     48            .          .           		// fromlen is a known good length, equal to or greater than tolen, 
     49            .          .           		// thereby making tolen a good slice length too as from and to slices have the 
     50            .          .           		// same element width. 
     51            .          .           		tomem = et.Size_ * uintptr(tolen) 
     52            .          .           		copymem = tomem 
     53            .          .           	} 
     54            .          .            
     55            .          .           	var to unsafe.Pointer 
     56            .          .           	if !et.Pointers() { 
     57            .      1.74s           		to = mallocgc(tomem, nil, false) 
     58         10ms       10ms           		if copymem < tomem { 
     59            .          .           			memclrNoHeapPointers(add(to, copymem), tomem-copymem) 
     60            .          .           		} 
     61            .          .           	} else { 
     62            .          .           		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
     63            .          .           		to = mallocgc(tomem, et, true) 

runtime.makeslicecopy

/usr/lib/go/src/runtime/slice.go

  Total:           0       10ms (flat, cum) 0.021%
     82            .          .           	} 
     83            .          .           	if asanenabled { 
     84            .          .           		asanread(from, copymem) 
     85            .          .           	} 
     86            .          .            
     87            .       10ms           	memmove(to, from, copymem) 
     88            .          .            
     89            .          .           	return to 
     90            .          .           } 
     91            .          .            
     92            .          .           // makeslice should be an internal detail, 
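
makeslicecopy shows up in a profile when the compiler fuses a make that is immediately consumed by a copy into one runtime call, so the allocation and the memmove happen together and only the tail beyond the copied region needs clearing. The source pattern that can lower to it looks like this (sketch; the exact lowering depends on the compiler version):

    package main

    import "fmt"

    func main() {
    	src := []byte("profiling")
    	// make followed immediately by copy can be fused by the
    	// compiler into a single runtime.makeslicecopy call.
    	dst := make([]byte, len(src))
    	copy(dst, src)
    	fmt.Println(string(dst))
    }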

runtime.makeslice

/usr/lib/go/src/runtime/slice.go

  Total:       120ms      2.49s (flat, cum)  5.21%
     96            .          .           // 
     97            .          .           // Do not remove or change the type signature. 
     98            .          .           // See go.dev/issue/67401. 
     99            .          .           // 
    100            .          .           //go:linkname makeslice 
    101         20ms       20ms           func makeslice(et *_type, len, cap int) unsafe.Pointer { 
    102         50ms       50ms           	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap)) 
    103         30ms       30ms           	if overflow || mem > maxAlloc || len < 0 || len > cap { 
    104            .          .           		// NOTE: Produce a 'len out of range' error instead of a 
    105            .          .           		// 'cap out of range' error when someone does make([]T, bignumber). 
    106            .          .           		// 'cap out of range' is true too, but since the cap is only being 
    107            .          .           		// supplied implicitly, saying len is clearer. 
    108            .          .           		// See golang.org/issue/4085. 
    109            .          .           		mem, overflow := math.MulUintptr(et.Size_, uintptr(len)) 
    110            .          .           		if overflow || mem > maxAlloc || len < 0 { 
    111            .          .           			panicmakeslicelen() 
    112            .          .           		} 
    113            .          .           		panicmakeslicecap() 
    114            .          .           	} 
    115            .          .            
    116         20ms      2.39s           	return mallocgc(mem, et, true) 
    117            .          .           } 
    118            .          .            
    119            .          .           func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer { 
    120            .          .           	len := int(len64) 
    121            .          .           	if int64(len) != len64 { 
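
As the comment above spells out, when the capacity is only implied by the length, the runtime reports the length as the problem even though the capacity check tripped first. The panic is an ordinary runtime error and can be observed directly (hypothetical snippet; assumes a 64-bit build so the literal fits in int):

    package main

    func main() {
    	defer func() { println(recover().(error).Error()) }()
    	n := 1 << 60
    	_ = make([]int64, n) // runtime error: makeslice: len out of range
    }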

runtime.growslice

/usr/lib/go/src/runtime/slice.go

  Total:       280ms      280ms (flat, cum)  0.59%
    172            .          .           // 
    173            .          .           // Do not remove or change the type signature. 
    174            .          .           // See go.dev/issue/67401. 
    175            .          .           // 
    176            .          .           //go:linkname growslice 
    177         70ms       70ms           func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice { 
    178            .          .           	oldLen := newLen - num 
    179            .          .           	if raceenabled { 
    180            .          .           		callerpc := sys.GetCallerPC() 
    181            .          .           		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice)) 
    182            .          .           	} 
    183            .          .           	if msanenabled { 
    184            .          .           		msanread(oldPtr, uintptr(oldLen*int(et.Size_))) 
    185            .          .           	} 
    186            .          .           	if asanenabled { 
    187            .          .           		asanread(oldPtr, uintptr(oldLen*int(et.Size_))) 
    188            .          .           	} 
    189            .          .            
    190         70ms       70ms           	if newLen < 0 { 
    191            .          .           		panic(errorString("growslice: len out of range")) 
    192            .          .           	} 
    193            .          .            
    194         10ms       10ms           	if et.Size_ == 0 { 
    195            .          .           		// append should not create a slice with nil pointer but non-zero len. 
    196            .          .           		// We assume that append doesn't need to preserve oldPtr in this case. 
    197            .          .           		return slice{unsafe.Pointer(&zerobase), newLen, newLen} 
    198            .          .           	} 
    199            .          .            
    200         30ms       30ms           	newcap := nextslicecap(newLen, oldCap) 
    201            .          .            
    202            .          .           	var overflow bool 
    203            .          .           	var lenmem, newlenmem, capmem uintptr 
    204            .          .           	// Specialize for common values of et.Size. 
    205            .          .           	// For 1 we don't need any division/multiplication. 
    206            .          .           	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant. 
    207            .          .           	// For powers of 2, use a variable shift. 
    208            .          .           	noscan := !et.Pointers() 
    209            .          .           	switch { 
    210         10ms       10ms           	case et.Size_ == 1: 
    211            .          .           		lenmem = uintptr(oldLen) 
    212            .          .           		newlenmem = uintptr(newLen) 
    213            .          .           		capmem = roundupsize(uintptr(newcap), noscan) 
    214            .          .           		overflow = uintptr(newcap) > maxAlloc 
    215            .          .           		newcap = int(capmem) 
    216            .          .           	case et.Size_ == goarch.PtrSize: 
    217            .          .           		lenmem = uintptr(oldLen) * goarch.PtrSize 
    218            .          .           		newlenmem = uintptr(newLen) * goarch.PtrSize 
    219         20ms       20ms           		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan) 
    220            .          .           		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize 
    221            .          .           		newcap = int(capmem / goarch.PtrSize) 
    222         10ms       10ms           	case isPowerOfTwo(et.Size_): 
    223            .          .           		var shift uintptr 
    224            .          .           		if goarch.PtrSize == 8 { 
    225            .          .           			// Mask shift for better code generation. 
    226            .          .           			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63 
    227            .          .           		} else { 
    228            .          .           			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31 
    229            .          .           		} 
    230            .          .           		lenmem = uintptr(oldLen) << shift 
    231            .          .           		newlenmem = uintptr(newLen) << shift 
    232         50ms       50ms           		capmem = roundupsize(uintptr(newcap)<<shift, noscan) 
    233         10ms       10ms           		overflow = uintptr(newcap) > (maxAlloc >> shift) 
    234            .          .           		newcap = int(capmem >> shift) 
    235            .          .           		capmem = uintptr(newcap) << shift 
    236            .          .           	default: 
    237            .          .           		lenmem = uintptr(oldLen) * et.Size_ 
    238            .          .           		newlenmem = uintptr(newLen) * et.Size_ 

runtime.growslice

/usr/lib/go/src/runtime/slice.go

  Total:       120ms      1.24s (flat, cum)  2.60%
    253            .          .           	// 
    254            .          .           	// func main() { 
    255            .          .           	//   s = append(s, d, d, d, d) 
    256            .          .           	//   print(len(s), "\n") 
    257            .          .           	// } 
    258         30ms       30ms           	if overflow || capmem > maxAlloc { 
    259            .          .           		panic(errorString("growslice: len out of range")) 
    260            .          .           	} 
    261            .          .            
    262            .          .           	var p unsafe.Pointer 
    263            .          .           	if !et.Pointers() { 
    264            .       70ms           		p = mallocgc(capmem, nil, false) 
    265            .          .           		// The append() that calls growslice is going to overwrite from oldLen to newLen. 
    266            .          .           		// Only clear the part that will not be overwritten. 
    267            .          .           		// The reflect_growslice() that calls growslice will manually clear 
    268            .          .           		// the region not cleared here. 
    269         20ms       40ms           		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem) 
    270            .          .           	} else { 
    271            .          .           		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
    272         10ms      1.01s           		p = mallocgc(capmem, et, true) 
    273         10ms       10ms           		if lenmem > 0 && writeBarrier.enabled { 
    274            .          .           			// Only shade the pointers in oldPtr since we know the destination slice p 
    275            .          .           			// only contains nil pointers because it has been cleared during alloc. 
    276            .          .           			// 
    277            .          .           			// It's safe to pass a type to this function as an optimization because 
    278            .          .           			// from and to only ever refer to memory representing whole values of 
    279            .          .           			// type et. See the comment on bulkBarrierPreWrite. 
    280            .          .           			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et) 
    281            .          .           		} 
    282            .          .           	} 
    283            .       30ms           	memmove(p, oldPtr, lenmem) 
    284            .          .            
    285         50ms       50ms           	return slice{p, newLen, newcap} 
    286            .          .           } 
    287            .          .            
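
Nearly all of growslice's cumulative time above sits in mallocgc plus the memmove of the old contents, which is why sizing slices up front pays off: a pre-sized slice never enters growslice at all. A hedged micro-sketch of the difference (illustrative, not taken from the profiled program):

    package main

    func main() {
    	// Grows repeatedly: every time cap is exhausted, append calls
    	// growslice, which allocates and memmoves the old contents.
    	var a []int
    	for i := 0; i < 1_000_000; i++ {
    		a = append(a, i)
    	}

    	// Pre-sizing does one allocation; growslice never runs here.
    	b := make([]int, 0, 1_000_000)
    	for i := 0; i < 1_000_000; i++ {
    		b = append(b, i)
    	}
    	_, _ = a, b
    }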

runtime.nextslicecap

/usr/lib/go/src/runtime/slice.go

  Total:        30ms       30ms (flat, cum) 0.063%
    289            .          .           func nextslicecap(newLen, oldCap int) int { 
    290            .          .           	newcap := oldCap 
    291         20ms       20ms           	doublecap := newcap + newcap 
    292            .          .           	if newLen > doublecap { 
    293            .          .           		return newLen 
    294            .          .           	} 
    295            .          .            
    296            .          .           	const threshold = 256 
    297         10ms       10ms           	if oldCap < threshold { 
    298            .          .           		return doublecap 
    299            .          .           	} 
    300            .          .           	for { 
    301            .          .           		// Transition from growing 2x for small slices 
    302            .          .           		// to growing 1.25x for large slices. This formula 
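
The policy encoded here: double while the old capacity is below 256 elements, then taper toward 1.25x growth. It is observable from ordinary code, though the exact capacities also pass through roundupsize's size classes, so treat the printed values as approximate:

    package main

    import "fmt"

    func main() {
    	var s []int
    	prev := -1
    	for i := 0; i < 5000; i++ {
    		s = append(s, i)
    		if cap(s) != prev {
    			prev = cap(s)
    			fmt.Println("len", len(s), "-> cap", cap(s))
    		}
    	}
    }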

runtime.isPowerOfTwo

/usr/lib/go/src/runtime/slice.go

  Total:        10ms       10ms (flat, cum) 0.021%
    346            .          .           	new.len = old.len // preserve the old length 
    347            .          .           	return new 
    348            .          .           } 
    349            .          .            
    350            .          .           func isPowerOfTwo(x uintptr) bool { 
    351         10ms       10ms           	return x&(x-1) == 0 
    352            .          .           } 
    353            .          .            
    354            .          .           // slicecopy is used to copy from a string or slice of pointerless elements into a slice. 
    355            .          .           func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int { 
    356            .          .           	if fromLen == 0 || toLen == 0 { 
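
One caveat about the bit trick above: x&(x-1) == 0 also reports true for x == 0, so it is only a power-of-two test when the caller guarantees x > 0 (growslice does, since the et.Size_ == 0 case returned earlier). A general-purpose version would add that guard:

    // isPowerOfTwo as written accepts 0; a standalone
    // version needs the x != 0 guard.
    func isPowerOfTwoStrict(x uintptr) bool {
    	return x != 0 && x&(x-1) == 0
    }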

internal/bytealg.MakeNoZero

/usr/lib/go/src/runtime/slice.go

  Total:        30ms      110ms (flat, cum)  0.23%
    390            .          .           	} 
    391            .          .           	return n 
    392            .          .           } 
    393            .          .            
    394            .          .           //go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero 
    395         20ms       20ms           func bytealg_MakeNoZero(len int) []byte { 
    396            .          .           	if uintptr(len) > maxAlloc { 
    397            .          .           		panicmakeslicelen() 
    398            .          .           	} 
    399         10ms       10ms           	cap := roundupsize(uintptr(len), true) 
    400            .       80ms           	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len] 
    401            .          .           } 
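
Note the shape of the return: the request is rounded up to a malloc size class, the slice keeps that full rounded capacity, and only the length is trimmed back. MakeNoZero additionally skips the usual zeroing, which is sound only because its internal callers overwrite every byte before exposing the buffer. The same size-class rounding is visible from ordinary code via append (values assume a typical 64-bit build, where the smallest classes are 8 and 16 bytes):

    package main

    import "fmt"

    func main() {
    	src := make([]byte, 10)
    	b := append([]byte(nil), src...) // grows via growslice
    	// cap is the malloc size class, not the requested 10.
    	fmt.Println(len(b), cap(b)) // 10 16
    }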

git.urbach.dev/cli/q/src/ssa.NewBlock

/home/user/q/src/ssa/Block.go

  Total:           0      240ms (flat, cum)   0.5%
     18            .          .           	Predecessors []*Block 
     19            .          .           } 
     20            .          .            
     21            .          .           // NewBlock creates a new basic block. 
     22            .          .           func NewBlock(label string) *Block { 
     23            .      130ms           	return &Block{ 
     24            .      110ms           		Instructions: make([]Value, 0, 8), 
     25            .          .           		Label:        label, 
     26            .          .           	} 
     27            .          .           } 
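
Preallocating eight instruction slots per block trades a slightly larger initial allocation against the growslice/mallocgc costs visible earlier in this profile: blocks with at most eight values never reallocate on Append. The pattern in isolation (the threshold 8 is the one in the listing; everything else is illustrative):

    // Blocks start with room for 8 instructions; appends stay
    // allocation-free until the ninth value.
    type block struct{ instructions []any }

    func newBlock() *block {
    	return &block{instructions: make([]any, 0, 8)}
    }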

git.urbach.dev/cli/q/src/ssa.(*Block).AddSuccessor

/home/user/q/src/ssa/Block.go

  Total:        60ms      650ms (flat, cum)  1.36%
     28            .          .            
     29            .          .           // AddSuccessor adds the given block as a successor. 
     30            .          .           func (b *Block) AddSuccessor(successor *Block) { 
     31         10ms      110ms           	successor.Predecessors = append(successor.Predecessors, b) 
     32            .          .            
     33            .          .           	if len(b.Protected) > 0 { 
     34            .          .           		if successor.Protected == nil { 
     35            .          .           			successor.Protected = make(map[Value][]Value, len(b.Protected)) 
     36            .          .           		} 
     37            .          .            
     38            .          .           		maps.Copy(successor.Protected, b.Protected) 
     39            .          .           	} 
     40            .          .            
     41            .          .           	if b.Identifiers == nil { 
     42            .          .           		return 
     43            .          .           	} 
     44            .          .            
     45            .          .           	if successor.Identifiers == nil { 
     46            .       90ms           		successor.Identifiers = make(map[string]Value, len(b.Identifiers)) 
     47            .          .            
     48            .          .           		if len(successor.Predecessors) == 1 { 
     49         30ms      310ms           			maps.Copy(successor.Identifiers, b.Identifiers) 
     50            .          .           			return 
     51            .          .           		} 
     52            .          .           	} 
     53            .          .            
     54         10ms       20ms           	keys := make(map[string]struct{}, max(len(b.Identifiers), len(successor.Identifiers))) 
     55            .          .            
     56            .       20ms           	for name := range successor.Identifiers { 
     57            .       10ms           		keys[name] = struct{}{} 
     58            .          .           	} 
     59            .          .            
     60            .       30ms           	for name := range b.Identifiers { 
     61            .       20ms           		keys[name] = struct{}{} 
     62            .          .           	} 
     63            .          .            
     64            .          .           	var modifiedStructs []string 
     65            .          .            
     66            .          .           	for name := range keys { 
     67            .       20ms           		oldValue, oldExists := successor.Identifiers[name] 
     68            .       10ms           		newValue, newExists := b.Identifiers[name] 
     69            .          .            
     70            .          .           		switch { 
     71            .          .           		case oldExists: 
     72            .          .           			if oldValue == newValue { 
     73            .          .           				continue 
     74            .          .           			} 
     75            .          .            
     76            .          .           			_, isStruct := oldValue.(*Struct) 
     77            .          .            
     78            .          .           			if isStruct { 
     79         10ms       10ms           				modifiedStructs = append(modifiedStructs, name) 
     80            .          .           				continue 
     81            .          .           			} 
     82            .          .            
     83            .          .           			definedLocally := successor.Index(oldValue) != -1 
     84            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).AddSuccessor

/home/user/q/src/ssa/Block.go

  Total:        20ms       70ms (flat, cum)  0.15%
    103            .          .            
    104            .          .           			for i := range phi.Arguments { 
    105            .          .           				phi.Arguments[i] = oldValue 
    106            .          .           			} 
    107            .          .            
    108            .       10ms           			successor.InsertAt(phi, 0) 
    109            .          .           			successor.Identifiers[name] = phi 
    110            .          .            
    111            .          .           			if newExists { 
    112            .          .           				phi.Arguments = append(phi.Arguments, newValue) 
    113            .          .           			} else { 
    114            .          .           				phi.Arguments = append(phi.Arguments, Undefined) 
    115            .          .           			} 
    116            .          .            
    117            .          .           		case newExists: 
    118            .          .           			phi := &Phi{ 
    119            .       20ms           				Arguments: make([]Value, len(successor.Predecessors)-1, len(successor.Predecessors)), 
    120            .       10ms           				Typ:       newValue.Type(), 
    121            .          .           			} 
    122            .          .            
    123            .          .           			for i := range phi.Arguments { 
    124            .          .           				phi.Arguments[i] = Undefined 
    125            .          .           			} 
    126            .          .            
    127            .       10ms           			successor.InsertAt(phi, 0) 
    128            .          .           			successor.Identifiers[name] = phi 
    129            .          .           			phi.Arguments = append(phi.Arguments, newValue) 
    130            .          .           		} 
    131            .          .           	} 
    132            .          .            
    133            .          .           	// Structs that were modified in branches need to be recreated 
    134            .          .           	// to use the new Phi values as their arguments. 
    135            .          .           	for _, name := range modifiedStructs { 
    136            .          .           		structure := successor.Identifiers[name].(*Struct) 
    137            .          .           		structType := structure.Typ.(*types.Struct) 
    138            .          .           		newStruct := &Struct{Typ: structType, Arguments: make(Arguments, len(structure.Arguments))} 
    139            .          .            
    140            .          .           		for i, field := range structType.Fields { 
    141            .          .           			newStruct.Arguments[i] = successor.Identifiers[name+"."+field.Name] 
    142            .          .           		} 
    143            .          .            
    144            .          .           		successor.Identifiers[name] = newStruct 
    145            .          .           	} 
    146         20ms       20ms           } 
    147            .          .            
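
AddSuccessor is where this SSA construction resolves merges: when a join block inherits the same name from multiple predecessors with different values, it gets a Phi with one argument per predecessor. A toy model of just that merge rule (hypothetical types, not the q API; real phi arguments are SSA values rather than ints):

    package main

    import "fmt"

    type phi struct{ args []int }

    // merge receives, per identifier, one value from each predecessor.
    func merge(perPred map[string][]int) map[string]any {
    	out := map[string]any{}
    	for name, vals := range perPred {
    		same := true
    		for _, v := range vals {
    			if v != vals[0] {
    				same = false
    				break
    			}
    		}
    		if same {
    			out[name] = vals[0] // predecessors agree: no phi needed
    		} else {
    			out[name] = phi{args: vals} // disagreement: insert a phi
    		}
    	}
    	return out
    }

    func main() {
    	fmt.Println(merge(map[string][]int{"x": {1, 2}, "y": {7, 7}}))
    }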

git.urbach.dev/cli/q/src/ssa.(*Block).Append

/home/user/q/src/ssa/Block.go

  Total:        70ms      110ms (flat, cum)  0.23%
    149            .          .           func (b *Block) Append(value Value) { 
    150         70ms      110ms           	b.Instructions = append(b.Instructions, value) 
    151            .          .           } 
    152            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).CanReachPredecessor

/home/user/q/src/ssa/Block.go

  Total:           0       80ms (flat, cum)  0.17%
    153            .          .           // CanReachPredecessor checks if the `other` block appears as a predecessor or is the block itself. 
    154            .          .           func (b *Block) CanReachPredecessor(other *Block) bool { 
    155            .       80ms           	return b.canReachPredecessor(other, make(map[*Block]bool)) 
    156            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*Block).canReachPredecessor

/home/user/q/src/ssa/Block.go

  Total:        40ms       80ms (flat, cum)  0.17%
    158            .          .           // canReachPredecessor checks if the `other` block appears as a predecessor or is the block itself. 
    159         10ms       10ms           func (b *Block) canReachPredecessor(other *Block, traversed map[*Block]bool) bool { 
    160            .          .           	if other == b { 
    161         10ms       10ms           		return true 
    162            .          .           	} 
    163            .          .            
    164         10ms       10ms           	if traversed[b] { 
    165            .          .           		return false 
    166            .          .           	} 
    167            .          .            
    168            .       30ms           	traversed[b] = true 
    169            .          .            
    170            .          .           	for _, pre := range b.Predecessors { 
    171         10ms       20ms           		if pre.canReachPredecessor(other, traversed) { 
    172            .          .           			return true 
    173            .          .           		} 
    174            .          .           	} 
    175            .          .            
    176            .          .           	return false 

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting

/home/user/q/src/ssa/Block.go

  Total:       180ms      340ms (flat, cum)  0.71%
    180            .          .           func (b *Block) Contains(value Value) bool { 
    181            .          .           	return b.Index(value) != -1 
    182            .          .           } 
    183            .          .            
    184            .          .           // FindExisting returns an equal instruction that's already appended or `nil` if none could be found. 
    185         10ms       10ms           func (b *Block) FindExisting(instr Value) Value { 
    186         70ms      130ms           	if !instr.IsPure() { 
    187            .          .           		return nil 
    188            .          .           	} 
    189            .          .            
    190        100ms      200ms           		for _, existing := range slices.Backward(b.Instructions) { 

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting-range1

/home/user/q/src/ssa/Block.go

  Total:        50ms      150ms (flat, cum)  0.31%
    191         20ms      120ms           		if existing.IsPure() && instr.Equals(existing) { 
    192            .          .           			return existing 
    193            .          .           		} 
    194            .          .            
    195            .          .           		// If we encounter a call, we can't be sure that the value is still the same. 
    196            .          .           		// TODO: This is a bit too conservative. We could check if the call affects the value. 
    197         30ms       30ms           		switch existing.(type) { 
    198            .          .           		case *Call, *CallExtern: 

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting

/home/user/q/src/ssa/Block.go

  Total:        80ms       80ms (flat, cum)  0.17%
    200            .          .           		} 
    201         70ms       70ms           	} 
    202            .          .            
    203         10ms       10ms           	return nil 
    204            .          .           } 
    205            .          .            
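
FindExisting is on-the-fly common-subexpression elimination: scan the block backwards for a pure, equal instruction, but give up at the first call, since a call may invalidate anything it can reach. A compact model of the same rule (hypothetical instr type; the real comparison is instr.Equals):

    type instr struct {
    	op   string
    	arg  int
    	pure bool
    }

    // findExisting returns an earlier equal pure value, stopping
    // the backward scan at any call instruction.
    func findExisting(instrs []instr, want instr) (instr, bool) {
    	for i := len(instrs) - 1; i >= 0; i-- {
    		ex := instrs[i]
    		if ex.pure && ex.op == want.op && ex.arg == want.arg {
    			return ex, true
    		}
    		if ex.op == "call" { // a call may change observable state
    			break
    		}
    	}
    	return instr{}, false
    }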

git.urbach.dev/cli/q/src/ssa.(*Block).FindIdentifier

/home/user/q/src/ssa/Block.go

  Total:           0      310ms (flat, cum)  0.65%
    207            .          .           // can have and combines them into a phi instruction if necessary. 
    208            .          .           func (b *Block) FindIdentifier(name string) (value Value, exists bool) { 
    209            .      310ms           	value, exists = b.Identifiers[name] 
    210            .          .           	return 
    211            .          .           } 
    212            .          .            
    213            .          .           // IdentifiersFor returns an iterator for all the identifiers pointing to the given value. 
    214            .          .           func (b *Block) IdentifiersFor(value Value) iter.Seq[string] { 

git.urbach.dev/cli/q/src/ssa.(*Block).Identify

/home/user/q/src/ssa/Block.go

  Total:        20ms      510ms (flat, cum)  1.07%
    223            .          .           	} 
    224            .          .           } 
    225            .          .            
    226            .          .           // Identify adds a new identifier or changes an existing one. 
    227            .          .           func (b *Block) Identify(name string, value Value) { 
    228         10ms       10ms           	if b.Identifiers == nil { 
    229            .       60ms           		b.Identifiers = make(map[string]Value, 8) 
    230            .          .           	} 
    231            .          .            
    232         10ms      440ms           	b.Identifiers[name] = value 
    233            .          .           } 
    234            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).IsIdentified

/home/user/q/src/ssa/Block.go

  Total:        10ms       80ms (flat, cum)  0.17%
    235            .          .           // IsIdentified returns true if the value can be obtained from one of the identifiers. 
    236            .          .           func (b *Block) IsIdentified(value Value) bool { 
    237         10ms       80ms           	for _, existing := range b.Identifiers { 
    238            .          .           		if existing == value { 
    239            .          .           			return true 
    240            .          .           		} 
    241            .          .           	} 
    242            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).InsertAt

/home/user/q/src/ssa/Block.go

  Total:           0       20ms (flat, cum) 0.042%
    254            .          .           	return -1 
    255            .          .           } 
    256            .          .            
    257            .          .           // InsertAt inserts the `value` at the given `index`. 
    258            .          .           func (b *Block) InsertAt(value Value, index int) { 
    259            .       20ms           	b.Instructions = slices.Insert(b.Instructions, index, value) 
    260            .          .           } 
    261            .          .            
    262            .          .           // Last returns the last value. 
    263            .          .           func (b *Block) Last() Value { 
    264            .          .           	if len(b.Instructions) == 0 { 

git.urbach.dev/cli/q/src/ssa.(*Block).Phis

/home/user/q/src/ssa/Block.go

  Total:        10ms       30ms (flat, cum) 0.063%
    268            .          .           	return b.Instructions[len(b.Instructions)-1] 
    269            .          .           } 
    270            .          .            
    271            .          .           // Phis is an iterator for all phis at the top of the block. 
    272            .          .           func (b *Block) Phis(yield func(*Phi) bool) { 
    273         10ms       10ms           	for _, instr := range b.Instructions { 
    274            .          .           		phi, isPhi := instr.(*Phi) 
    275            .          .            
    276            .       20ms           		if !isPhi || !yield(phi) { 
    277            .          .           			return 
    278            .          .           		} 
    279            .          .           	} 
    280            .          .           } 
    281            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).RemoveAt

/home/user/q/src/ssa/Block.go

  Total:        10ms       10ms (flat, cum) 0.021%
    285            .          .            
    286            .          .           	for _, input := range value.Inputs() { 
    287            .          .           		input.RemoveUser(value) 
    288            .          .           	} 
    289            .          .            
    290         10ms       10ms           	b.Instructions[index] = nil 
    291            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*Block).RemoveNilValues

/home/user/q/src/ssa/Block.go

  Total:        20ms      100ms (flat, cum)  0.21%
    293            .          .           // RemoveNilValues removes all nil values from the block. 
    294         10ms       10ms           func (b *Block) RemoveNilValues() { 
    295         10ms       90ms           	b.Instructions = slices.DeleteFunc(b.Instructions, func(value Value) bool { 

git.urbach.dev/cli/q/src/ssa.(*Block).RemoveNilValues.func1

/home/user/q/src/ssa/Block.go

  Total:        10ms       10ms (flat, cum) 0.021%
    296         10ms       10ms           		return value == nil 
    297            .          .           	}) 
    298            .          .           } 
    299            .          .            
    300            .          .           // ReplaceAllUses replaces all uses of `old` with `new`. 
    301            .          .           func (b *Block) ReplaceAllUses(old Value, new Value) { 

git.urbach.dev/cli/q/src/ssa.(*Block).Unidentify

/home/user/q/src/ssa/Block.go

  Total:           0       70ms (flat, cum)  0.15%
    309            .          .           	return CleanLabel(b.Label) 
    310            .          .           } 
    311            .          .            
    312            .          .           // Unidentify deletes the identifier for the given value. 
    313            .          .           func (b *Block) Unidentify(value Value) { 
    314            .       20ms           	for name, existing := range b.Identifiers { 
    315            .          .           		if existing == value { 
    316            .       50ms           			delete(b.Identifiers, name) 
    317            .          .           			return 
    318            .          .           		} 
    319            .          .           	} 
    320            .          .           } 
    321            .          .            

git.urbach.dev/cli/q/src/token.Tokenize

/home/user/q/src/token/Tokenize.go

  Total:       470ms      1.15s (flat, cum)  2.41%
      2            .          .            
      3            .          .           // Tokenize turns the file contents into a list of tokens. 
      4            .          .           func Tokenize(buffer []byte) List { 
      5            .          .           	var ( 
      6            .          .           		i      Position 
      7            .      200ms           		tokens = make(List, 0, 8+len(buffer)/2) 
      8            .          .           	) 
      9            .          .            
     10         40ms       40ms           	for i < Position(len(buffer)) { 
     11        160ms      160ms           		switch buffer[i] { 
     12         20ms       20ms           		case ' ', '\t', '\r': 
     13            .          .           		case ',': 
     14            .          .           			tokens = append(tokens, Token{Kind: Separator, Position: i, Length: 1}) 
     15            .          .           		case '(': 
     16         20ms       20ms           			tokens = append(tokens, Token{Kind: GroupStart, Position: i, Length: 1}) 
     17            .          .           		case ')': 
     18         10ms       10ms           			tokens = append(tokens, Token{Kind: GroupEnd, Position: i, Length: 1}) 
     19         30ms       30ms           		case '{': 
     20            .          .           			tokens = append(tokens, Token{Kind: BlockStart, Position: i, Length: 1}) 
     21         20ms       20ms           		case '}': 
     22            .          .           			tokens = append(tokens, Token{Kind: BlockEnd, Position: i, Length: 1}) 
     23            .          .           		case '[': 
     24         10ms       10ms           			tokens = append(tokens, Token{Kind: ArrayStart, Position: i, Length: 1}) 
     25         20ms       20ms           		case ']': 
     26            .          .           			tokens = append(tokens, Token{Kind: ArrayEnd, Position: i, Length: 1}) 
     27            .          .           		case '\n': 
     28         30ms       30ms           			tokens = append(tokens, Token{Kind: NewLine, Position: i, Length: 1}) 
     29         10ms       10ms           		case '-': 
     30            .       10ms           			tokens, i = dash(tokens, buffer, i) 
     31         10ms       10ms           		case '/': 
     32            .       10ms           			tokens, i = slash(tokens, buffer, i) 
     33            .          .           			continue 
     34            .          .           		case '"', '\'': 
     35            .       10ms           			tokens, i = quote(tokens, buffer, i) 
     36            .          .           			continue 
     37            .          .           		case '0': 
     38            .       20ms           			tokens, i = zero(tokens, buffer, i) 
     39            .          .           			continue 
     40            .          .           		case '#': 
     41            .          .           			tokens, i = hash(tokens, buffer, i) 
     42            .          .           			continue 
     43            .          .           		default: 
     44         10ms       10ms           			if isIdentifierStart(buffer[i]) { 
     45         20ms      330ms           				tokens, i = identifier(tokens, buffer, i) 
     46         20ms       20ms           				continue 
     47            .          .           			} 
     48            .          .            
     49            .          .           			if isDigit(buffer[i]) { 
     50            .       10ms           				tokens, i = digit(tokens, buffer, i) 
     51            .          .           				continue 
     52            .          .           			} 
     53            .          .            
     54         20ms       20ms           			if isOperator(buffer[i]) { 
     55         10ms      120ms           				tokens, i = operator(tokens, buffer, i) 
     56            .          .           				continue 
     57            .          .           			} 
     58            .          .            
     59            .          .           			tokens = append(tokens, Token{Kind: Invalid, Position: i, Length: 1}) 
     60            .          .           		} 
     61            .          .            
     62         10ms       10ms           		i++ 
     63            .          .           	} 
     64            .          .            
     65            .          .           	tokens = append(tokens, Token{Kind: EOF, Position: i, Length: 0}) 
     66            .          .           	return tokens 
     67            .          .           } 
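
Two details worth calling out in Tokenize: the token slice is pre-sized with the heuristic 8+len(buffer)/2 (roughly one token per two input bytes), which keeps append growth off the hot loop, and all single-byte tokens dispatch through one switch on buffer[i]. Usage is just (sketch, assuming the List and Token types as shown in the listing):

    tokens := token.Tokenize([]byte("x := 42\n"))
    for _, t := range tokens {
    	fmt.Println(t.Kind, t.Position, t.Length)
    }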

internal/runtime/maps.newTable

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0      320ms (flat, cum)  0.67%
     74            .          .           func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint8) *table { 
     75            .          .           	if capacity < abi.SwissMapGroupSlots { 
     76            .          .           		capacity = abi.SwissMapGroupSlots 
     77            .          .           	} 
     78            .          .            
     79            .       10ms           	t := &table{ 
     80            .          .           		index:      index, 
     81            .          .           		localDepth: localDepth, 
     82            .          .           	} 
     83            .          .            
     84            .          .           	if capacity > maxTableCapacity { 
     85            .          .           		panic("initial table capacity too large") 
     86            .          .           	} 
     87            .          .            
     88            .          .           	// N.B. group count must be a power of two for probeSeq to visit every 
     89            .          .           	// group. 
     90            .          .           	capacity, overflow := alignUpPow2(capacity) 
     91            .          .           	if overflow { 
     92            .          .           		panic("rounded-up capacity overflows uint64") 
     93            .          .           	} 
     94            .          .            
     95            .      310ms           	t.reset(typ, uint16(capacity)) 
     96            .          .            
     97            .          .           	return t 
     98            .          .           } 

internal/runtime/maps.(*table).reset

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms      310ms (flat, cum)  0.65%
     99            .          .            
    100            .          .           // reset resets the table with new, empty groups with the specified new total 
    101            .          .           // capacity. 
    102         10ms       10ms           func (t *table) reset(typ *abi.SwissMapType, capacity uint16) { 
    103            .          .           	groupCount := uint64(capacity) / abi.SwissMapGroupSlots 
    104            .      300ms           	t.groups = newGroups(typ, groupCount) 
    105            .          .           	t.capacity = capacity 
    106            .          .           	t.growthLeft = t.maxGrowthLeft() 
    107            .          .            
    108            .          .           	for i := uint64(0); i <= t.groups.lengthMask; i++ { 
    109            .          .           		g := t.groups.group(typ, i) 

internal/runtime/maps.(*table).Delete

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
    419            .          .            
    420            .          .           // Delete returns true if it put a tombstone in t. 
    421            .          .           func (t *table) Delete(typ *abi.SwissMapType, m *Map, hash uintptr, key unsafe.Pointer) bool { 
    422            .          .           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    423            .          .           	for ; ; seq = seq.next() { 
    424         10ms       10ms           		g := t.groups.group(typ, seq.offset) 
    425            .          .           		match := g.ctrls().matchH2(h2(hash)) 
    426            .          .            
    427            .          .           		for match != 0 { 
    428            .          .           			i := match.first() 
    429            .          .            

internal/runtime/maps.(*table).Delete

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0       10ms (flat, cum) 0.021%
    454            .          .           					// Unlike keys, always clear the elem (even if 
    455            .          .           					// it contains no pointers), as compound 
    456            .          .           					// assignment operations depend on cleared 
    457            .          .           					// deleted values. See 
    458            .          .           					// https://go.dev/issue/25936. 
    459            .       10ms           					typedmemclr(typ.Elem, slotElem) 
    460            .          .           				} 
    461            .          .            
    462            .          .           				// Only a full group can appear in the middle 
    463            .          .           				// of a probe sequence (a group with at least 
    464            .          .           				// one empty slot terminates probing). Once a 

internal/runtime/maps.(*Iter).Init

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:       140ms      220ms (flat, cum)  0.46%
    646            .          .           	// are the group index. 
    647            .          .           	entryIdx uint64 
    648            .          .           } 
    649            .          .            
    650            .          .           // Init initializes Iter for iteration. 
    651         30ms       30ms           func (it *Iter) Init(typ *abi.SwissMapType, m *Map) { 
    652         20ms       20ms           	it.typ = typ 
    653            .          .            
    654         40ms       40ms           	if m == nil || m.used == 0 { 
    655         20ms       20ms           		return 
    656            .          .           	} 
    657            .          .            
    658            .          .           	dirIdx := 0 
    659            .          .           	var groupSmall groupReference 
    660         10ms       10ms           	if m.dirLen <= 0 { 
    661            .          .           		// Use dirIdx == -1 as sentinel for small maps. 
    662            .          .           		dirIdx = -1 
    663            .          .           		groupSmall.data = m.dirPtr 
    664            .          .           	} 
    665            .          .            
    666            .          .           	it.m = m 
    667            .       30ms           	it.entryOffset = rand() 
    668         10ms       60ms           	it.dirOffset = rand() 
    669         10ms       10ms           	it.globalDepth = m.globalDepth 
    670            .          .           	it.dirIdx = dirIdx 
    671            .          .           	it.group = groupSmall 
    672            .          .           	it.clearSeq = m.clearSeq 
    673            .          .           } 
    674            .          .            
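
The two rand() calls above are why Go map iteration order differs from run to run: both the starting entry offset and the starting directory offset are randomized when the iterator is initialized. Easy to observe (output order varies across runs):

    package main

    import "fmt"

    func main() {
    	m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}
    	for k := range m {
    		fmt.Print(k, " ") // order varies between runs
    	}
    	fmt.Println()
    }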

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:       230ms      230ms (flat, cum)  0.48%
    778            .          .           // 
    779            .          .           // The table can be mutated during iteration, though there is no guarantee that 
    780            .          .           // the mutations will be visible to the iteration. 
    781            .          .           // 
    782            .          .           // Init must be called prior to Next. 
    783         10ms       10ms           func (it *Iter) Next() { 
    784            .          .           	if it.m == nil { 
    785            .          .           		// Map was empty at Iter.Init. 
    786         10ms       10ms           		it.key = nil 
    787            .          .           		it.elem = nil 
    788            .          .           		return 
    789            .          .           	} 
    790            .          .            
    791         10ms       10ms           	if it.m.writing != 0 { 
    792            .          .           		fatal("concurrent map iteration and map write") 
    793            .          .           		return 
    794            .          .           	} 
    795            .          .            
    796            .          .           	if it.dirIdx < 0 { 
    797            .          .           		// Map was small at Init. 
    798         70ms       70ms           		for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ { 
    799         10ms       10ms           			k := uintptr(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots 
    800            .          .            
    801         90ms       90ms           			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty { 
                                                             return *(*ctrl)(unsafe.Add(unsafe.Pointer(g), i))    group.go:134

    802            .          .           				// Empty or deleted. 
    803            .          .           				continue 
    804            .          .           			} 
    805            .          .            
    806         20ms       20ms           			key := it.group.key(it.typ, k) 
                                                             offset := groupSlotsOffset + i*typ.SlotSize          group.go:285

    807            .          .           			if it.typ.IndirectKey() { 
    808            .          .           				key = *((*unsafe.Pointer)(key)) 
    809            .          .           			} 
    810            .          .            
    811            .          .           			// As below, if we have grown to a full map since Init, 
    812            .          .           			// we continue to use the old group to decide the keys 
    813            .          .           			// to return, but must look them up again in the new 
    814            .          .           			// tables. 
    815         10ms       10ms           			grown := it.m.dirLen > 0 
    816            .          .           			var elem unsafe.Pointer 
    817            .          .           			if grown { 
    818            .          .           				var ok bool 
    819            .          .           				newKey, newElem, ok := it.m.getWithKey(it.typ, key) 
    820            .          .           				if !ok { 
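
The comment above describes the growth contract this code implements: keys are taken from the snapshot of the old group, then looked up again in the current tables. That re-lookup is what upholds the language-level guarantee that entries deleted during iteration are never produced afterwards, while entries added during iteration may or may not appear. A small sketch of that guarantee (illustrative, not from the profiled test):

    package main

    import "fmt"

    func main() {
    	m := map[int]string{1: "one", 2: "two", 3: "three"}
    	for k := range m {
    		if k == 1 {
    			delete(m, 2)       // 2 is never yielded after this point
    			m[100] = "hundred" // may or may not be yielded
    		}
    		fmt.Println(k)
    	}
    }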

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        20ms       20ms (flat, cum) 0.042%
    839            .          .           			} 
    840            .          .            
    841            .          .           			it.entryIdx++ 
    842            .          .           			it.key = key 
    843            .          .           			it.elem = elem 
    844         10ms       10ms           			return 
    845            .          .           		} 
    846         10ms       10ms           		it.key = nil 
    847            .          .           		it.elem = nil 
    848            .          .           		return 
    849            .          .           	} 
    850            .          .            
    851            .          .           	if it.globalDepth != it.m.globalDepth { 

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
    885            .          .            
    886            .          .           		it.globalDepth = it.m.globalDepth 
    887            .          .           	} 
    888            .          .            
    889            .          .           	// Continue iteration until we find a full slot. 
    890         10ms       10ms           	for ; it.dirIdx < it.m.dirLen; it.nextDirIdx() { 
    891            .          .           		// Resolve the table. 
    892            .          .           		if it.tab == nil { 
    893            .          .           			dirIdx := int((uint64(it.dirIdx) + it.dirOffset) & uint64(it.m.dirLen-1)) 
    894            .          .           			newTab := it.m.directoryAt(uintptr(dirIdx)) 
    895            .          .           			if newTab.index != dirIdx { 

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
    930            .          .           		// However, with a max load factor of 7/8, each slot in a 
    931            .          .           		// mostly full map has a high probability of being full. Thus 
    932            .          .           		// it is cheaper to check a single slot than do a full control 
    933            .          .           		// match. 
    934            .          .            
    935         10ms       10ms           		entryIdx := (it.entryIdx + it.entryOffset) & entryMask 
    936            .          .           		slotIdx := uintptr(entryIdx & (abi.SwissMapGroupSlots - 1)) 
    937            .          .           		if slotIdx == 0 || it.group.data == nil { 
    938            .          .           			// Only compute the group (a) when we switch 
    939            .          .           			// groups (slotIdx rolls over) and (b) on the 
    940            .          .           			// first iteration in this table (slotIdx may 

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1093            .          .           				// this group. Continue 
   1094            .          .           				// to next group. 
   1095            .          .           				it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx) 
   1096            .          .           			} else { 
   1097            .          .           				// Next full slot. 
   1098         10ms       10ms           				i := groupMatch.first() 
                                                             return bitsetFirst(b)                                group.go:50
                                                             return uintptr(sys.TrailingZeros64(uint64(b))) >> 3  group.go:58

   1099            .          .           				it.entryIdx += uint64(i - slotIdx) 
   1100            .          .           			} 
   1101            .          .            
   1102            .          .           			it.key = key 
   1103            .          .           			it.elem = elem 

internal/runtime/maps.(*table).rehash

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0       10ms (flat, cum) 0.021%
   1131            .          .           	// new allocation, so the existing grow support in iteration would 
   1132            .          .           	// continue to work. 
   1133            .          .            
   1134            .          .           	newCapacity := 2 * t.capacity 
   1135            .          .           	if newCapacity <= maxTableCapacity { 
   1136            .       10ms           		t.grow(typ, m, newCapacity) 
   1137            .          .           		return 
   1138            .          .           	} 
   1139            .          .            
   1140            .          .           	t.split(typ, m) 
   1141            .          .           } 

internal/runtime/maps.(*table).grow

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0       10ms (flat, cum) 0.021%
   1196            .          .           // grow the capacity of the table by allocating a new table with a bigger array 
   1197            .          .           // and uncheckedPutting each element of the table into the new table (we know 
   1198            .          .           // that no insertion here will Put an already-present value), and discard the 
   1199            .          .           // old table. 
   1200            .          .           func (t *table) grow(typ *abi.SwissMapType, m *Map, newCapacity uint16) { 
   1201            .       10ms           	newTable := newTable(typ, uint64(newCapacity), t.index, t.localDepth) 
   1202            .          .            
   1203            .          .           	if t.capacity > 0 { 
   1204            .          .           		for i := uint64(0); i <= t.groups.lengthMask; i++ { 
   1205            .          .           			g := t.groups.group(typ, i) 
   1206            .          .           			for j := uintptr(0); j < abi.SwissMapGroupSlots; j++ { 
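
Since rehash doubles the capacity and grow re-inserts every element, filling an unsized map pays this cost repeatedly. If the final size is known up front, a capacity hint to make sidesteps most of the grow/split work. A hedged sketch, with illustrative sizes:

    package main

    func main() {
    	const n = 1 << 16

    	grown := make(map[int]int) // starts small, grows and rehashes repeatedly while filling
    	for i := 0; i < n; i++ {
    		grown[i] = i
    	}

    	sized := make(map[int]int, n) // capacity hint avoids the repeated grow/split path
    	for i := 0; i < n; i++ {
    		sized[i] = i
    	}

    	_, _ = grown, sized
    }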

runtime.gclinkptr.ptr

/usr/lib/go/src/runtime/mcache.go

  Total:        10ms       10ms (flat, cum) 0.021%
     71            .          .            
     72            .          .           // ptr returns the *gclink form of p. 
     73            .          .           // The result should be used for accessing fields, not stored 
     74            .          .           // in other data structures. 
     75            .          .           func (p gclinkptr) ptr() *gclink { 
     76         10ms       10ms           	return (*gclink)(unsafe.Pointer(p)) 
     77            .          .           } 
     78            .          .            
     79            .          .           type stackfreelist struct { 
     80            .          .           	list gclinkptr // linked list of free stacks 
     81            .          .           	size uintptr   // total size of stacks in list 

runtime.getMCache

/usr/lib/go/src/runtime/mcache.go

  Total:       240ms      240ms (flat, cum)   0.5%
    125            .          .           // 
    126            .          .           // Returns nil if we're not bootstrapping or we don't have a P. The caller's 
    127            .          .           // P must not change, so we must be in a non-preemptible state. 
    128            .          .           func getMCache(mp *m) *mcache { 
    129            .          .           	// Grab the mcache, since that's where stats live. 
    130         50ms       50ms           	pp := mp.p.ptr() 
    131            .          .           	var c *mcache 
    132         80ms       80ms           	if pp == nil { 
    133            .          .           		// We will be called without a P while bootstrapping, 
    134            .          .           		// in which case we use mcache0, which is set in mallocinit. 
    135            .          .           		// mcache0 is cleared when bootstrapping is complete, 
    136            .          .           		// by procresize. 
    137            .          .           		c = mcache0 
    138            .          .           	} else { 
    139        110ms      110ms           		c = pp.mcache 
    140            .          .           	} 
    141            .          .           	return c 
    142            .          .           } 
    143            .          .            

runtime.(*mcache).refill

/usr/lib/go/src/runtime/mcache.go

  Total:        60ms      1.99s (flat, cum)  4.17%
    145            .          .           // have at least one free object. The current span in c must be full. 
    146            .          .           // 
    147            .          .           // Must run in a non-preemptible context since otherwise the owner of 
    148            .          .           // c could change. 
    149         10ms       10ms           func (c *mcache) refill(spc spanClass) { 
    150            .          .           	// Return the current cached span to the central lists. 
    151            .          .           	s := c.alloc[spc] 
    152            .          .            
    153            .          .           	if s.allocCount != s.nelems { 
    154            .          .           		throw("refill of span with free space remaining") 
    155            .          .           	} 
    156            .          .           	if s != &emptymspan { 
    157            .          .           		// Mark this span as no longer cached. 
    158            .          .           		if s.sweepgen != mheap_.sweepgen+3 { 
    159            .          .           			throw("bad sweepgen in refill") 
    160            .          .           		} 
    161            .      720ms           		mheap_.central[spc].mcentral.uncacheSpan(s) 
    162            .          .            
    163            .          .           		// Count up how many slots were used and record it. 
    164            .       70ms           		stats := memstats.heapStats.acquire() 
    165            .          .           		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache) 
    166         10ms       10ms           		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed) 
    167            .          .            
    168            .          .           		// Flush tinyAllocs. 
    169            .          .           		if spc == tinySpanClass { 
    170            .          .           			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) 
    171            .          .           			c.tinyAllocs = 0 
    172            .          .           		} 
    173            .       20ms           		memstats.heapStats.release() 
    174            .          .            
    175            .          .           		// Count the allocs in inconsistent, internal stats. 
    176            .          .           		bytesAllocated := slotsUsed * int64(s.elemsize) 
    177         40ms       40ms           		gcController.totalAlloc.Add(bytesAllocated) 
                                                             return Xadd64(&u.value, delta)                       types.go:344
    178            .          .            
    179            .          .           		// Clear the second allocCount just to be safe. 
    180            .          .           		s.allocCountBeforeCache = 0 
    181            .          .           	} 
    182            .          .            
    183            .          .           	// Get a new cached span from the central lists. 
    184            .      1.12s           	s = mheap_.central[spc].mcentral.cacheSpan() 
    185            .          .           	if s == nil { 
    186            .          .           		throw("out of memory") 
    187            .          .           	} 
    188            .          .            
    189            .          .           	if s.allocCount == s.nelems { 

runtime.(*mcache).refill

/usr/lib/go/src/runtime/mcache.go

  Total:           0       70ms (flat, cum)  0.15%
    209            .          .           	// We pick an overestimate here because an underestimate leads 
    210            .          .           	// the pacer to believe that it's in better shape than it is, 
    211            .          .           	// which appears to lead to more memory used. See #53738 for 
    212            .          .           	// more details. 
    213            .          .           	usedBytes := uintptr(s.allocCount) * s.elemsize 
    214            .       70ms           	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc)) 
    215            .          .           	c.scanAlloc = 0 
    216            .          .            
    217            .          .           	c.alloc[spc] = s 
    218            .          .           } 
    219            .          .            

runtime.(*mcache).releaseAll

/usr/lib/go/src/runtime/mcache.go

  Total:        10ms       20ms (flat, cum) 0.042%
    284            .          .           			atomic.Xadd64(&stats.smallAllocCount[spanClass(i).sizeclass()], slotsUsed) 
    285            .          .           			memstats.heapStats.release() 
    286            .          .            
    287            .          .           			// Adjust the actual allocs in inconsistent, internal stats. 
    288            .          .           			// We assumed earlier that the full span gets allocated. 
    289         10ms       10ms           			gcController.totalAlloc.Add(slotsUsed * int64(s.elemsize)) 
                                                             return Xadd64(&u.value, delta)                       types.go:344

    290            .          .            
    291            .          .           			if s.sweepgen != sg+1 { 
    292            .          .           				// refill conservatively counted unallocated slots in gcController.heapLive. 
    293            .          .           				// Undo this. 
    294            .          .           				// 
    295            .          .           				// If this span was cached before sweep, then gcController.heapLive was totally 
    296            .          .           				// recomputed since caching this span, so we don't do this for stale spans. 
    297            .          .           				dHeapLive -= int64(s.nelems-s.allocCount) * int64(s.elemsize) 
    298            .          .           			} 
    299            .          .            
    300            .          .           			// Release the span to the mcentral. 
    301            .       10ms           			mheap_.central[i].mcentral.uncacheSpan(s) 
    302            .          .           			c.alloc[i] = &emptymspan 
    303            .          .           		} 
    304            .          .           	} 
    305            .          .           	// Clear tinyalloc pool. 
    306            .          .           	c.tiny = 0 

runtime.(*mcache).prepareForSweep

/usr/lib/go/src/runtime/mcache.go

  Total:        90ms       90ms (flat, cum)  0.19%
    325            .          .           	// could leave allocate-black on, allow allocation to continue 
    326            .          .           	// as usual, use a ragged barrier at the beginning of sweep to 
    327            .          .           	// ensure all cached spans are swept, and then disable 
    328            .          .           	// allocate-black. However, with this approach it's difficult 
    329            .          .           	// to avoid spilling mark bits into the *next* GC cycle. 
    330         40ms       40ms           	sg := mheap_.sweepgen 
    331            .          .           	flushGen := c.flushGen.Load() 
    332         50ms       50ms           	if flushGen == sg { 
    333            .          .           		return 
    334            .          .           	} else if flushGen != sg-2 { 
    335            .          .           		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg) 
    336            .          .           		throw("bad flushGen") 
    337            .          .           	} 

runtime.memmove

/usr/lib/go/src/runtime/memmove_arm64.s

  Total:       300ms      300ms (flat, cum)  0.63%
     28            .          .           // func memmove(to, from unsafe.Pointer, n uintptr) 
     29            .          .           TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24 
     30            .          .           	CBZ	R2, copy0 
     31            .          .            
     32            .          .           	// Small copies: 1..16 bytes 
     33         10ms       10ms           	CMP	$16, R2 
     34         10ms       10ms           	BLE	copy16 
     35            .          .            
     36            .          .           	// Large copies 
     37            .          .           	CMP	$128, R2 
     38            .          .           	BHI	copy_long 
     39            .          .           	CMP	$32, R2 
     40            .          .           	BHI	copy32_128 
     41            .          .            
     42            .          .           	// Small copies: 17..32 bytes. 
     43            .          .           	LDP	(R1), (R6, R7) 
     44            .          .           	ADD	R1, R2, R4          // R4 points just past the last source byte 
     45            .          .           	LDP	-16(R4), (R12, R13) 
     46         10ms       10ms           	STP	(R6, R7), (R0) 
     47         10ms       10ms           	ADD	R0, R2, R5          // R5 points just past the last destination byte 
     48            .          .           	STP	(R12, R13), -16(R5) 
     49            .          .           	RET 
     50            .          .            
     51            .          .           // Small copies: 1..16 bytes. 
     52            .          .           copy16: 
     53            .          .           	ADD	R1, R2, R4 // R4 points just past the last source byte 
     54            .          .           	ADD	R0, R2, R5 // R5 points just past the last destination byte 
     55            .          .           	CMP	$8, R2 
     56            .          .           	BLT	copy7 
     57            .          .           	MOVD	(R1), R6 
     58         90ms       90ms           	MOVD	-8(R4), R7 
     59            .          .           	MOVD	R6, (R0) 
     60         20ms       20ms           	MOVD	R7, -8(R5) 
     61            .          .           	RET 
     62            .          .            
     63            .          .           copy7: 
     64         10ms       10ms           	TBZ	$2, R2, copy3 
     65            .          .           	MOVWU	(R1), R6 
     66         70ms       70ms           	MOVWU	-4(R4), R7 
     67            .          .           	MOVW	R6, (R0) 
     68            .          .           	MOVW	R7, -4(R5) 
     69            .          .           	RET 
     70            .          .            
     71            .          .           copy3: 
     72            .          .           	TBZ	$1, R2, copy1 
     73            .          .           	MOVHU	(R1), R6 
     74         20ms       20ms           	MOVHU	-2(R4), R7 
     75            .          .           	MOVH	R6, (R0) 
     76            .          .           	MOVH	R7, -2(R5) 
     77            .          .           	RET 
     78            .          .            
     79            .          .           copy1: 
     80            .          .           	MOVBU	(R1), R6 
     81            .          .           	MOVB	R6, (R0) 
     82            .          .            
     83            .          .           copy0: 
     84            .          .           	RET 
     85            .          .            
     86            .          .           	// Medium copies: 33..128 bytes. 
     87            .          .           copy32_128: 
     88            .          .           	ADD	R1, R2, R4          // R4 points just past the last source byte 
     89         10ms       10ms           	ADD	R0, R2, R5          // R5 points just past the last destination byte 
     90            .          .           	LDP	(R1), (R6, R7) 
     91            .          .           	LDP	16(R1), (R8, R9) 
     92            .          .           	LDP	-32(R4), (R10, R11) 
     93            .          .           	LDP	-16(R4), (R12, R13) 
     94            .          .           	CMP	$64, R2 
     95            .          .           	BHI	copy128 
     96            .          .           	STP	(R6, R7), (R0) 
     97            .          .           	STP	(R8, R9), 16(R0) 
     98         10ms       10ms           	STP	(R10, R11), -32(R5) 
     99         10ms       10ms           	STP	(R12, R13), -16(R5) 
    100            .          .           	RET 
    101            .          .            
    102            .          .           	// Copy 65..128 bytes. 
    103            .          .           copy128: 
    104            .          .           	LDP	32(R1), (R14, R15) 
    105            .          .           	LDP	48(R1), (R16, R17) 
    106            .          .           	CMP	$96, R2 
    107            .          .           	BLS	copy96 
    108            .          .           	LDP	-64(R4), (R2, R3) 
    109            .          .           	LDP	-48(R4), (R1, R4) 
    110            .          .           	STP	(R2, R3), -64(R5) 
    111            .          .           	STP	(R1, R4), -48(R5) 
    112            .          .            
    113            .          .           copy96: 
    114            .          .           	STP	(R6, R7), (R0) 
    115            .          .           	STP	(R8, R9), 16(R0) 
    116            .          .           	STP	(R14, R15), 32(R0) 
    117            .          .           	STP	(R16, R17), 48(R0) 
    118            .          .           	STP	(R10, R11), -32(R5) 
    119         10ms       10ms           	STP	(R12, R13), -16(R5) 
    120            .          .           	RET 
    121            .          .            
    122            .          .           	// Copy more than 128 bytes. 
    123            .          .           copy_long: 
    124         10ms       10ms           	ADD	R1, R2, R4 // R4 points just past the last source byte 
    125            .          .           	ADD	R0, R2, R5 // R5 points just past the last destination byte 
    126            .          .           	MOVD	ZR, R7 
    127            .          .           	MOVD	ZR, R8 
    128            .          .            
    129            .          .           	CMP	$1024, R2 

runtime.memmove

/usr/lib/go/src/runtime/memmove_arm64.s

  Total:       100ms      100ms (flat, cum)  0.21%
    155            .          .           	AND	$15, R7, R14         // Calculate the realignment offset 
    156            .          .           	SUB	R14, R1, R1 
    157            .          .           	SUB	R14, R0, R3          // move dst back same amount as src 
    158            .          .           	ADD	R14, R2, R2 
    159            .          .           	LDP	16(R1), (R6, R7)     // Load   B 
    160         10ms       10ms           	STP	(R12, R13), (R0)     // Store A 
    161            .          .           	LDP	32(R1), (R8, R9)     // Load    C 
    162            .          .           	LDP	48(R1), (R10, R11)   // Load     D 
    163            .          .           	LDP.W	64(R1), (R12, R13)   // Load      E 
    164            .          .           	// 80 bytes have been loaded; if less than 80+64 bytes remain, copy from the end 
    165            .          .           	SUBS	$144, R2, R2 
    166            .          .           	BLS	copy64_from_end 
    167            .          .            
    168            .          .           loop64: 
    169         10ms       10ms           	STP	(R6, R7), 16(R3)     // Store  B 
    170         10ms       10ms           	LDP	16(R1), (R6, R7)     // Load   B (next iteration) 
    171            .          .           	STP	(R8, R9), 32(R3)     // Store   C 
    172         10ms       10ms           	LDP	32(R1), (R8, R9)     // Load    C 
    173         10ms       10ms           	STP	(R10, R11), 48(R3)   // Store    D 
    174         30ms       30ms           	LDP	48(R1), (R10, R11)   // Load     D 
    175            .          .           	STP.W	(R12, R13), 64(R3)   // Store     E 
    176         10ms       10ms           	LDP.W	64(R1), (R12, R13)   // Load      E 
    177         10ms       10ms           	SUBS	$64, R2, R2 
    178            .          .           	BHI	loop64 
    179            .          .            
    180            .          .           	// Write the last iteration and copy 64 bytes from the end. 
    181            .          .           copy64_from_end: 
    182            .          .           	LDP	-64(R4), (R14, R15)  // Load       F 
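
copy, append growth, and string conversions all funnel into runtime.memmove, and the branches above pick a strategy by size: 1..16 bytes, 17..32, 33..128, then the loop64 path for anything larger. An illustrative example of a copy that would take the copy_long path:

    package main

    func main() {
    	src := make([]byte, 4096)
    	dst := make([]byte, 4096)
    	copy(dst, src) // one runtime.memmove of 4096 bytes, well past the 128-byte cutoff
    }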

runtime.acquirem

/usr/lib/go/src/runtime/runtime1.go

  Total:       250ms      250ms (flat, cum)  0.52%
    624            .          .            
    625            .          .           // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. 
    626            .          .            
    627            .          .           //go:nosplit 
    628            .          .           func acquirem() *m { 
    629         10ms       10ms           	gp := getg() 
    630        110ms      110ms           	gp.m.locks++ 
    631        130ms      130ms           	return gp.m 
    632            .          .           } 
    633            .          .            

runtime.releasem

/usr/lib/go/src/runtime/runtime1.go

  Total:       130ms      130ms (flat, cum)  0.27%
    635            .          .           func releasem(mp *m) { 
    636            .          .           	gp := getg() 
    637         50ms       50ms           	mp.locks-- 
    638         80ms       80ms           	if mp.locks == 0 && gp.preempt { 
    639            .          .           		// restore the preemption request in case we've cleared it in newstack 
    640            .          .           		gp.stackguard0 = stackPreempt 
    641            .          .           	} 
    642            .          .           } 
    643            .          .            
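
acquirem and releasem pin the goroutine to its M by bumping a per-M lock count, which disables preemption between the two calls; releasem re-arms any preemption request that arrived in the meantime. There is no public equivalent of disabling preemption; the closest user-level analogue, shown only as a sketch, is pinning to an OS thread:

    package main

    import "runtime"

    func main() {
    	runtime.LockOSThread()
    	// ... work that must stay on this OS thread ...
    	// (weaker than acquirem: preemption is still possible)
    	runtime.UnlockOSThread()
    }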

git.urbach.dev/cli/q/src/codegen.(*Function).findFreeRegister

/home/user/q/src/codegen/findFreeRegister.go

  Total:       100ms      100ms (flat, cum)  0.21%
     36            .          .           				usedRegisters |= (1 << right.Register) 
     37            .          .           			} 
     38            .          .           		} 
     39            .          .           	} 
     40            .          .            
     41         10ms       10ms           	for _, current := range f.Steps { 
     42            .          .           		// These checks need to happen regardless of whether the value is alive after execution. 
     43            .          .           		// If it is used as an operand, the operand restrictions of the architecture apply. 
     44         80ms       80ms           		binaryOp, isBinaryOp := current.Value.(*ssa.BinaryOp) 
     45            .          .            
     46         10ms       10ms           		if isBinaryOp && !binaryOp.Op.IsComparison() { 
     47            .          .           			switch f.build.Arch { 
     48            .          .           			case config.ARM: 
     49            .          .           				if current.Register != -1 && binaryOp.Op == token.Mod { 
     50            .          .           					if binaryOp.Left == step.Value { 
     51            .          .           						usedRegisters |= (1 << current.Register) 

git.urbach.dev/cli/q/src/codegen.(*Function).findFreeRegister

/home/user/q/src/codegen/findFreeRegister.go

  Total:       270ms      270ms (flat, cum)  0.57%
     77            .          .           				} 
     78            .          .           			} 
     79            .          .           		} 
     80            .          .            
     81            .          .           		// If it's not alive in this step, ignore it. 
     82        110ms      110ms           		if !slices.Contains(current.Live, step) { 
                                                             return Index(s, v) >= 0                              slices.go:118
                                                             for i := range s {                                   slices.go:97
                                                             if v == s[i] {                                       slices.go:98

     83            .          .           			continue 
     84            .          .           		} 
     85            .          .            
     86            .          .           		// Mark all the neighbor registers that are alive 
     87            .          .           		// at the same time as used. 
     88         20ms       20ms           		for _, live := range current.Live { 
     89         40ms       40ms           			if live.Register == -1 { 
     90            .          .           				continue 
     91            .          .           			} 
     92            .          .            
     93         30ms       30ms           			usedRegisters |= (1 << live.Register) 
     94            .          .           		} 
     95            .          .            
     96            .          .           		// Ignore the definition itself. 
     97         30ms       30ms           		if current == step { 
     98            .          .           			continue 
     99            .          .           		} 
    100            .          .            
    101            .          .           		// Find all the registers that this instruction 
    102            .          .           		// would clobber and mark them as used. 
    103            .          .           		var clobbered []cpu.Register 
    104            .          .            
    105         10ms       10ms           		switch instr := current.Value.(type) { 
    106            .          .           		case *ssa.BinaryOp: 
    107            .          .           			switch instr.Op { 
    108            .          .           			case token.Div, token.Mod: 
    109            .          .           				clobbered = f.CPU.DivisionClobbered 
    110            .          .           			case token.Shl, token.Shr: 
    111            .          .           				clobbered = f.CPU.ShiftClobbered 
    112            .          .           			} 
    113            .          .           		case *ssa.Call: 
    114            .          .           			clobbered = f.CPU.Call.Clobbered 
    115            .          .           		case *ssa.CallExtern: 
    116            .          .           			clobbered = f.CPU.ExternCall.Clobbered 
    117            .          .           		case *ssa.FromTuple: 
    118            .          .           			usedRegisters |= (1 << f.CPU.Call.Out[instr.Index]) 
    119            .          .           		case *ssa.Parameter: 
    120            .          .           			usedRegisters |= (1 << f.CPU.Call.In[instr.Index]) 
    121            .          .           		case *ssa.Syscall: 
    122            .          .           			clobbered = f.CPU.Syscall.Clobbered 
    123            .          .           		} 
    124            .          .            
    125         10ms       10ms           		for _, reg := range clobbered { 
    126            .          .           			usedRegisters |= (1 << reg) 
    127            .          .           		} 
    128            .          .           	} 
    129            .          .            
    130            .          .           	// Pick one of the register hints if possible. 
    131            .          .           	for _, reg := range step.Hints { 
    132         10ms       10ms           		if usedRegisters&(1<<reg) == 0 { 
    133            .          .           			return reg 
    134            .          .           		} 
    135            .          .           	} 
    136            .          .            
    137            .          .           	// Pick a general purpose register that's not used yet. 
    138            .          .           	for _, reg := range f.CPU.General { 
    139         10ms       10ms           		if usedRegisters&(1<<reg) == 0 { 
    140            .          .           			return reg 
    141            .          .           		} 
    142            .          .           	} 
    143            .          .            
    144            .          .           	panic("no free registers") 
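
findFreeRegister tracks occupancy in a single integer bitmask: bit r set means register r is unavailable, and allocation is a linear scan for a clear bit, hints first, then the general-purpose set. A minimal standalone sketch of the same idea (register numbering is illustrative):

    package main

    import "fmt"

    type Register uint8

    // findFree returns the first register from general whose bit is clear in used.
    func findFree(used uint64, general []Register) (Register, bool) {
    	for _, reg := range general {
    		if used&(1<<reg) == 0 {
    			return reg, true
    		}
    	}
    	return 0, false
    }

    func main() {
    	var used uint64
    	used |= 1 << 0 // r0 occupied
    	used |= 1 << 2 // r2 occupied
    	reg, ok := findFree(used, []Register{0, 1, 2, 3})
    	fmt.Println(reg, ok) // 1 true
    }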

gogo

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       20ms (flat, cum) 0.042%
    191            .          .           	MOVD	0(R6), R4	// make sure g != nil 
    192            .          .           	B	gogo<>(SB) 
    193            .          .            
    194            .          .           TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 
    195            .          .           	MOVD	R6, g 
    196            .       10ms           	BL	runtime·save_g(SB) 
    197            .          .            
    198            .          .           	MOVD	gobuf_sp(R5), R0 
    199            .          .           	MOVD	R0, RSP 
    200         10ms       10ms           	MOVD	gobuf_bp(R5), R29 
    201            .          .           	MOVD	gobuf_lr(R5), LR 
    202            .          .           	MOVD	gobuf_ctxt(R5), R26 
    203            .          .           	MOVD	$0, gobuf_sp(R5) 
    204            .          .           	MOVD	$0, gobuf_bp(R5) 
    205            .          .           	MOVD	$0, gobuf_lr(R5) 

runtime.mcall

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms      7.12s (flat, cum) 14.90%
    224            .          .            
    225            .          .           	// Switch to m->g0 & its stack, call fn. 
    226            .          .           	MOVD	g, R3 
    227            .          .           	MOVD	g_m(g), R8 
    228            .          .           	MOVD	m_g0(R8), g 
    229         10ms       10ms           	BL	runtime·save_g(SB) 
    230            .          .           	CMP	g, R3 
    231            .          .           	BNE	2(PC) 
    232            .          .           	B	runtime·badmcall(SB) 
    233            .          .            
    234            .          .           	MOVD	(g_sched+gobuf_sp)(g), R0 
    235            .          .           	MOVD	R0, RSP	// sp = m->g0->sched.sp 
    236            .          .           	MOVD	$0, R29				// clear frame pointer, as caller may execute on another M 
    237            .          .           	MOVD	R3, R0				// arg = g 
    238            .          .           	MOVD	$0, -16(RSP)			// dummy LR 
    239            .          .           	SUB	$16, RSP 
    240            .          .           	MOVD	0(R26), R4			// code pointer 
    241            .      7.11s           	BL	(R4) 
    242            .          .           	B	runtime·badmcall2(SB) 
    243            .          .            
    244            .          .           // systemstack_switch is a dummy routine that systemstack leaves at the bottom 
    245            .          .           // of the G stack. We need to distinguish the routine that 
    246            .          .           // lives at the bottom of the G stack from the one that lives 

runtime.systemstack

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    252            .          .           	RET 
    253            .          .            
    254            .          .           // func systemstack(fn func()) 
    255            .          .           TEXT runtime·systemstack(SB), NOSPLIT, $0-8 
    256            .          .           	MOVD	fn+0(FP), R3	// R3 = fn 
    257         10ms       10ms           	MOVD	R3, R26		// context 
    258            .          .           	MOVD	g_m(g), R4	// R4 = m 
    259            .          .            
    260            .          .           	MOVD	m_gsignal(R4), R5	// R5 = gsignal 
    261            .          .           	CMP	g, R5 
    262            .          .           	BEQ	noswitch 

runtime.systemstack

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        40ms      3.53s (flat, cum)  7.39%
    283            .          .           	// be systemstack_switch if the G stack is scanned. 
    284            .          .           	BL	gosave_systemstack_switch<>(SB) 
    285            .          .            
    286            .          .           	// switch to g0 
    287            .          .           	MOVD	R5, g 
    288            .       50ms           	BL	runtime·save_g(SB) 
    289            .          .           	MOVD	(g_sched+gobuf_sp)(g), R3 
    290         10ms       10ms           	MOVD	R3, RSP 
    291            .          .            
    292            .          .           	// call target function 
    293            .          .           	MOVD	0(R26), R3	// code pointer 
    294            .      3.44s           	BL	(R3) 
    295            .          .            
    296            .          .           	// switch back to g 
    297            .          .           	MOVD	g_m(g), R3 
    298            .          .           	MOVD	m_curg(R3), g 
    299            .          .           	BL	runtime·save_g(SB) 
    300            .          .           	MOVD	(g_sched+gobuf_sp)(g), R0 
    301         10ms       10ms           	MOVD	R0, RSP 
    302            .          .           	MOVD	(g_sched+gobuf_bp)(g), R29 
    303            .          .           	MOVD	$0, (g_sched+gobuf_sp)(g) 
    304            .          .           	MOVD	$0, (g_sched+gobuf_bp)(g) 
    305         10ms       10ms           	RET 
    306            .          .            
    307            .          .           noswitch: 
    308            .          .           	// already on m stack, just call directly 
    309            .          .           	// Using a tail call here cleans up tracebacks since we won't stop 
    310            .          .           	// at an intermediate systemstack. 
    311         10ms       10ms           	MOVD	0(R26), R3	// code pointer 
    312            .          .           	MOVD.P	16(RSP), R30	// restore LR 
    313            .          .           	SUB	$8, RSP, R29	// restore FP 
    314            .          .           	B	(R3) 
    315            .          .            
    316            .          .           // func switchToCrashStack0(fn func()) 

runtime.memhash64

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    608            .          .           	MOVD	$runtime·aeskeysched+0(SB), R3 
    609            .          .            
    610            .          .           	VEOR	V0.B16, V0.B16, V0.B16 
    611            .          .           	VLD1	(R3), [V2.B16] 
    612            .          .           	VLD1	(R0), V0.D[1] 
    613         10ms       10ms           	VMOV	R1, V0.D[0] 
    614            .          .            
    615            .          .           	AESE	V2.B16, V0.B16 
    616            .          .           	AESMC	V0.B16, V0.B16 
    617            .          .           	AESE	V2.B16, V0.B16 
    618            .          .           	AESMC	V0.B16, V0.B16 

runtime.strhash

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        50ms       50ms (flat, cum)   0.1%
    631            .          .           noaes: 
    632            .          .           	B	runtime·memhashFallback<ABIInternal>(SB) 
    633            .          .            
    634            .          .           // func strhash(p unsafe.Pointer, h uintptr) uintptr 
    635            .          .           TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24 
    636         20ms       20ms           	MOVB	runtime·useAeshash(SB), R10 
    637            .          .           	CBZ	R10, noaes 
    638            .          .           	LDP	(R0), (R0, R2)	// string data / length 
    639         30ms       30ms           	B	aeshashbody<>(SB) 
    640            .          .           noaes: 
    641            .          .           	B	runtime·strhashFallback<ABIInternal>(SB) 
    642            .          .            
    643            .          .           // R0: data 

aeshashbody

/usr/lib/go/src/runtime/asm_arm64.s

  Total:       200ms      200ms (flat, cum)  0.42%
    644            .          .           // R1: seed data 
    645            .          .           // R2: length 
    646            .          .           // At return, R0 = return value 
    647            .          .           TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0 
    648         30ms       30ms           	VEOR	V30.B16, V30.B16, V30.B16 
    649            .          .           	VMOV	R1, V30.D[0] 
    650            .          .           	VMOV	R2, V30.D[1] // load length into seed 
    651            .          .            
    652            .          .           	MOVD	$runtime·aeskeysched+0(SB), R4 
    653            .          .           	VLD1.P	16(R4), [V0.B16] 
    654            .          .           	AESE	V30.B16, V0.B16 
    655            .          .           	AESMC	V0.B16, V0.B16 
    656         10ms       10ms           	CMP	$16, R2 
    657            .          .           	BLO	aes0to15 
    658            .          .           	BEQ	aes16 
    659            .          .           	CMP	$32, R2 
    660            .          .           	BLS	aes17to32 
    661            .          .           	CMP	$64, R2 
    662            .          .           	BLS	aes33to64 
    663            .          .           	CMP	$128, R2 
    664            .          .           	BLS	aes65to128 
    665            .          .           	B	aes129plus 
    666            .          .            
    667            .          .           aes0to15: 
    668            .          .           	CBZ	R2, aes0 
    669            .          .           	VEOR	V2.B16, V2.B16, V2.B16 
    670            .          .           	TBZ	$3, R2, less_than_8 
    671            .          .           	VLD1.P	8(R0), V2.D[0] 
    672            .          .            
    673            .          .           less_than_8: 
    674         30ms       30ms           	TBZ	$2, R2, less_than_4 
    675            .          .           	VLD1.P	4(R0), V2.S[2] 
    676            .          .            
    677            .          .           less_than_4: 
    678         80ms       80ms           	TBZ	$1, R2, less_than_2 
    679            .          .           	VLD1.P	2(R0), V2.H[6] 
    680            .          .            
    681            .          .           less_than_2: 
    682         50ms       50ms           	TBZ	$0, R2, done 
    683            .          .           	VLD1	(R0), V2.B[14] 
    684            .          .           done: 
    685            .          .           	AESE	V0.B16, V2.B16 
    686            .          .           	AESMC	V2.B16, V2.B16 
    687            .          .           	AESE	V0.B16, V2.B16 

aeshashbody

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        20ms       20ms (flat, cum) 0.042%
    705            .          .           	VLD1	(R4), [V1.B16] 
    706            .          .           	AESE	V30.B16, V1.B16 
    707            .          .           	AESMC	V1.B16, V1.B16 
    708            .          .           	SUB	$16, R2, R10 
    709            .          .           	VLD1.P	(R0)(R10), [V2.B16] 
    710         10ms       10ms           	VLD1	(R0), [V3.B16] 
    711            .          .            
    712         10ms       10ms           	AESE	V0.B16, V2.B16 
    713            .          .           	AESMC	V2.B16, V2.B16 
    714            .          .           	AESE	V1.B16, V3.B16 
    715            .          .           	AESMC	V3.B16, V3.B16 
    716            .          .            
    717            .          .           	AESE	V0.B16, V2.B16 

runtime.procyield

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        20ms       20ms (flat, cum) 0.042%
    965            .          .           	RET 
    966            .          .            
    967            .          .           TEXT runtime·procyield(SB),NOSPLIT,$0-0 
    968            .          .           	MOVWU	cycles+0(FP), R0 
    969            .          .           again: 
    970         20ms       20ms           	YIELD 
    971            .          .           	SUBW	$1, R0 
    972            .          .           	CBNZ	R0, again 
    973            .          .           	RET 
    974            .          .            
    975            .          .           // Save state of caller into g->sched, 

runtime.interhash

/usr/lib/go/src/runtime/alg.go

  Total:       120ms      200ms (flat, cum)  0.42%
    128            .          .           func c128hash(p unsafe.Pointer, h uintptr) uintptr { 
    129            .          .           	x := (*[2]float64)(p) 
    130            .          .           	return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h)) 
    131            .          .           } 
    132            .          .            
    133         10ms       10ms           func interhash(p unsafe.Pointer, h uintptr) uintptr { 
    134            .          .           	a := (*iface)(p) 
    135            .          .           	tab := a.tab 
    136         20ms       20ms           	if tab == nil { 
    137            .          .           		return h 
    138            .          .           	} 
    139            .          .           	t := tab.Type 
    140         70ms       70ms           	if t.Equal == nil { 
    141            .          .           		// Check hashability here. We could do this check inside 
    142            .          .           		// typehash, but we want to report the topmost type in 
    143            .          .           		// the error text (e.g. in a struct with a field of slice type 
    144            .          .           		// we want to report the struct, not the slice). 
    145            .          .           		panic(errorString("hash of unhashable type " + toRType(t).string())) 
    146            .          .           	} 
    147         10ms       10ms           	if isDirectIface(t) { 
                                                             return t.Kind_&abi.KindDirectIface != 0              typekind.go:11

    148         10ms       90ms           		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) 
    149            .          .           	} else { 
    150            .          .           		return c1 * typehash(t, a.data, h^c0) 
    151            .          .           	} 
    152            .          .           } 
    153            .          .            
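
interhash is the runtime's panic point for unhashable interface keys; the check cannot happen at compile time because only the key's dynamic type decides hashability. A short demonstration (illustrative):

    package main

    func main() {
    	m := map[any]int{}
    	m["ok"] = 1        // string keys hash fine
    	m[[]int{1, 2}] = 2 // panics: "hash of unhashable type []int"
    }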

runtime.nilinterhash

/usr/lib/go/src/runtime/alg.go

  Total:        10ms       30ms (flat, cum) 0.063%
    165            .          .           	a := (*eface)(p) 
    166            .          .           	t := a._type 
    167            .          .           	if t == nil { 
    168            .          .           		return h 
    169            .          .           	} 
    170         10ms       10ms           	if t.Equal == nil { 
    171            .          .           		// See comment in interhash above. 
    172            .          .           		panic(errorString("hash of unhashable type " + toRType(t).string())) 
    173            .          .           	} 
    174            .          .           	if isDirectIface(t) { 
    175            .          .           		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) 
    176            .          .           	} else { 
    177            .       20ms           		return c1 * typehash(t, a.data, h^c0) 
    178            .          .           	} 
    179            .          .           } 
    180            .          .            
    181            .          .           // typehash computes the hash of the object of type t at address p. 
    182            .          .           // h is the seed. 

runtime.typehash

/usr/lib/go/src/runtime/alg.go

  Total:        80ms      100ms (flat, cum)  0.21%
    197            .          .           // 
    198            .          .           // Do not remove or change the type signature. 
    199            .          .           // See go.dev/issue/67401. 
    200            .          .           // 
    201            .          .           //go:linkname typehash 
    202         30ms       30ms           func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { 
    203         10ms       10ms           	if t.TFlag&abi.TFlagRegularMemory != 0 { 
    204            .          .           		// Handle ptr sizes specially, see issue 37086. 
    205            .          .           		switch t.Size_ { 
    206            .          .           		case 4: 
    207            .          .           			return memhash32(p, h) 
    208            .          .           		case 8: 
    209         40ms       40ms           			return memhash64(p, h) 
    210            .          .           		default: 
    211            .          .           			return memhash(p, h, t.Size_) 
    212            .          .           		} 
    213            .          .           	} 
    214            .          .           	switch t.Kind_ & abi.KindMask { 
    215            .          .           	case abi.Float32: 
    216            .          .           		return f32hash(p, h) 
    217            .          .           	case abi.Float64: 
    218            .          .           		return f64hash(p, h) 
    219            .          .           	case abi.Complex64: 
    220            .          .           		return c64hash(p, h) 
    221            .          .           	case abi.Complex128: 
    222            .          .           		return c128hash(p, h) 
    223            .          .           	case abi.String: 
    224            .       20ms           		return strhash(p, h) 
    225            .          .           	case abi.Interface: 
    226            .          .           		i := (*interfacetype)(unsafe.Pointer(t)) 
    227            .          .           		if len(i.Methods) == 0 { 
    228            .          .           			return nilinterhash(p, h) 
    229            .          .           		} 

runtime.interequal

/usr/lib/go/src/runtime/alg.go

  Total:        80ms       90ms (flat, cum)  0.19%
    286            .          .           	return *(*complex128)(p) == *(*complex128)(q) 
    287            .          .           } 
    288            .          .           func strequal(p, q unsafe.Pointer) bool { 
    289            .          .           	return *(*string)(p) == *(*string)(q) 
    290            .          .           } 
    291         10ms       10ms           func interequal(p, q unsafe.Pointer) bool { 
    292            .          .           	x := *(*iface)(p) 
    293            .          .           	y := *(*iface)(q) 
    294         70ms       80ms           	return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data) 
    295            .          .           } 
    296            .          .           func nilinterequal(p, q unsafe.Pointer) bool { 
    297            .          .           	x := *(*eface)(p) 
    298            .          .           	y := *(*eface)(q) 
    299            .          .           	return x._type == y._type && efaceeq(x._type, x.data, y.data) 

runtime.ifaceeq

/usr/lib/go/src/runtime/alg.go

  Total:        60ms       60ms (flat, cum)  0.13%
    312            .          .           		// Ptrs, chans, and single-element items can be compared directly using ==. 
    313            .          .           		return x == y 
    314            .          .           	} 
    315            .          .           	return eq(x, y) 
    316            .          .           } 
    317         20ms       20ms           func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { 
    318            .          .           	if tab == nil { 
    319            .          .           		return true 
    320            .          .           	} 
    321            .          .           	t := tab.Type 
    322         10ms       10ms           	eq := t.Equal 
    323         30ms       30ms           	if eq == nil { 
    324            .          .           		panic(errorString("comparing uncomparable type " + toRType(t).string())) 
    325            .          .           	} 
    326            .          .           	if isDirectIface(t) { 
    327            .          .           		// See comment in efaceeq. 
    328            .          .           		return x == y 
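
ifaceeq is the matching panic point for ==: comparing interface values whose dynamic type is not comparable compiles but fails at run time (illustrative):

    package main

    import "fmt"

    func main() {
    	var a, b any = []int{1}, []int{1}
    	fmt.Println(a == b) // panics: "comparing uncomparable type []int"
    }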

git.urbach.dev/cli/q/src/core.(*Environment).AddPackage

/home/user/q/src/core/Environment.go

  Total:        10ms      140ms (flat, cum)  0.29%
     22            .          .           	typeCache 
     23            .          .           } 
     24            .          .            
     25            .          .           // AddPackage returns an existing package with the given name or creates a new one. 
     26            .          .           func (env *Environment) AddPackage(name string, isExtern bool) *Package { 
     27            .       60ms           	pkg, exists := env.Packages[name] 
     28            .          .            
     29            .          .           	if !exists { 
     30            .       10ms           		pkg = &Package{ 
     31            .          .           			Name:      name, 
     32            .       10ms           			Constants: make(map[string]*Constant), 
     33            .       10ms           			Functions: make(map[string]*Function, 8), 
     34            .       30ms           			Structs:   make(map[string]*types.Struct), 
     35            .          .           			Globals:   make(map[string]*Global), 
     36            .          .           			IsExtern:  isExtern, 
     37            .          .           		} 
     38            .          .            
     39         10ms       20ms           		env.Packages[name] = pkg 
     40            .          .           	} 
     41            .          .            
     42            .          .           	return pkg 
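AddPackage is a plain get-or-create over env.Packages, so its cumulative time is dominated by one map lookup plus the map allocations when a new package is created. The same idiom in isolation looks roughly like this (a generic sketch with illustrative names, not the q compiler's API):

    // getOrCreate returns the existing value for name or inserts a fresh one.
    // Pre-sizing the inner maps, as AddPackage does with make(..., 8),
    // avoids early grow work in runtime.mapassign.
    func getOrCreate[V any](m map[string]*V, name string, fresh func() *V) *V {
    	v, exists := m[name]
    	if !exists {
    		v = fresh()
    		m[name] = v
    	}
    	return v
    }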

git.urbach.dev/cli/q/src/core.(*Environment).Function

/home/user/q/src/core/Environment.go

  Total:           0       50ms (flat, cum)   0.1%
     44            .          .            
     45            .          .           // Function looks up a function by the package name and raw function name. 
     46            .          .           func (env *Environment) Function(pkgName string, name string) *Function { 
     47            .       20ms           	pkg, exists := env.Packages[pkgName] 
     48            .          .            
     49            .          .           	if !exists { 
     50            .          .           		return nil 
     51            .          .           	} 
     52            .          .            
     53            .       30ms           	fn, exists := pkg.Functions[name] 
     54            .          .            
     55            .          .           	if !exists { 
     56            .          .           		return nil 
     57            .          .           	} 
     58            .          .            

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:        10ms       10ms (flat, cum) 0.021%
     59            .          .           	return fn 
     60            .          .           } 
     61            .          .            
     62            .          .           // Functions returns an iterator over all functions. 
     63            .          .           func (env *Environment) Functions() iter.Seq[*Function] { 
     64         10ms       10ms           	return func(yield func(*Function) bool) { 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).Functions.func4

/home/user/q/src/core/Environment.go

  Total:        20ms       40ms (flat, cum) 0.084%
     65         20ms       40ms           		for _, pkg := range env.Packages { 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:        20ms       80ms (flat, cum)  0.17%
     66         20ms       80ms           			for _, fn := range pkg.Functions { 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:       260ms      980ms (flat, cum)  2.05%
     67        260ms      980ms           				for variant := range fn.Variants { 
                                          (inlined callees: Function.go:92, Environment.go:68, parseParameters.go:14-55, builder.go:114, Compile.go:53)

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).parseParameters.(*Environment).ResolveTypes.(*Environment).Functions.func3-range4

/home/user/q/src/core/Environment.go

  Total:       260ms      980ms (flat, cum)  2.05%
     68        260ms      980ms           					if !yield(variant) { 
                                          (inlined callees: parseParameters.go:14-55, builder.go:114, Compile.go:53)

     69            .          .           						return 
     70            .          .           					} 
     71            .          .           				} 
     72            .          .           			} 
     73            .          .           		} 
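Functions returns an iter.Seq, so callers consume it with Go 1.23 range-over-func; the func3/func4 frames above are the compiler-generated closures for the nested package and function loops. A minimal usage sketch (env stands for a *core.Environment):

    // Each iteration yields one *core.Function across all packages.
    // A break in the loop body makes the generated yield return false,
    // which unwinds the nested loops inside Functions.
    count := 0
    for fn := range env.Functions() {
    	_ = fn
    	count++
    }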

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Globals.func2

/home/user/q/src/core/Environment.go

  Total:        10ms       20ms (flat, cum) 0.042%
     75            .          .           } 
     76            .          .            
     77            .          .           // Globals returns an iterator over all globals. 
     78            .          .           func (env *Environment) Globals() iter.Seq[*Global] { 
     79            .          .           	return func(yield func(*Global) bool) { 
     80            .       10ms           		for _, pkg := range env.Packages { 
     81         10ms       10ms           			for _, global := range pkg.Globals { 
     82            .          .           				if !yield(global) { 
     83            .          .           					return 
     84            .          .           				} 
     85            .          .           			} 
     86            .          .           		} 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).LiveFunctions.func5

/home/user/q/src/core/Environment.go

  Total:           0      450ms (flat, cum)  0.94%
     90            .          .           // LiveFunctions returns an iterator over functions that are alive, 
     91            .          .           // starting with `run.init` and all of its dependencies. 
     92            .          .           func (env *Environment) LiveFunctions() iter.Seq[*Function] { 
     93            .          .           	return func(yield func(*Function) bool) { 
     94            .          .           		running := true 
     95            .       40ms           		traversed := make(map[*Function]bool, env.NumFunctions) 
     96            .          .            
     97            .      410ms           		env.Init.EachDependency(traversed, func(f *Function) { 
     98            .          .           			if !running { 
     99            .          .           				return 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).LiveFunctions.func5.1

/home/user/q/src/core/Environment.go

  Total:           0      370ms (flat, cum)  0.77%
    100            .          .           			} 
    101            .          .            
    102            .      370ms           			running = yield(f) 
    103            .          .           		}) 
    104            .          .           	} 
    105            .          .           } 
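LiveFunctions adapts a callback-style traversal (EachDependency) into an iterator: once yield reports false, the running flag drops all further callbacks, because the underlying traversal itself cannot stop early. A self-contained sketch of that adapter pattern (walk is a stand-in for EachDependency, not the profiled code):

    package main

    import (
    	"fmt"
    	"iter"
    )

    // walk calls fn for every element and has no way to stop early.
    func walk(fn func(int)) {
    	for i := range 5 {
    		fn(i)
    	}
    }

    // asSeq adapts the unstoppable callback into an iter.Seq by
    // remembering that the consumer gave up.
    func asSeq() iter.Seq[int] {
    	return func(yield func(int) bool) {
    		running := true
    		walk(func(v int) {
    			if !running {
    				return // traversal continues, values are dropped
    			}
    			running = yield(v)
    		})
    	}
    }

    func main() {
    	for v := range asSeq() {
    		fmt.Println(v)
    		if v == 2 {
    			break // yield returns false from here on
    		}
    	}
    }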

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes

/home/user/q/src/core/Environment.go

  Total:       280ms      620ms (flat, cum)  1.30%
    106            .          .            
    107            .          .           // ResolveTypes resolves all the type tokens in structs, globals and function parameters. 
    108            .          .           func (env *Environment) ResolveTypes() error { 
    109         20ms       80ms           	err := env.parseStructs(env.Structs()) 
                                          (inlined callees: parseStructs.go:13-14, Environment.go:128-129)
    110            .          .            
    111            .          .           	if err != nil { 
    112            .          .           		return err 
    113            .          .           	} 
    114            .          .            
    115         10ms       20ms           	err = env.parseGlobals(env.Globals()) 
                                          (inlined callees: parseGlobals.go:9, Environment.go:80-81)

    116            .          .            
    117            .          .           	if err != nil { 
    118            .          .           		return err 
    119            .          .           	} 
    120            .          .            
    121        250ms      520ms           	return env.parseParameters(env.Functions()) 
                                          (inlined callees: parseParameters.go:13-55, Environment.go:66-68, Function.go:92, builder.go:114)

    122            .          .           } 
    123            .          .            
    124            .          .           // Structs returns an iterator over all structs. 

git.urbach.dev/cli/q/src/core.typeByName.(*Environment).Structs.func1

/home/user/q/src/core/Environment.go

  Total:        10ms       80ms (flat, cum)  0.17%
    125            .          .           func (env *Environment) Structs() iter.Seq[*types.Struct] { 
    126            .          .           	return func(yield func(*types.Struct) bool) { 
    127            .          .           		for _, pkg := range env.Packages { 
    128         10ms       80ms           			for _, structure := range pkg.Structs { 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Structs.func1

/home/user/q/src/core/Environment.go

  Total:           0       10ms (flat, cum) 0.021%
    129            .       10ms           				if !yield(structure) { 
                                          (inlined callee: parseStructs.go:14)

    130            .          .           					return 
    131            .          .           				} 
    132            .          .           			} 
    133            .          .           		} 
    134            .          .           	} 

runtime.mapaccess1

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:       130ms      340ms (flat, cum)  0.71%
     35            .          .           // the key is not in the map. 
     36            .          .           // NOTE: The returned pointer may keep the whole map live, so don't 
     37            .          .           // hold onto it for very long. 
     38            .          .           // 
     39            .          .           //go:linkname runtime_mapaccess1 runtime.mapaccess1 
     40         20ms       20ms           func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { 
     41            .          .           	if race.Enabled && m != nil { 
     42            .          .           		callerpc := sys.GetCallerPC() 
     43            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess1) 
     44            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
     45            .          .           		race.ReadObjectPC(typ.Key, key, callerpc, pc) 
     46            .          .           	} 
     47            .          .           	if msan.Enabled && m != nil { 
     48            .          .           		msan.Read(key, typ.Key.Size_) 
     49            .          .           	} 
     50            .          .           	if asan.Enabled && m != nil { 
     51            .          .           		asan.Read(key, typ.Key.Size_) 
     52            .          .           	} 
     53            .          .            
     54         10ms       10ms           	if m == nil || m.Used() == 0 { 
     55            .       10ms           		if err := mapKeyError(typ, key); err != nil { 
                                          (inlined callee: map.go:828)

     56            .          .           			panic(err) // see issue 23734 
     57            .          .           		} 
     58            .          .           		return unsafe.Pointer(&zeroVal[0]) 
     59            .          .           	} 
     60            .          .            
     61            .          .           	if m.writing != 0 { 
     62            .          .           		fatal("concurrent map read and map write") 
     63            .          .           	} 
     64            .          .            
     65         10ms      120ms           	hash := typ.Hasher(key, m.seed) 
     66            .          .            
     67            .          .           	if m.dirLen <= 0 { 
     68         10ms       30ms           		_, elem, ok := m.getWithKeySmall(typ, hash, key) 
     69            .          .           		if !ok { 
     70            .          .           			return unsafe.Pointer(&zeroVal[0]) 
     71            .          .           		} 
     72            .          .           		return elem 
     73            .          .           	} 
     74            .          .            
     75            .          .           	// Select table. 
     76            .          .           	idx := m.directoryIndex(hash) 
     77            .          .           	t := m.directoryAt(idx) 
     78            .          .            
     79            .          .           	// Probe table. 
     80         10ms       10ms           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
     81            .          .           	for ; ; seq = seq.next() { 
     82            .          .           		g := t.groups.group(typ, seq.offset) 
     83            .          .            
     84         40ms       40ms           		match := g.ctrls().matchH2(h2(hash)) 
                                          (inlined callees: group.go:154, group.go:170)

     85            .          .            
     86         20ms       20ms           		for match != 0 { 
     87         10ms       10ms           			i := match.first() 
     88            .          .            
     89            .          .           			slotKey := g.key(typ, i) 
     90            .          .           			slotKeyOrig := slotKey 
     91            .          .           			if typ.IndirectKey() { 
     92            .          .           				slotKey = *((*unsafe.Pointer)(slotKey)) 
     93            .          .           			} 
     94            .       70ms           			if typ.Key.Equal(key, slotKey) { 
     95            .          .           				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) 
     96            .          .           				if typ.IndirectElem() { 
     97            .          .           					slotElem = *((*unsafe.Pointer)(slotElem)) 
     98            .          .           				} 
     99            .          .           				return slotElem 

runtime.mapaccess2

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:        40ms      130ms (flat, cum)  0.27%
    109            .          .           		} 
    110            .          .           	} 
    111            .          .           } 
    112            .          .            
    113            .          .           //go:linkname runtime_mapaccess2 runtime.mapaccess2 
    114         10ms       10ms           func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) { 
    115            .          .           	if race.Enabled && m != nil { 
    116            .          .           		callerpc := sys.GetCallerPC() 
    117            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess1) 
    118            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
    119            .          .           		race.ReadObjectPC(typ.Key, key, callerpc, pc) 
    120            .          .           	} 
    121            .          .           	if msan.Enabled && m != nil { 
    122            .          .           		msan.Read(key, typ.Key.Size_) 
    123            .          .           	} 
    124            .          .           	if asan.Enabled && m != nil { 
    125            .          .           		asan.Read(key, typ.Key.Size_) 
    126            .          .           	} 
    127            .          .            
    128            .          .           	if m == nil || m.Used() == 0 { 
    129         30ms       50ms           		if err := mapKeyError(typ, key); err != nil { 
                                          (inlined callees: map.go:825, map_swiss.go:57, map.go:828)

    130            .          .           			panic(err) // see issue 23734 
    131            .          .           		} 
    132            .          .           		return unsafe.Pointer(&zeroVal[0]), false 
    133            .          .           	} 
    134            .          .            
    135            .          .           	if m.writing != 0 { 
    136            .          .           		fatal("concurrent map read and map write") 
    137            .          .           	} 
    138            .          .            
    139            .       20ms           	hash := typ.Hasher(key, m.seed) 
    140            .          .            
    141            .          .           	if m.dirLen == 0 { 
    142            .       50ms           		_, elem, ok := m.getWithKeySmall(typ, hash, key) 
    143            .          .           		if !ok { 
    144            .          .           			return unsafe.Pointer(&zeroVal[0]), false 
    145            .          .           		} 
    146            .          .           		return elem, true 
    147            .          .           	} 
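mapaccess1 and mapaccess2 are the two lookup entry points the compiler selects between: a plain read lowers to mapaccess1, a comma-ok read to mapaccess2. Both hash the key via typ.Hasher and probe groups of control bytes; the typ.Key.Equal call on line 94 is why lookups with string keys also pay for a key comparison on each candidate slot. For example:

    m := map[string]int{"a": 1}

    v := m["a"]      // lowers to runtime.mapaccess1
    v2, ok := m["a"] // lowers to runtime.mapaccess2
    _, _, _ = v, v2, ok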

runtime.mapassign

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:       110ms      490ms (flat, cum)  1.03%
    183            .          .           		} 
    184            .          .           	} 
    185            .          .           } 
    186            .          .            
    187            .          .           //go:linkname runtime_mapassign runtime.mapassign 
    188         20ms       20ms           func runtime_mapassign(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { 
    189         10ms       10ms           	if m == nil { 
    190            .          .           		panic(errNilAssign) 
    191            .          .           	} 
    192            .          .           	if race.Enabled { 
    193            .          .           		callerpc := sys.GetCallerPC() 
    194            .          .           		pc := abi.FuncPCABIInternal(runtime_mapassign) 
    195            .          .           		race.WritePC(unsafe.Pointer(m), callerpc, pc) 
    196            .          .           		race.ReadObjectPC(typ.Key, key, callerpc, pc) 
    197            .          .           	} 
    198            .          .           	if msan.Enabled { 
    199            .          .           		msan.Read(key, typ.Key.Size_) 
    200            .          .           	} 
    201            .          .           	if asan.Enabled { 
    202            .          .           		asan.Read(key, typ.Key.Size_) 
    203            .          .           	} 
    204            .          .           	if m.writing != 0 { 
    205            .          .           		fatal("concurrent map writes") 
    206            .          .           	} 
    207            .          .            
    208         20ms       90ms           	hash := typ.Hasher(key, m.seed) 
    209            .          .            
    210            .          .           	// Set writing after calling Hasher, since Hasher may panic, in which 
    211            .          .           	// case we have not actually done a write. 
    212            .          .           	m.writing ^= 1 // toggle, see comment on writing 
    213            .          .            
    214            .          .           	if m.dirPtr == nil { 
    215            .      200ms           		m.growToSmall(typ) 
    216            .          .           	} 
    217            .          .            
    218            .          .           	if m.dirLen == 0 { 
    219            .          .           		if m.used < abi.SwissMapGroupSlots { 
    220            .      100ms           			elem := m.putSlotSmall(typ, hash, key) 
    221            .          .            
    222         10ms       10ms           			if m.writing == 0 { 
    223            .          .           				fatal("concurrent map writes") 
    224            .          .           			} 
    225            .          .           			m.writing ^= 1 
    226            .          .            
    227            .          .           			return elem 
    228            .          .           		} 
    229            .          .            
    230            .          .           		// Can't fit another entry, grow to full size map. 
    231            .       10ms           		m.growToTable(typ) 
    232            .          .           	} 
    233            .          .            
    234            .          .           	var slotElem unsafe.Pointer 
    235            .          .           outer: 
    236            .          .           	for { 
    237            .          .           		// Select table. 
    238            .          .           		idx := m.directoryIndex(hash) 
    239            .          .           		t := m.directoryAt(idx) 
    240            .          .            
    241            .          .           		seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    242            .          .            
    243            .          .           		// As we look for a match, keep track of the first deleted slot 
    244            .          .           		// we find, which we'll use to insert the new entry if 
    245            .          .           		// necessary. 
    246            .          .           		var firstDeletedGroup groupReference 
    247            .          .           		var firstDeletedSlot uintptr 
    248            .          .            
    249            .          .           		for ; ; seq = seq.next() { 
    250         30ms       30ms           			g := t.groups.group(typ, seq.offset) 
                                          (inlined callee: group.go:325)
    251         10ms       10ms           			match := g.ctrls().matchH2(h2(hash)) 
                                          (inlined callees: group.go:154, group.go:170)
    252            .          .            
    253            .          .           			// Look for an existing slot containing this key. 
    254         10ms       10ms           			for match != 0 { 
    255            .          .           				i := match.first() 
    256            .          .            
    257            .          .           				slotKey := g.key(typ, i) 
    258            .          .           				slotKeyOrig := slotKey 
    259            .          .           				if typ.IndirectKey() { 

runtime.mapassign

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:        60ms      110ms (flat, cum)  0.23%
    284            .          .            
    285            .          .           				var i uintptr 
    286            .          .            
    287            .          .           				// If we found a deleted slot along the way, we 
    288            .          .           				// can replace it without consuming growthLeft. 
    289         20ms       20ms           				if firstDeletedGroup.data != nil { 
    290            .          .           					g = firstDeletedGroup 
    291            .          .           					i = firstDeletedSlot 
    292            .          .           					t.growthLeft++ // will be decremented below to become a no-op. 
    293            .          .           				} else { 
    294            .          .           					// Otherwise, use the empty slot. 
    295         10ms       10ms           					i = match.first() 
                                          (inlined callees: group.go:50, group.go:58)

    296            .          .           				} 
    297            .          .            
    298            .          .           				// If there is room left to grow, just insert the new entry. 
    299            .          .           				if t.growthLeft > 0 { 
    300            .          .           					slotKey := g.key(typ, i) 
    301            .          .           					slotKeyOrig := slotKey 
    302            .          .           					if typ.IndirectKey() { 
    303            .          .           						kmem := newobject(typ.Key) 
    304            .          .           						*(*unsafe.Pointer)(slotKey) = kmem 
    305            .          .           						slotKey = kmem 
    306            .          .           					} 
    307            .       40ms           					typedmemmove(typ.Key, slotKey, key) 
    308            .          .            
    309         10ms       10ms           					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) 
    310            .          .           					if typ.IndirectElem() { 
    311            .          .           						emem := newobject(typ.Elem) 
    312            .          .           						*(*unsafe.Pointer)(slotElem) = emem 
    313            .          .           						slotElem = emem 
    314            .          .           					} 
    315            .          .            
    316         10ms       10ms           					g.ctrls().set(i, ctrl(h2(hash))) 
                                          (inlined callee: group.go:280)

    317            .          .           					t.growthLeft-- 
    318            .          .           					t.used++ 
    319            .          .           					m.used++ 
    320            .          .            
    321            .          .           					t.checkInvariants(typ, m) 
    322            .          .           					break outer 
    323            .          .           				} 
    324            .          .            
    325            .       10ms           				t.rehash(typ, m) 
    326            .          .           				continue outer 
    327            .          .           			} 
    328            .          .            
    329            .          .           			// No empty slots in this group. Check for a deleted 
    330            .          .           			// slot, which we'll use if we don't find a match later 
    331            .          .           			// in the probe sequence. 
    332            .          .           			// 
    333            .          .           			// We only need to remember a single deleted slot. 
    334            .          .           			if firstDeletedGroup.data == nil { 
    335         10ms       10ms           				// Since we already checked for empty slots 
    336            .          .           				// above, matches here must be deleted slots. 
    337            .          .           				match = g.ctrls().matchEmptyOrDeleted() 
    338            .          .           				if match != 0 { 
    339            .          .           					firstDeletedGroup = g 
    340            .          .           					firstDeletedSlot = match.first() 
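The growToSmall and growToTable samples show inserts paying for map growth. When the expected size is known up front, make's capacity hint reserves the space once, which is presumably why the profiled compiler pre-sizes maps like make(map[string]*Function, 8). A small illustration:

    // Without a hint, the first insert allocates the small map
    // (growToSmall above); with a hint, the space already exists.
    funcs := make(map[string]int, 8)
    for i, name := range []string{"main", "init", "run"} {
    	funcs[name] = i // runtime.mapassign without grow work
    }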

git.urbach.dev/cli/q/src/token.identifier

/home/user/q/src/token/identifier.go

  Total:       310ms      310ms (flat, cum)  0.65%
      1            .          .           package token 
      2            .          .            
      3            .          .           // identifier handles all tokens that qualify as an identifier. 
      4         20ms       20ms           func identifier(tokens List, buffer []byte, i Position) (List, Position) { 
      5            .          .           	position := i 
      6            .          .           	i++ 
      7            .          .            
      8        120ms      120ms           	for i < Position(len(buffer)) && isIdentifier(buffer[i]) { 
                                          (inlined callees: identifier.go:55, identifier.go:68, digit.go:26)

      9         50ms       50ms           		i++ 
     10            .          .           	} 
     11            .          .            
     12         10ms       10ms           	identifier := buffer[position:i] 
     13            .          .           	kind := Identifier 
     14            .          .            
     15         10ms       10ms           	switch string(identifier) { 
     16         10ms       10ms           	case "as": 
     17            .          .           		kind = Cast 
     18            .          .           	case "assert": 
     19            .          .           		kind = Assert 
     20         10ms       10ms           	case "const": 
     21            .          .           		kind = Const 
     22            .          .           	case "delete": 
     23            .          .           		kind = Delete 
     24            .          .           	case "if": 
     25            .          .           		kind = If 
     26         10ms       10ms           	case "else": 
     27            .          .           		kind = Else 
     28            .          .           	case "extern": 
     29            .          .           		kind = Extern 
     30            .          .           	case "global": 
     31            .          .           		kind = Global 
     32            .          .           	case "go": 
     33            .          .           		kind = Go 
     34            .          .           	case "import": 
     35            .          .           		kind = Import 
     36            .          .           	case "loop": 
     37            .          .           		kind = Loop 
     38            .          .           	case "new": 
     39            .          .           		kind = New 
     40            .          .           	case "return": 
     41            .          .           		kind = Return 
     42         10ms       10ms           	case "syscall": 
     43            .          .           		kind = Syscall 
     44            .          .           	case "switch": 
     45            .          .           		kind = Switch 
     46            .          .           	} 
     47            .          .            
     48         30ms       30ms           	tokens = append(tokens, Token{Kind: kind, Position: position, Length: Length(len(identifier))}) 
     49         30ms       30ms           	return tokens, i 
     50            .          .           } 
     51            .          .            

git.urbach.dev/cli/q/src/token.isIdentifier

/home/user/q/src/token/identifier.go

  Total:        90ms       90ms (flat, cum)  0.19%
     53            .          .           // a digit or an underscore. 
     54            .          .           func isIdentifier(c byte) bool { 
     55         90ms       90ms           	return isLetter(c) || isDigit(c) || c == '_' 
                                          (inlined callees: identifier.go:68, digit.go:26)

     56            .          .           } 
     57            .          .            
     58            .          .           // isIdentifierStart returns true if the character is the 

git.urbach.dev/cli/q/src/token.isIdentifierStart

/home/user/q/src/token/identifier.go

  Total:        10ms       10ms (flat, cum) 0.021%
     59            .          .           // start of an identifier which is either a letter or an 
     60            .          .           // underscore. 
     61            .          .           func isIdentifierStart(c byte) bool { 
     62         10ms       10ms           	return isLetter(c) || c == '_' 
                                          (inlined callee: identifier.go:68)

     63            .          .           } 
     64            .          .            

git.urbach.dev/cli/q/src/token.isLetter

/home/user/q/src/token/identifier.go

  Total:        70ms       70ms (flat, cum)  0.15%
     66            .          .           // or uppercase letter in the English alphabet. 
     67            .          .           func isLetter(c byte) bool { 
     68         70ms       70ms           	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') 
     69            .          .           } 
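isIdentifier is the hottest predicate in the scanner's inner loop. The range comparisons above are already cheap; a common alternative when such a predicate dominates a profile is a 256-entry lookup table, trading 256 bytes for one load per character. A sketch of that variant (not the q tokenizer's actual code):

    // identTable[c] reports whether c may appear inside an identifier.
    var identTable = func() (t [256]bool) {
    	for c := range 256 {
    		b := byte(c)
    		t[c] = (b >= 'a' && b <= 'z') || (b >= 'A' && b <= 'Z') ||
    			(b >= '0' && b <= '9') || b == '_'
    	}
    	return
    }()

    func isIdentifierFast(c byte) bool { return identTable[c] }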

slices.Index[go.shape.[]git.urbach.dev/cli/q/src/cpu.Register,go.shape.int8]

/usr/lib/go/src/slices/slices.go

  Total:       170ms      170ms (flat, cum)  0.36%
     92            .          .           } 
     93            .          .            
     94            .          .           // Index returns the index of the first occurrence of v in s, 
     95            .          .           // or -1 if not present. 
     96            .          .           func Index[S ~[]E, E comparable](s S, v E) int { 
     97         70ms       70ms           	for i := range s { 
     98        100ms      100ms           		if v == s[i] { 
     99            .          .           			return i 
    100            .          .           		} 
    101            .          .           	} 
    102            .          .           	return -1 
    103            .          .           } 

slices.IndexFunc[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:        40ms       80ms (flat, cum)  0.17%
    104            .          .            
    105            .          .           // IndexFunc returns the first index i satisfying f(s[i]), 
    106            .          .           // or -1 if none do. 
    107            .          .           func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { 
    108            .          .           	for i := range s { 
    109         40ms       80ms           		if f(s[i]) { 
    110            .          .           			return i 
    111            .          .           		} 
    112            .          .           	} 
    113            .          .           	return -1 

slices.Contains[go.shape.[]git.urbach.dev/cli/q/src/cpu.Register,go.shape.int8]

/usr/lib/go/src/slices/slices.go

  Total:       220ms      220ms (flat, cum)  0.46%
    114            .          .           } 
    115            .          .            
    116            .          .           // Contains reports whether v is present in s. 
    117            .          .           func Contains[S ~[]E, E comparable](s S, v E) bool { 
    118        220ms      220ms           	return Index(s, v) >= 0 
                                          (inlined callees: slices.go:97, slices.go:98)
    119            .          .           } 
    120            .          .            
    121            .          .           // ContainsFunc reports whether at least one 
    122            .          .           // element e of s satisfies f(e). 
    123            .          .           func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { 

slices.Insert[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:        10ms       10ms (flat, cum) 0.021%
    139            .          .           	if m == 0 { 
    140            .          .           		return s 
    141            .          .           	} 
    142            .          .           	n := len(s) 
    143            .          .           	if i == n { 
    144         10ms       10ms           		return append(s, v...) 
    145            .          .           	} 
    146            .          .           	if n+m > cap(s) { 
    147            .          .           		// Use append rather than make so that we bump the size of 
    148            .          .           		// the slice up to the next storage class. 
    149            .          .           		// This is what Grow does but we don't call Grow because 

slices.Insert[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:           0       10ms (flat, cum) 0.021%
    172            .          .           	if !overlaps(v, s[i+m:]) { 
    173            .          .           		// Easy case - v does not overlap either the c or d regions. 
    174            .          .           		// (It might be in some of a or b, or elsewhere entirely.) 
    175            .          .           		// The data we copy up doesn't write to v at all, so just do it. 
    176            .          .            
    177            .       10ms           		copy(s[i+m:], s[i:]) 
    178            .          .            
    179            .          .           		// Now we have 
    180            .          .           		// s: aaaaaaaabbbbbbbbcccccccc 
    181            .          .           		//            ^   ^       ^   ^ 
    182            .          .           		//            i  i+m      n  n+m 

slices.DeleteFunc[go.shape.[]*git.urbach.dev/cli/q/src/ssa.Block,go.shape.*uint8]

/usr/lib/go/src/slices/slices.go

  Total:        30ms       30ms (flat, cum) 0.063%
    234            .          .            
    235            .          .           // DeleteFunc removes any elements from s for which del returns true, 
    236            .          .           // returning the modified slice. 
    237            .          .           // DeleteFunc zeroes the elements between the new length and the original length. 
    238            .          .           // If the result is empty, it has the same nilness as s. 
    239         30ms       30ms           func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { 

slices.DeleteFunc[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:        60ms      110ms (flat, cum)  0.23%
    240         40ms       80ms           	i := IndexFunc(s, del) 
                                          (inlined callee: slices.go:109)

    241            .          .           	if i == -1 { 
    242         10ms       10ms           		return s 
    243            .          .           	} 
    244            .          .           	// Don't start copying elements until we find one to delete. 
    245            .          .           	for j := i + 1; j < len(s); j++ { 
    246            .          .           		if v := s[j]; !del(v) { 
    247         10ms       10ms           			s[i] = v 
    248            .          .           			i++ 
    249            .          .           		} 
    250            .          .           	} 
    251            .       10ms           	clear(s[i:]) // zero/nil out the obsolete elements, for GC 
    252            .          .           	return s[:i] 
    253            .          .           } 
    254            .          .            
    255            .          .           // Replace replaces the elements s[i:j] by the given v, and returns the 
    256            .          .           // modified slice. 
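DeleteFunc compacts the slice in place and clears the tail so the garbage collector can reclaim the removed elements, which matters when they are interfaces holding pointers, as with the ssa.Value lists above. A minimal usage sketch with a hypothetical deadness predicate (the Users method is inferred from the shape name above):

    // Keep only values that still have users; the cleared tail no
    // longer pins the removed values in memory.
    vals = slices.DeleteFunc(vals, func(v ssa.Value) bool {
    	return len(v.Users()) == 0
    })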

runtime.makechan

/usr/lib/go/src/runtime/chan.go

  Total:        20ms       20ms (flat, cum) 0.042%
     74            .          .            
     75            .          .           func makechan(t *chantype, size int) *hchan { 
     76            .          .           	elem := t.Elem 
     77            .          .            
     78            .          .           	// compiler checks this but be safe. 
     79         20ms       20ms           	if elem.Size_ >= 1<<16 { 
     80            .          .           		throw("makechan: invalid channel element type") 
     81            .          .           	} 
     82            .          .           	if hchanSize%maxAlign != 0 || elem.Align_ > maxAlign { 
     83            .          .           		throw("makechan: bad alignment") 
     84            .          .           	} 

runtime.makechan

/usr/lib/go/src/runtime/chan.go

  Total:           0       70ms (flat, cum)  0.15%
    104            .          .           		// Allocate hchan and buf in one call. 
    105            .          .           		c = (*hchan)(mallocgc(hchanSize+mem, nil, true)) 
    106            .          .           		c.buf = add(unsafe.Pointer(c), hchanSize) 
    107            .          .           	default: 
    108            .          .           		// Elements contain pointers. 
    109            .       30ms           		c = new(hchan) 
    110            .       40ms           		c.buf = mallocgc(mem, elem, true) 
    111            .          .           	} 
    112            .          .            
    113            .          .           	c.elemsize = uint16(elem.Size_) 
    114            .          .           	c.elemtype = elem 
    115            .          .           	c.dataqsiz = uint(size) 
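makechan chooses between two layouts: when the element type contains no pointers, the hchan header and the buffer come from a single allocation; with pointer-carrying elements (the path sampled above) the buffer is allocated separately so the GC can scan it. For example:

    ch1 := make(chan int, 64)  // no pointers: hchan and buffer in one allocation
    ch2 := make(chan *int, 64) // pointer elements: separate, GC-scanned buffer
    _, _ = ch1, ch2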

runtime.chansend1

/usr/lib/go/src/runtime/chan.go

  Total:        20ms      1.03s (flat, cum)  2.16%
    155            .          .           } 
    156            .          .            
    157            .          .           // entry point for c <- x from compiled code. 
    158            .          .           // 
    159            .          .           //go:nosplit 
    160         20ms       20ms           func chansend1(c *hchan, elem unsafe.Pointer) { 
    161            .      1.01s           	chansend(c, elem, true, sys.GetCallerPC()) 
    162            .          .           } 
    163            .          .            
    164            .          .           /* 
    165            .          .            * generic single channel send/recv 
    166            .          .            * If block is not nil, 
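Nearly all of chansend's cumulative time below sits in lock/unlock of c.lock and in the direct handoff in send, so many small sends serialize on the channel lock. One common mitigation is to batch items per send so each lock/unlock pair moves more work; a hedged sketch with illustrative names (Item, produce, and out are not from the profiled program):

    // One chansend per batch instead of one per item.
    batch := make([]Item, 0, 32)
    for item := range produce() {
    	batch = append(batch, item)
    	if len(batch) == cap(batch) {
    		out <- batch
    		batch = make([]Item, 0, 32)
    	}
    }
    if len(batch) > 0 {
    	out <- batch
    }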

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:       100ms      100ms (flat, cum)  0.21%
    188            .          .            
    189            .          .           	if raceenabled { 
    190            .          .           		racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend)) 
    191            .          .           	} 
    192            .          .            
    193        100ms      100ms           	if c.bubble != nil && getg().bubble != c.bubble { 
    194            .          .           		fatal("send on synctest channel from outside bubble") 
    195            .          .           	} 
    196            .          .            
    197            .          .           	// Fast path: check for failed non-blocking operation without acquiring the lock. 
    198            .          .           	// 

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:        50ms      930ms (flat, cum)  1.95%
    217            .          .           	var t0 int64 
    218            .          .           	if blockprofilerate > 0 { 
    219            .          .           		t0 = cputicks() 
    220            .          .           	} 
    221            .          .            
    222            .      340ms           	lock(&c.lock) 
                                          (inlined callees: lock_spinbit.go:152, lockrank_off.go:24)

    223            .          .            
    224         10ms       10ms           	if c.closed != 0 { 
    225            .          .           		unlock(&c.lock) 
    226            .          .           		panic(plainError("send on closed channel")) 
    227            .          .           	} 
    228            .          .            
    229         30ms       30ms           	if sg := c.recvq.dequeue(); sg != nil { 
                                          (inlined callees: chan.go:893, chan.go:911, types.go:236)
    230            .          .           		// Found a waiting receiver. We pass the value we want to send 
    231            .          .           		// directly to the receiver, bypassing the channel buffer (if any). 
    232            .      280ms           		send(c, sg, ep, func() { unlock(&c.lock) }, 3) 
                                          (inlined callees: lock_spinbit.go:261, lockrank_off.go:35)

    233            .          .           		return true 
    234            .          .           	} 
    235            .          .            
    236            .          .           	if c.qcount < c.dataqsiz { 
    237            .          .           		// Space is available in the channel buffer. Enqueue the element to send. 
    238            .          .           		qp := chanbuf(c, c.sendx) 
    239            .          .           		if raceenabled { 
    240            .          .           			racenotify(c, c.sendx, nil) 
    241            .          .           		} 
    242            .          .           		typedmemmove(c.elemtype, qp, ep) 
    243            .          .           		c.sendx++ 
    244            .          .           		if c.sendx == c.dataqsiz { 
    245            .          .           			c.sendx = 0 
    246            .          .           		} 
    247            .          .           		c.qcount++ 
    248            .      250ms           		unlock(&c.lock)
                                                  unlockWithRank(l)                                  lock_spinbit.go:261
                                                  unlock2(l)                                         lockrank_off.go:35

    249            .          .           		return true 
    250            .          .           	} 
    251            .          .            
    252            .          .           	if !block { 
    253            .          .           		unlock(&c.lock) 
    254            .          .           		return false 
    255            .          .           	} 
    256            .          .            
    257            .          .           	// Block on the channel. Some receiver will complete our operation for us. 
    258            .          .           	gp := getg() 
    259            .       10ms           	mysg := acquireSudog() 
    260            .          .           	mysg.releasetime = 0 
    261         10ms       10ms           	if t0 != 0 { 
    262            .          .           		mysg.releasetime = -1 
    263            .          .           	} 
    264            .          .           	// No stack splits between assigning elem and enqueuing mysg 
    265            .          .           	// on gp.waiting where copystack can find it. 
    266            .          .           	mysg.elem = ep 
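
Note: the three send paths above (direct handoff to a parked receiver, buffered enqueue at sendx, parking on a sudog) are all reachable from ordinary user code. A minimal sketch using only the standard library; which path a given send takes depends on scheduling:

    package main

    import "fmt"

    func main() {
        c := make(chan int, 1)
        done := make(chan struct{})

        go func() {
            // If this receiver parks first, the next send takes the direct
            // path: send() copies into the receiver's stack slot and calls
            // goready on it, bypassing the buffer.
            fmt.Println(<-c)
            close(done)
        }()

        c <- 1 // direct handoff or buffered, depending on timing
        <-done

        c <- 2 // qcount < dataqsiz: enqueued at sendx, no blocking
        fmt.Println(<-c)
        // With the buffer full and no receiver, a further send would park
        // this goroutine via gopark, as in the blocking path above.
    }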

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:        10ms       10ms (flat, cum) 0.021%
    286            .          .           	// stack object, but sudogs aren't considered as roots of the 
    287            .          .           	// stack tracer. 
    288            .          .           	KeepAlive(ep) 
    289            .          .            
    290            .          .           	// someone woke us up. 
    291         10ms       10ms           	if mysg != gp.waiting { 
    292            .          .           		throw("G waiting list is corrupted") 
    293            .          .           	} 
    294            .          .           	gp.waiting = nil 
    295            .          .           	gp.activeStackChans = false 
    296            .          .           	closed := !mysg.success 

runtime.send

/usr/lib/go/src/runtime/chan.go

  Total:           0      250ms (flat, cum)  0.52%
    335            .          .           			} 
    336            .          .           			c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz 
    337            .          .           		} 
    338            .          .           	} 
    339            .          .           	if sg.elem != nil { 
    340            .       20ms           		sendDirect(c.elemtype, sg, ep) 
    341            .          .           		sg.elem = nil 
    342            .          .           	} 
    343            .          .           	gp := sg.g 
    344            .       30ms           	unlockf() 
    345            .          .           	gp.param = unsafe.Pointer(sg) 
    346            .          .           	sg.success = true 
    347            .          .           	if sg.releasetime != 0 { 
    348            .          .           		sg.releasetime = cputicks() 
    349            .          .           	} 
    350            .      200ms           	goready(gp, skip+1)
                                                  systemstack(func() {                               proc.go:480

    351            .          .           } 
    352            .          .            
    353            .          .           // timerchandrain removes all elements in channel c's buffer. 
    354            .          .           // It reports whether any elements were removed. 
    355            .          .           // Because it is only intended for timers, it does not 

runtime.sendDirect

/usr/lib/go/src/runtime/chan.go

  Total:           0       20ms (flat, cum) 0.042%
    394            .          .            
    395            .          .           	// Once we read sg.elem out of sg, it will no longer 
    396            .          .           	// be updated if the destination's stack gets copied (shrunk). 
    397            .          .           	// So make sure that no preemption points can happen between read & use. 
    398            .          .           	dst := sg.elem 
    399            .       20ms           	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) 
    400            .          .           	// No need for cgo write barrier checks because dst is always 
    401            .          .           	// Go memory. 
    402            .          .           	memmove(dst, src, t.Size_) 
    403            .          .           } 
    404            .          .            

runtime.closechan

/usr/lib/go/src/runtime/chan.go

  Total:        50ms       60ms (flat, cum)  0.13%
    409            .          .           	src := sg.elem 
    410            .          .           	typeBitsBulkBarrier(t, uintptr(dst), uintptr(src), t.Size_) 
    411            .          .           	memmove(dst, src, t.Size_) 
    412            .          .           } 
    413            .          .            
    414         10ms       10ms           func closechan(c *hchan) { 
    415            .          .           	if c == nil { 
    416            .          .           		panic(plainError("close of nil channel")) 
    417            .          .           	} 
    418         10ms       10ms           	if c.bubble != nil && getg().bubble != c.bubble { 
    419            .          .           		fatal("close of synctest channel from outside bubble") 
    420            .          .           	} 
    421            .          .            
    422            .       10ms           	lock(&c.lock)
                                                  lockWithRank(l, getLockRank(l))                    lock_spinbit.go:152
                                                  lock2(l)                                           lockrank_off.go:24

    423            .          .           	if c.closed != 0 { 
    424            .          .           		unlock(&c.lock) 
    425            .          .           		panic(plainError("close of closed channel")) 
    426            .          .           	} 
    427            .          .            
    428            .          .           	if raceenabled { 
    429            .          .           		callerpc := sys.GetCallerPC() 
    430            .          .           		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan)) 
    431            .          .           		racerelease(c.raceaddr()) 
    432            .          .           	} 
    433            .          .            
    434            .          .           	c.closed = 1 
    435            .          .            
    436            .          .           	var glist gList 
    437            .          .            
    438            .          .           	// release all readers 
    439            .          .           	for { 
    440         30ms       30ms           		sg := c.recvq.dequeue()
                                                  if !sgp.g.selectDone.CompareAndSwap(0, 1) {        chan.go:911
                                                  return Cas(&u.value, old, new)                     types.go:236
    441            .          .           		if sg == nil { 
    442            .          .           			break 
    443            .          .           		} 
    444            .          .           		if sg.elem != nil { 
    445            .          .           			typedmemclr(c.elemtype, sg.elem) 
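
Note: both panics enforced in closechan and chansend above are easy to reproduce; a minimal sketch relying only on spec-mandated behavior:

    package main

    import "fmt"

    func main() {
        c := make(chan int, 1)
        close(c)

        func() {
            defer func() { fmt.Println("recovered:", recover()) }()
            close(c) // closechan: "close of closed channel"
        }()

        func() {
            defer func() { fmt.Println("recovered:", recover()) }()
            c <- 1 // chansend: "send on closed channel"
        }()
    }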

runtime.recv

/usr/lib/go/src/runtime/chan.go

  Total:           0       30ms (flat, cum) 0.063%
    725            .          .           		// copy data from queue to receiver 
    726            .          .           		if ep != nil { 
    727            .          .           			typedmemmove(c.elemtype, ep, qp) 
    728            .          .           		} 
    729            .          .           		// copy data from sender to queue 
    730            .       10ms           		typedmemmove(c.elemtype, qp, sg.elem) 
    731            .          .           		c.recvx++ 
    732            .          .           		if c.recvx == c.dataqsiz { 
    733            .          .           			c.recvx = 0 
    734            .          .           		} 
    735            .          .           		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz 
    736            .          .           	} 
    737            .          .           	sg.elem = nil 
    738            .          .           	gp := sg.g 
    739            .          .           	unlockf() 
    740            .          .           	gp.param = unsafe.Pointer(sg) 
    741            .          .           	sg.success = true 
    742            .          .           	if sg.releasetime != 0 { 
    743            .          .           		sg.releasetime = cputicks() 
    744            .          .           	} 
    745            .       20ms           	goready(gp, skip+1)
                                                  systemstack(func() {                               proc.go:480

    746            .          .           } 

runtime.chanparkcommit

/usr/lib/go/src/runtime/chan.go

  Total:        10ms       30ms (flat, cum) 0.063%
    747            .          .            
    748         10ms       10ms           func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool { 
    749            .          .           	// There are unlocked sudogs that point into gp's stack. Stack 
    750            .          .           	// copying must lock the channels of those sudogs. 
    751            .          .           	// Set activeStackChans here instead of before we try parking 
    752            .          .           	// because we could self-deadlock in stack growth on the 
    753            .          .           	// channel lock. 
    754            .          .           	gp.activeStackChans = true 
    755            .          .           	// Mark that it's safe for stack shrinking to occur now, 
    756            .          .           	// because any thread acquiring this G's stack for shrinking 
    757            .          .           	// is guaranteed to observe activeStackChans after this store. 
    758            .          .           	gp.parkingOnChan.Store(false) 
    759            .          .           	// Make sure we unlock after setting activeStackChans and 
    760            .          .           	// unsetting parkingOnChan. The moment we unlock chanLock 
    761            .          .           	// we risk gp getting readied by a channel operation and 
    762            .          .           	// so gp could continue running before everything before 
    763            .          .           	// the unlock is visible (even to gp itself). 
    764            .       20ms           	unlock((*mutex)(chanLock))
                                                  unlockWithRank(l)                                  lock_spinbit.go:261
                                                  unlock2(l)                                         lockrank_off.go:35

    765            .          .           	return true 
    766            .          .           } 
    767            .          .            
    768            .          .           // compiler implements 
    769            .          .           // 

runtime.(*waitq).enqueue

/usr/lib/go/src/runtime/chan.go

  Total:        20ms       20ms (flat, cum) 0.042%
    868            .          .           func reflect_chanclose(c *hchan) { 
    869            .          .           	closechan(c) 
    870            .          .           } 
    871            .          .            
    872            .          .           func (q *waitq) enqueue(sgp *sudog) { 
    873         10ms       10ms           	sgp.next = nil 
    874            .          .           	x := q.last 
    875            .          .           	if x == nil { 
    876         10ms       10ms           		sgp.prev = nil 
    877            .          .           		q.first = sgp 
    878            .          .           		q.last = sgp 
    879            .          .           		return 
    880            .          .           	} 
    881            .          .           	sgp.prev = x 

runtime.(*waitq).dequeue

/usr/lib/go/src/runtime/chan.go

  Total:        90ms       90ms (flat, cum)  0.19%
    884            .          .           } 
    885            .          .            
    886            .          .           func (q *waitq) dequeue() *sudog { 
    887            .          .           	for { 
    888            .          .           		sgp := q.first 
    889         10ms       10ms           		if sgp == nil { 
    890            .          .           			return nil 
    891            .          .           		} 
    892            .          .           		y := sgp.next 
    893         10ms       10ms           		if y == nil { 
    894         10ms       10ms           			q.first = nil 
    895            .          .           			q.last = nil 
    896            .          .           		} else { 
    897            .          .           			y.prev = nil 
    898         10ms       10ms           			q.first = y 
    899            .          .           			sgp.next = nil // mark as removed (see dequeueSudoG) 
    900            .          .           		} 
    901            .          .            
    902            .          .           		// if a goroutine was put on this queue because of a 
    903            .          .           		// select, there is a small window between the goroutine 
    904            .          .           		// being woken up by a different case and it grabbing the 
    905            .          .           		// channel locks. Once it has the lock 
    906            .          .           		// it removes itself from the queue, so we won't see it after that. 
    907            .          .           		// We use a flag in the G struct to tell us when someone 
    908            .          .           		// else has won the race to signal this goroutine but the goroutine 
    909            .          .           		// hasn't removed itself from the queue yet. 
    910            .          .           		if sgp.isSelect { 
    911         50ms       50ms           			if !sgp.g.selectDone.CompareAndSwap(0, 1) {
                                                  return Cas(&u.value, old, new)                     types.go:236

    912            .          .           				// We lost the race to wake this goroutine. 
    913            .          .           				continue 
    914            .          .           			} 
    915            .          .           		} 
    916            .          .            
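
Note: the CompareAndSwap on selectDone at chan.go:911 is a wake-once guard: several channel operations can race to ready the same goroutine blocked in a select, and only the CAS winner proceeds (losers hit the continue above). A standalone sketch of the pattern; the names are illustrative, not the runtime's:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    func main() {
        var selectDone atomic.Uint32 // 0 = still parked, 1 = claimed
        var wg sync.WaitGroup

        for _, waker := range []string{"send case", "recv case"} {
            wg.Add(1)
            go func() {
                defer wg.Done()
                if selectDone.CompareAndSwap(0, 1) {
                    // Exactly one waker wins and gets to ready the goroutine.
                    fmt.Println(waker, "won the race")
                }
            }()
        }
        wg.Wait()
    }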

runtime.(*spanSet).push

/usr/lib/go/src/runtime/mspanset.go

  Total:       100ms      520ms (flat, cum)  1.09%
     78            .          .           	spans [spanSetBlockEntries]atomicMSpanPointer 
     79            .          .           } 
     80            .          .            
     81            .          .           // push adds span s to buffer b. push is safe to call concurrently 
     82            .          .           // with other push and pop operations. 
     83         10ms       40ms           func (b *spanSet) push(s *mspan) { 
     84            .          .           	// Obtain our slot. 
     85         30ms      420ms           	cursor := uintptr(b.index.incTail().tail() - 1) 
     86         60ms       60ms           	top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries 
     87            .          .            
     88            .          .           	// Do we need to add a block? 
     89            .          .           	spineLen := b.spineLen.Load() 
     90            .          .           	var block *spanSetBlock 
     91            .          .           retry: 
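
Note: the division and modulo on line 86 are the entire addressing scheme: a spanSet is a spine of fixed-size blocks, and a flat cursor splits into a spine index (top) and an in-block slot (bottom). A sketch of that mapping; the block size here is an assumed stand-in for spanSetBlockEntries:

    package spanset

    const blockEntries = 512 // assumed value of spanSetBlockEntries

    // locate maps a flat push cursor to (spine index, slot in block),
    // e.g. locate(513) == (1, 1).
    func locate(cursor uintptr) (top, bottom uintptr) {
        return cursor / blockEntries, cursor % blockEntries
    }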

runtime.(*spanSet).push

/usr/lib/go/src/runtime/mspanset.go

  Total:       110ms      110ms (flat, cum)  0.23%
    140            .          .           		unlock(&b.spineLock) 
    141            .          .           	} 
    142            .          .            
    143            .          .           	// We have a block. Insert the span atomically, since there may be 
    144            .          .           	// concurrent readers via the block API. 
    145        110ms      110ms           	block.spans[bottom].StoreNoWB(s)
                                                  p.p.StoreNoWB(unsafe.Pointer(s))                   mspanset.go:412
                                                  StorepNoWB(unsafe.Pointer(&u.value), value)        types.go:479
    146            .          .           } 
    147            .          .            
    148            .          .           // pop removes and returns a span from buffer b, or nil if b is empty. 
    149            .          .           // pop is safe to call concurrently with other pop and push operations. 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    151            .          .           	var head, tail uint32 
    152            .          .           claimLoop: 
    153            .          .           	for { 
    154            .          .           		headtail := b.index.load() 
    155         10ms       10ms           		head, tail = headtail.split()
                                                  return h.head(), h.tail()                          mspanset.go:356
                                                  return uint32(h >> 32)                             mspanset.go:346

    156            .          .           		if head >= tail { 
    157            .          .           			// The buf is empty, as far as we can tell. 
    158            .          .           			return nil 
    159            .          .           		} 
    160            .          .           		// Check if the head position we want to claim is actually 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        20ms       20ms (flat, cum) 0.042%
    172            .          .           		// Try to claim the current head by CASing in an updated head. 
    173            .          .           		// This may fail transiently due to a push which modifies the 
    174            .          .           		// tail, so keep trying while the head isn't changing. 
    175            .          .           		want := head 
    176            .          .           		for want == head { 
    177         20ms       20ms           			if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {
                                                  return h.u.CompareAndSwap(uint64(old), uint64(new)) mspanset.go:371
                                                  return Cas64(&u.value, old, new)                   types.go:325

    178            .          .           				break claimLoop 
    179            .          .           			} 
    180            .          .           			headtail = b.index.load() 
    181            .          .           			head, tail = headtail.split() 
    182            .          .           		} 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    194            .          .           	// Given that the spine length is correct, we know we will never 
    195            .          .           	// see a nil block here, since the length is always updated after 
    196            .          .           	// the block is set. 
    197            .          .           	block := blockp.Load() 
    198            .          .           	s := block.spans[bottom].Load() 
    199         10ms       10ms           	for s == nil { 
    200            .          .           		// We raced with the span actually being set, but given that we 
    201            .          .           		// know a block for this span exists, the race window here is 
    202            .          .           		// extremely small. Try again. 
    203            .          .           		s = block.spans[bottom].Load() 
    204            .          .           	} 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        30ms       30ms (flat, cum) 0.063%
    217            .          .           	// popping its corresponding mspan) by the time we get here. Because 
    218            .          .           	// we're the last popper, we also don't have to worry about concurrent 
    219            .          .           	// pushers (there can't be any). Note that we may not be the popper 
    220            .          .           	// which claimed the last slot in the block, we're just the last one 
    221            .          .           	// to finish popping. 
    222         30ms       30ms           	if block.popped.Add(1) == spanSetBlockEntries {
                                                  return Xadd(&u.value, delta)                       types.go:291

    223            .          .           		// Clear the block's pointer. 
    224            .          .           		blockp.StoreNoWB(nil) 
    225            .          .            
    226            .          .           		// Return the block to the block pool. 
    227            .          .           		spanSetBlockPool.free(block) 
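
Note: the popped counter makes block recycling race-free without a lock: every popper increments it, and the popper that observes the full count is by construction the last one touching the block, so it alone frees it. The idiom in isolation:

    package main

    import (
        "fmt"
        "sync"
        "sync/atomic"
    )

    type block struct{ popped atomic.Uint32 }

    func pop(b *block, entries uint32, free func(*block)) {
        // Whoever brings the count to entries is the last user of the
        // block and the only goroutine allowed to free it.
        if b.popped.Add(1) == entries {
            free(b)
        }
    }

    func main() {
        b := &block{}
        var wg sync.WaitGroup
        for range 8 {
            wg.Add(1)
            go func() {
                defer wg.Done()
                pop(b, 8, func(*block) { fmt.Println("freed exactly once") })
            }()
        }
        wg.Wait()
    }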

runtime.headTailIndex.head

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    341            .          .           	return headTailIndex(uint64(head)<<32 | uint64(tail)) 
    342            .          .           } 
    343            .          .            
    344            .          .           // head returns the head of a headTailIndex value. 
    345            .          .           func (h headTailIndex) head() uint32 { 
    346         10ms       10ms           	return uint32(h >> 32) 
    347            .          .           } 
    348            .          .            
    349            .          .           // tail returns the tail of a headTailIndex value. 
    350            .          .           func (h headTailIndex) tail() uint32 { 

runtime.headTailIndex.split

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    352            .          .           } 
    353            .          .            
    354            .          .           // split splits the headTailIndex value into its parts. 
    355            .          .           func (h headTailIndex) split() (head uint32, tail uint32) { 
    356         10ms       10ms           	return h.head(), h.tail()
                                                  return uint32(h >> 32)                             mspanset.go:346

    357            .          .           } 
    358            .          .            
    359            .          .           // atomicHeadTailIndex is an atomically-accessed headTailIndex. 
    360            .          .           type atomicHeadTailIndex struct { 
    361            .          .           	u atomic.Uint64 

runtime.(*atomicHeadTailIndex).cas

/usr/lib/go/src/runtime/mspanset.go

  Total:        20ms       20ms (flat, cum) 0.042%
    366            .          .           	return headTailIndex(h.u.Load()) 
    367            .          .           } 
    368            .          .            
    369            .          .           // cas atomically compares-and-swaps a headTailIndex value. 
    370            .          .           func (h *atomicHeadTailIndex) cas(old, new headTailIndex) bool { 
    371         20ms       20ms           	return h.u.CompareAndSwap(uint64(old), uint64(new))
                                                  return Cas64(&u.value, old, new)                   types.go:325

    372            .          .           } 
    373            .          .            
    374            .          .           // incHead atomically increments the head of a headTailIndex. 
    375            .          .           func (h *atomicHeadTailIndex) incHead() headTailIndex { 
    376            .          .           	return headTailIndex(h.u.Add(1 << 32)) 

runtime.(*atomicHeadTailIndex).incTail

/usr/lib/go/src/runtime/mspanset.go

  Total:        30ms      390ms (flat, cum)  0.82%
    380            .          .           func (h *atomicHeadTailIndex) decHead() headTailIndex { 
    381            .          .           	return headTailIndex(h.u.Add(-(1 << 32))) 
    382            .          .           } 
    383            .          .            
    384            .          .           // incTail atomically increments the tail of a headTailIndex. 
    385            .      360ms           func (h *atomicHeadTailIndex) incTail() headTailIndex { 
    386         30ms       30ms           	ht := headTailIndex(h.u.Add(1))
                                                  return Xadd64(&u.value, delta)                     types.go:344

    387            .          .           	// Check for overflow. 
    388            .          .           	if ht.tail() == 0 { 
    389            .          .           		print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n") 
    390            .          .           		throw("headTailIndex overflow") 
    391            .          .           	} 
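
Note: head and tail share one atomic uint64, head in the high 32 bits and tail in the low 32, so incTail is a single Add(1), incHead a single Add(1 << 32), and both halves come back consistent from one load. A minimal sketch:

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    func split(ht uint64) (head, tail uint32) {
        return uint32(ht >> 32), uint32(ht)
    }

    func main() {
        var u atomic.Uint64
        u.Add(1)       // incTail: bump the low half
        u.Add(1)       // ...and again
        u.Add(1 << 32) // incHead: bump the high half
        h, t := split(u.Load())
        fmt.Println(h, t) // 1 2
    }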

runtime.(*atomicMSpanPointer).StoreNoWB

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    407            .          .           	return (*mspan)(p.p.Load()) 
    408            .          .           } 
    409            .          .            
    410            .          .           // Store stores an *mspan. 
    411            .          .           func (p *atomicMSpanPointer) StoreNoWB(s *mspan) { 
    412         10ms       10ms           	p.p.StoreNoWB(unsafe.Pointer(s))
                                                  StorepNoWB(unsafe.Pointer(&u.value), value)        types.go:479

    413            .          .           } 

git.urbach.dev/cli/q/src/codegen..markAlive.Backward[go.shape.[]*git.urbach.dev/cli/q/src/codegen.Step,go.shape.*uint8].func1

/usr/lib/go/src/slices/iter.go

  Total:       310ms      1.99s (flat, cum)  4.17%
     23            .          .            
     24            .          .           // Backward returns an iterator over index-value pairs in the slice, 
     25            .          .           // traversing it backward with descending indices. 
     26            .          .           func Backward[Slice ~[]E, E any](s Slice) iter.Seq2[int, E] { 
     27            .          .           	return func(yield func(int, E) bool) { 
     28         10ms       10ms           		for i := len(s) - 1; i >= 0; i-- { 
     29        300ms      1.98s           			if !yield(i, s[i]) {
                                                  [inlined/called frames, condensed from the listing:
                                                   markAlive.go:31, 32, 36, 38 (slices.Contains → slices.Index, slices.go:97-118),
                                                   CompileToAssembly.go:22, 23, 27, 28, 31,
                                                   assignFreeRegister.go:6, Block.go:191, 197]

     30            .          .           				return 
     31            .          .           			} 
     32            .          .           		} 
     33            .          .           	} 
     34            .          .           } 
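
Note: the hot yield call above is plain range-over-func; markAlive walks the steps in reverse with slices.Backward and stops by breaking out of the loop, which makes yield return false and ends the iteration. Usage looks like:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        steps := []string{"load", "add", "store"}
        for i, s := range slices.Backward(steps) {
            fmt.Println(i, s) // 2 store, 1 add, 0 load
            if s == "add" {
                break // yield returns false and Backward unwinds
            }
        }
    }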

git.urbach.dev/cli/q/src/core.(*Function).AddInput

/home/user/q/src/core/Function.go

  Total:           0      190ms (flat, cum)   0.4%
     29            .          .           	codegen.Function 
     30            .          .           } 
     31            .          .            
     32            .          .           // AddInput adds an input parameter. 
     33            .          .           func (f *Function) AddInput(tokens token.List, source token.Source) { 
     34            .      190ms           	f.Input = append(f.Input, &ssa.Parameter{ 
     35            .          .           		Tokens: tokens, 
     36            .          .           		Source: source, 
     37            .          .           	}) 

git.urbach.dev/cli/q/src/core.(*Function).AddOutput

/home/user/q/src/core/Function.go

  Total:        10ms       70ms (flat, cum)  0.15%
     39            .          .            
     40            .          .           // AddOutput adds an output parameter. 
     41            .          .           func (f *Function) AddOutput(tokens token.List, source token.Source) { 
     42         10ms       70ms           	f.Output = append(f.Output, &ssa.Parameter{ 
     43            .          .           		Tokens: tokens, 
     44            .          .           		Source: source, 
     45            .          .           	}) 
     46            .          .           } 
     47            .          .            

git.urbach.dev/cli/q/src/core.(*Function).Body

/home/user/q/src/core/Function.go

  Total:        10ms       10ms (flat, cum) 0.021%
     51            .          .           	f.FullName += suffix 
     52            .          .           } 
     53            .          .            
     54            .          .           // Body returns the function body. 
     55            .          .           func (f *Function) Body() token.List { 
     56         10ms       10ms           	return f.File.Tokens[f.body.Start():f.body.End()] 
     57            .          .           } 
     58            .          .            
     59            .          .           // IsExtern returns true if the function has no body. 
     60            .          .           func (f *Function) IsExtern() bool { 
     61            .          .           	return f.body.End() == 0 

git.urbach.dev/cli/q/src/core.(*Function).Package

/home/user/q/src/core/Function.go

  Total:        20ms       20ms (flat, cum) 0.042%
     71            .          .           	return f.name 
     72            .          .           } 
     73            .          .            
     74            .          .           // Package returns the package name. 
     75            .          .           func (f *Function) Package() string { 
     76         20ms       20ms           	return f.pkg 
     77            .          .           } 
     78            .          .            
     79            .          .           // SetBody sets the token range for the function body. 
     80            .          .           func (f *Function) SetBody(start int, end int) { 
     81            .          .           	f.body = token.NewSource(token.Position(start), token.Position(end)) 

git.urbach.dev/cli/q/src/core.(*Function).Variants

/home/user/q/src/core/Function.go

  Total:       260ms      980ms (flat, cum)  2.05%
     87            .          .           } 
     88            .          .            
     89            .          .           // Variants returns all function overloads. 
     90            .          .           func (f *Function) Variants(yield func(*Function) bool) { 
     91            .          .           	for { 
     92        260ms      980ms           		if !yield(f) {
                                                  if !yield(variant) {                               Environment.go:68
                                                  [inlined/called frames, condensed from the listing:
                                                   parseParameters.go:14-16, 19-21, 27, 28, 34, 40, 55,
                                                   builder.go:114, Compile.go:53]

     93            .          .           			return 
     94            .          .           		} 
     95            .          .            
     96            .          .           		f = f.Next 
     97            .          .            
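
Note: Variants is a push iterator over a singly linked chain of overloads (f.Next); the listing truncates before the loop's exit, so the termination condition below is an assumption. A standalone sketch of the shape, with an illustrative stand-in type rather than the real core.Function:

    package main

    import "fmt"

    type fn struct {
        name string
        next *fn
    }

    func (f *fn) variants(yield func(*fn) bool) {
        for f != nil { // exit condition assumed; the listing cuts off after f = f.Next
            if !yield(f) {
                return
            }
            f = f.next
        }
    }

    func main() {
        c := &fn{name: "f(string)"}
        b := &fn{name: "f(int)", next: c}
        a := &fn{name: "f()", next: b}
        for v := range a.variants { // range-over-func, as at Environment.go:68
            fmt.Println(v.name)
        }
    }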

git.urbach.dev/cli/q/src/expression.Parse

/home/user/q/src/expression/Parse.go

  Total:       300ms         4s (flat, cum)  8.37%
      3            .          .           import ( 
      4            .          .           	"git.urbach.dev/cli/q/src/token" 
      5            .          .           ) 
      6            .          .            
      7            .          .           // Parse generates an expression tree from tokens. 
      8         50ms      160ms           func Parse(tokens token.List) *Expression { 
      9            .          .           	var ( 
     10            .          .           		cursor *Expression 
     11            .          .           		root   *Expression 
     12            .          .           		i      uint 
     13            .          .           	) 
     14            .          .            
     15            .          .           loop: 
     16            .          .           	for i < uint(len(tokens)) { 
     17         10ms       10ms           		t := tokens[i] 
     18            .          .            
     19            .          .           		switch t.Kind { 
     20         30ms       30ms           		case token.GroupStart, token.ArrayStart, token.BlockStart: 
     21            .          .           			i++ 
     22            .          .           			groupLevel := 1 
     23            .          .           			groupPosition := i 
     24            .          .            
     25            .          .           			for i < uint(len(tokens)) { 
     26         20ms       20ms           				t = tokens[i] 
     27            .          .            
     28         10ms       10ms           				switch t.Kind { 
     29         20ms       20ms           				case token.GroupStart, token.ArrayStart, token.BlockStart: 
     30            .          .           					groupLevel++ 
     31         10ms       10ms           				case token.GroupEnd, token.ArrayEnd, token.BlockEnd: 
     32            .          .           					groupLevel-- 
     33            .          .            
     34            .          .           					if groupLevel == 0 { 
     35         10ms      1.23s           						root, cursor = handleGroupEnd(tokens, root, cursor, groupPosition, i, t) 
     36            .          .           						i++ 
     37            .          .           						continue loop 
     38            .          .           					} 
     39            .          .           				} 
     40            .          .            
     41            .          .           				i++ 
     42            .          .           			} 
     43            .          .            
     44            .          .           			break loop 
     45            .          .           		} 
     46            .          .            
     47            .          .           		switch { 
     48         40ms       40ms           		case cursor != nil && cursor.Token.Kind == token.Cast && len(cursor.Children) < 2: 
     49            .          .           			cursor.AddChild(&newTypeExpression(tokens[i:]).Expression) 
     50            .          .           			return root 
     51            .          .            
     52         10ms       10ms           		case t.Kind.IsLiteral():
                                                  return k == Identifier || k == Number || k == String || k == Rune || k.IsBuiltin() Kind.go:125

     53         10ms      2.08s           			root, cursor = handleLiteral(root, cursor, t) 
     54            .          .            
     55            .          .           		case !t.Kind.IsOperator(): 
     56            .          .           			// do nothing 
     57            .          .            
     58            .          .           		case cursor == nil: 
     59            .          .           			cursor = newLeaf(t) 
     60            .          .           			cursor.precedence = precedence(t.Kind) 
     61            .          .           			root = cursor 
     62            .          .            
     63            .          .           		default: 
     64            .      150ms           			node := newLeaf(t)
                                                  return &Expression{Token: t}                       newLeaf.go:7

     65            .          .           			node.precedence = precedence(t.Kind) 
     66            .          .            
     67            .          .           			if cursor.Token.Kind.IsOperator() { 
     68            .      100ms           				root = handleOperator(root, cursor, node) 
     69            .          .           			} else { 
     70         50ms      100ms           				node.AddChild(cursor)
                                                  if expr.Children == nil {                          Expression.go:20
                                                  expr.Children = make([]*Expression, 0, 2)          Expression.go:21

     71            .          .           				root = node 
     72            .          .           			} 
     73            .          .            
     74            .          .           			cursor = node 
     75            .          .           		} 
     76            .          .            
     77         10ms       10ms           		i++ 
     78            .          .           	} 
     79            .          .            
     80            .          .           	if root == nil { 
     81            .          .           		root = New() 
     82            .          .           	} 
     83            .          .            
     84         20ms       20ms           	return root 
     85            .          .           } 
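
Note: the inner loop of Parse (source lines 25-42) finds the matching close token with a depth counter instead of recursion. The same scan in isolation, simplified to runes instead of token.Kind:

    package parse

    // matchGroup returns the index of the bracket that closes tokens[open],
    // or -1 if the group never closes, mirroring Parse's groupLevel scan.
    func matchGroup(tokens []rune, open int) int {
        depth := 1
        for i := open + 1; i < len(tokens); i++ {
            switch tokens[i] {
            case '(', '[', '{':
                depth++
            case ')', ']', '}':
                depth--
                if depth == 0 {
                    return i
                }
            }
        }
        return -1
    }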

git.urbach.dev/cli/q/src/expression.(*Expression).AddChild

/home/user/q/src/expression/Expression.go

  Total:       130ms      340ms (flat, cum)  0.71%
     15            .          .           	precedence int8 
     16            .          .           } 
     17            .          .            
     18            .          .           // AddChild adds a child to the expression. 
     19            .          .           func (expr *Expression) AddChild(child *Expression) { 
     20         60ms       60ms           	if expr.Children == nil { 
     21         10ms      140ms           		expr.Children = make([]*Expression, 0, 2) 
     22            .          .           	} 
     23            .          .            
     24         60ms      140ms           	expr.Children = append(expr.Children, child) 
     25            .          .           	child.Parent = expr 
     26            .          .           } 
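
Note: AddChild allocates the child slice lazily with capacity 2, presumably because most expression nodes are binary operators; leaf nodes then never allocate a slice at all. The idiom in isolation:

    package sketch

    type node struct {
        parent   *node
        children []*node
    }

    func (n *node) addChild(c *node) {
        if n.children == nil {
            // One up-front allocation sized for the common binary case;
            // append only reallocates for a third or later child.
            n.children = make([]*node, 0, 2)
        }
        n.children = append(n.children, c)
        c.parent = n
    }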

git.urbach.dev/cli/q/src/expression.(*Expression).EachLeaf

/home/user/q/src/expression/Expression.go

  Total:        70ms      170ms (flat, cum)  0.36%
     28            .          .           // EachLeaf iterates through all leaves in the tree. 
     29            .          .           func (expr *Expression) EachLeaf(yield func(*Expression) bool) bool { 
     30         20ms       20ms           	if expr.IsLeaf() {
                                                  return len(expr.Children) == 0                     Expression.go:68
     31         20ms       50ms           		return yield(expr) 
     32            .          .           	} 
     33            .          .            
     34            .          .           	for _, child := range expr.Children { 
     35         30ms      100ms           		if !child.EachLeaf(yield) { 
     36            .          .           			return false 
     37            .          .           		} 
     38            .          .           	} 
     39            .          .            
     40            .          .           	return true 
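
Note: EachLeaf threads the yield result through the recursion so that a consumer's break aborts the whole walk, and Leaves (below) merely adapts it to iter.Seq. The same pattern, self-contained:

    package main

    import (
        "fmt"
        "iter"
    )

    type expr struct {
        val      string
        children []*expr
    }

    // eachLeaf visits leaves left to right; returning false unwinds every
    // recursive frame, exactly like the yield plumbing above.
    func (e *expr) eachLeaf(yield func(*expr) bool) bool {
        if len(e.children) == 0 {
            return yield(e)
        }
        for _, c := range e.children {
            if !c.eachLeaf(yield) {
                return false
            }
        }
        return true
    }

    func (e *expr) leaves() iter.Seq[*expr] {
        return func(yield func(*expr) bool) { e.eachLeaf(yield) }
    }

    func main() {
        tree := &expr{val: "+", children: []*expr{{val: "a"}, {val: "b"}}}
        for leaf := range tree.leaves() {
            fmt.Println(leaf.val) // a, then b
        }
    }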

git.urbach.dev/cli/q/src/expression.(*Expression).InsertAbove

/home/user/q/src/expression/Expression.go

  Total:        60ms      140ms (flat, cum)  0.29%
     53            .          .            
     54            .          .           // InsertAbove replaces this expression in its parent's children with the given new parent, 
     55            .          .           // and attaches this expression as a child of the new parent. Effectively, it promotes the 
     56            .          .           // given tree above the current node. It assumes that the caller is the last child. 
     57            .          .           func (expr *Expression) InsertAbove(tree *Expression) { 
     58         10ms       10ms           	if expr.Parent != nil { 
     59            .          .           		expr.Parent.Children[len(expr.Parent.Children)-1] = tree 
     60            .          .           		tree.Parent = expr.Parent 
     61            .          .           	} 
     62            .          .            
     63         50ms      130ms           	tree.AddChild(expr)
                                                  if expr.Children == nil {                          Expression.go:20
                                                  expr.Children = make([]*Expression, 0, 2)          Expression.go:21
                                                  expr.Children = append(expr.Children, child)       Expression.go:24

     64            .          .           } 
     65            .          .            

git.urbach.dev/cli/q/src/expression.(*Expression).IsLeaf

/home/user/q/src/expression/Expression.go

  Total:        10ms       10ms (flat, cum) 0.021%
     66            .          .           // IsLeaf returns true if the expression has no children. 
     67            .          .           func (expr *Expression) IsLeaf() bool { 
     68         10ms       10ms           	return len(expr.Children) == 0 
     69            .          .           } 
     70            .          .            
     71            .          .           // LastChild returns the last child. 
     72            .          .           func (expr *Expression) LastChild() *Expression { 
     73            .          .           	return expr.Children[len(expr.Children)-1] 

git.urbach.dev/cli/q/src/expression.(*Expression).Source.(*Expression).Leaves.func1

/home/user/q/src/expression/Expression.go

  Total:           0      100ms (flat, cum)  0.21%
     74            .          .           } 
     75            .          .            
     76            .          .           // Leaves iterates through all leaves in the tree. 
     77            .          .           func (expr *Expression) Leaves() iter.Seq[*Expression] { 
     78            .          .           	return func(yield func(*Expression) bool) { 
     79            .      100ms           		expr.EachLeaf(yield) 
     80            .          .           	} 
     81            .          .           } 
     82            .          .            
     83            .          .           // RemoveChild removes a child from the expression. 
     84            .          .           func (expr *Expression) RemoveChild(child *Expression) { 

git.urbach.dev/cli/q/src/expression.(*Expression).Source

/home/user/q/src/expression/Expression.go

  Total:        50ms      150ms (flat, cum)  0.31%
    102            .          .           	expr.Token.Reset() 
    103            .          .           	expr.precedence = 0 
    104            .          .           } 
    105            .          .            
    106            .          .           // Source returns the start and end positions in the source file. 
    107         10ms       10ms           func (expr *Expression) Source() token.Source { 
    108         10ms       10ms           	start := expr.Token.Position 
    109            .          .           	end := expr.Token.End() 
    110            .          .            
    111         30ms      130ms           	for leaf := range expr.Leaves() {
                                                  expr.EachLeaf(yield)                               Expression.go:79

git.urbach.dev/cli/q/src/expression.(*Expression).Source-range1

/home/user/q/src/expression/Expression.go

  Total:        10ms       10ms (flat, cum) 0.021%
    113         10ms       10ms           			start = leaf.Token.Position 
    114            .          .           		} else if leaf.Token.End() > end { 

git.urbach.dev/cli/q/src/expression.(*Expression).Source

/home/user/q/src/expression/Expression.go

  Total:        20ms       20ms (flat, cum) 0.042%
    116            .          .           		} 
    117         10ms       10ms           	} 
    118            .          .            
    119         10ms       10ms           	return token.NewSource(start, end) 
    120            .          .           } 
    121            .          .            
    122            .          .           // SourceString returns the string that was parsed in this expression. 
    123            .          .           func (expr *Expression) SourceString(source []byte) string { 
    124            .          .           	region := expr.Source() 

git.urbach.dev/cli/q/src/expression.(*Expression).String

/home/user/q/src/expression/Expression.go

  Total:           0       30ms (flat, cum) 0.063%
    156            .          .           } 
    157            .          .            
    158            .          .           // String generates a textual representation of the expression. 
    159            .          .           func (expr *Expression) String(source []byte) string { 
    160            .          .           	builder := strings.Builder{} 
    161            .       30ms           	expr.write(&builder, source) 
    162            .          .           	return builder.String() 
    163            .          .           } 

internal/runtime/maps.bitset.first

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
     45            .          .           // first returns the relative index of the first control byte in the group that 
     46            .          .           // is in the set. 
     47            .          .           // 
     48            .          .           // Preconditions: b is not 0 (empty). 
     49            .          .           func (b bitset) first() uintptr { 
     50         30ms       30ms           	return bitsetFirst(b)
                                                  return uintptr(sys.TrailingZeros64(uint64(b))) >> 3 group.go:58

     51            .          .           } 
     52            .          .            
     53            .          .           // Portable implementation of first. 

internal/runtime/maps.bitsetFirst

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
     55            .          .           // On AMD64, this is replaced with an intrinsic that simply does 
     56            .          .           // TrailingZeros64. There is no need to shift as the bitset is packed. 
     57            .          .           func bitsetFirst(b bitset) uintptr { 
     58         30ms       30ms           	return uintptr(sys.TrailingZeros64(uint64(b))) >> 3 
     59            .          .           } 
     60            .          .            
     61            .          .           // removeFirst clears the first set bit (that is, resets the least significant 
     62            .          .           // set bit to 0). 
     63            .          .           func (b bitset) removeFirst() bitset { 
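
Note: the bitset stores one flag per control byte at bit positions 7, 15, 23, and so on, so the index of the first marked byte is the trailing-zero count divided by 8, hence the >> 3. Worked in isolation:

    package main

    import (
        "fmt"
        "math/bits"
    )

    func main() {
        // A match in byte 2 sets that byte's high bit: bit 23 overall.
        b := uint64(0x80 << 16)
        fmt.Println(bits.TrailingZeros64(b) >> 3) // 23 >> 3 == 2
    }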

internal/runtime/maps.(*ctrlGroup).get

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    129            .          .           // get returns the i-th control byte. 
    130            .          .           func (g *ctrlGroup) get(i uintptr) ctrl { 
    131            .          .           	if goarch.BigEndian { 
    132            .          .           		return *(*ctrl)(unsafe.Add(unsafe.Pointer(g), 7-i)) 
    133            .          .           	} 
    134         10ms       10ms           	return *(*ctrl)(unsafe.Add(unsafe.Pointer(g), i)) 
    135            .          .           } 
    136            .          .            
    137            .          .           // set sets the i-th control byte. 
    138            .          .           func (g *ctrlGroup) set(i uintptr, c ctrl) { 
    139            .          .           	if goarch.BigEndian { 

internal/runtime/maps.ctrlGroup.matchH2

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        90ms       90ms (flat, cum)  0.19%
    149            .          .           } 
    150            .          .            
    151            .          .           // matchH2 returns the set of slots which are full and for which the 7-bit hash 
    152            .          .           // matches the given value. May return false positives. 
    153            .          .           func (g ctrlGroup) matchH2(h uintptr) bitset { 
    154         90ms       90ms           	return ctrlGroupMatchH2(g, h) // inlined: group.go:170-171
    155            .          .           } 
    156            .          .            
    157            .          .           // Portable implementation of matchH2. 
    158            .          .           // 
    159            .          .           // Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See 

internal/runtime/maps.ctrlGroupMatchH2

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        80ms       80ms (flat, cum)  0.17%
    165            .          .           	// subtract off 0x0101 the first 2 bytes we'll become 0xffff and both be 
    166            .          .           	// considered matches of h. The false positive matches are not a problem, 
    167            .          .           	// just a rare inefficiency. Note that they only occur if there is a real 
    168            .          .           	// match and never occur on ctrlEmpty, or ctrlDeleted. The subsequent key 
    169            .          .           	// comparisons ensure that there is no correctness issue. 
    170         70ms       70ms           	v := uint64(g) ^ (bitsetLSB * uint64(h)) 
    171         10ms       10ms           	return bitset(((v - bitsetLSB) &^ v) & bitsetMSB) 
    172            .          .           } 
    173            .          .            
    174            .          .           // matchEmpty returns the set of slots in the group that are empty. 
    175            .          .           func (g ctrlGroup) matchEmpty() bitset { 
    176            .          .           	return ctrlGroupMatchEmpty(g) 

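The two inlined lines behind matchH2 are the classic SWAR zero-byte test. A standalone sketch, with the constants as they appear in group.go (matchByte is an illustrative name, not the runtime's):

    package main

    import "fmt"

    const (
    	bitsetLSB = 0x0101010101010101 // low bit of every byte
    	bitsetMSB = 0x8080808080808080 // high bit of every byte
    )

    // matchByte reports, one MSB flag per byte, which bytes of g equal h.
    func matchByte(g uint64, h uint8) uint64 {
    	v := g ^ (bitsetLSB * uint64(h)) // matching bytes become 0x00
    	// (v - bitsetLSB) &^ v raises the high bit of every zero byte
    	// (with the rare false positives the comment above describes);
    	// & bitsetMSB keeps just those flags.
    	return ((v - bitsetLSB) &^ v) & bitsetMSB
    }

    func main() {
    	g := uint64(0x25)<<8 | uint64(0x25)<<48    // bytes 1 and 6 hold 0x25
    	fmt.Printf("%#016x\n", matchByte(g, 0x25)) // 0x0080000000008000
    }
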
internal/runtime/maps.(*groupReference).ctrls

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
    275            .          .           	return v, false 
    276            .          .           } 
    277            .          .            
    278            .          .           // ctrls returns the group control word. 
    279            .          .           func (g *groupReference) ctrls() *ctrlGroup { 
    280         30ms       30ms           	return (*ctrlGroup)(g.data) 
    281            .          .           } 
    282            .          .            

internal/runtime/maps.(*groupReference).key

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        60ms       60ms (flat, cum)  0.13%
    283            .          .           // key returns a pointer to the key at index i. 
    284            .          .           func (g *groupReference) key(typ *abi.SwissMapType, i uintptr) unsafe.Pointer { 
    285         50ms       50ms           	offset := groupSlotsOffset + i*typ.SlotSize 
    286            .          .            
    287         10ms       10ms           	return unsafe.Pointer(uintptr(g.data) + offset) 
    288            .          .           } 
    289            .          .            

internal/runtime/maps.(*groupReference).elem

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
    290            .          .           // elem returns a pointer to the element at index i. 
    291            .          .           func (g *groupReference) elem(typ *abi.SwissMapType, i uintptr) unsafe.Pointer { 
    292         30ms       30ms           	offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff 
    293            .          .            
    294            .          .           	return unsafe.Pointer(uintptr(g.data) + offset) 
    295            .          .           } 
    296            .          .            
    297            .          .           // groupsReference is a wrapper type describing an array of groups stored at 

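key and elem above are pure offset arithmetic over a fixed group layout: a control word followed by 8 slots, each a key immediately followed by its element. A sketch with illustrative numbers (slotLayout here is not the real abi.SwissMapType):

    package main

    import "fmt"

    type slotLayout struct {
    	slotsOffset uintptr // groupSlotsOffset: size of the control word
    	slotSize    uintptr // typ.SlotSize: key + elem, padded
    	elemOff     uintptr // typ.ElemOff: offset of elem within a slot
    }

    func (l slotLayout) key(i uintptr) uintptr  { return l.slotsOffset + i*l.slotSize }
    func (l slotLayout) elem(i uintptr) uintptr { return l.key(i) + l.elemOff }

    func main() {
    	// e.g. a map with 8-byte keys and elems: 16-byte slots.
    	l := slotLayout{slotsOffset: 8, slotSize: 16, elemOff: 8}
    	fmt.Println(l.key(3), l.elem(3)) // 56 64
    }
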
internal/runtime/maps.newGroups

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:           0      1.15s (flat, cum)  2.41%
    311            .          .           // 
    312            .          .           // Length must be a power of two. 
    313            .          .           func newGroups(typ *abi.SwissMapType, length uint64) groupsReference { 
    314            .          .           	return groupsReference{ 
    315            .          .           		// TODO: make the length type the same throughout. 
    316            .      1.15s           		data:       newarray(typ.Group, int(length)), 
    317            .          .           		lengthMask: length - 1, 
    318            .          .           	} 
    319            .          .           } 
    320            .          .            

internal/runtime/maps.(*groupsReference).group

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
    321            .          .           // group returns the group at index i. 
    322            .          .           func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference { 
    323            .          .           	// TODO(prattmic): Do something here about truncation on cast to 
    324            .          .           	// uintptr on 32-bit systems? 
    325         30ms       30ms           	offset := uintptr(i) * typ.GroupSize 
    326            .          .            
    327            .          .           	return groupReference{ 
    328            .          .           		data: unsafe.Pointer(uintptr(g.data) + offset), 
    329            .          .           	} 
    330            .          .           } 

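newGroups stores lengthMask = length-1 because, for a power-of-two length, masking is the same as the modulo that group indexing needs. A sketch:

    package main

    import "fmt"

    func main() {
    	const length = 8
    	const lengthMask = length - 1
    	for _, h := range []uint64{5, 8, 13} {
    		fmt.Println(h&lengthMask, h%length) // identical results
    	}
    }
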
git.urbach.dev/cli/q/src/codegen.(*Function).markAlive

/home/user/q/src/codegen/markAlive.go

  Total:       240ms      640ms (flat, cum)  1.34%
      6            .          .           	"git.urbach.dev/cli/q/src/ssa" 
      7            .          .           ) 
      8            .          .            
      9            .          .           // markAlive marks the `live` value in the `block` as alive and recursively 
     10            .          .           // proceeds into the predecessors of `block` if they can reach the definition. 
     11         10ms      260ms           func (f *Function) markAlive(live *Step, block *ssa.Block, use *Step, first bool) { 
     12            .          .           	if use.Block == block { 
     13         10ms       10ms           		phi, isPhi := use.Value.(*ssa.Phi) 
     14            .          .            
     15            .          .           		if isPhi { 
     16            .          .           			index := phi.Arguments.Index(live.Value) 
     17            .          .           			pre := block.Predecessors[index] 
     18            .       80ms           			f.markAlive(live, pre, use, false) 
     19            .          .           			return 
     20            .          .           		} 
     21            .          .           	} 
     22            .          .            
     23            .       20ms           	region := f.BlockToRegion[block] 
     24            .          .            
     25         20ms       20ms           	if first && use.Block == block && (block.Loop == nil || live.Block.Loop != nil) { 
     26            .          .           		region.End = uint32(use.Index) 
     27            .          .           	} 
     28            .          .            
     29            .          .           	steps := f.Steps[region.Start:region.End] 
     30            .          .            
     31        200ms      250ms           	for _, current := range slices.Backward(steps) { // inlined: iter.go:28-29, slices.go:97-118, markAlive.go:32-38

git.urbach.dev/cli/q/src/codegen.(*Function).markAlive-range1

/home/user/q/src/codegen/markAlive.go

  Total:       150ms      200ms (flat, cum)  0.42%
     32         90ms       90ms           		if slices.Contains(current.Live, live) { // inlined: slices.go:97-118
     33            .          .           			return 
     34            .          .           		} 
     35            .          .            
     36         40ms       90ms           		current.Live = append(current.Live, live) 
     37            .          .            
     38         20ms       20ms           		if live.Value == current.Value { 
     39            .          .           			_, isParam := current.Value.(*ssa.Parameter) 
     40            .          .           			_, isPhi := current.Value.(*ssa.Phi) 
     41            .          .            

git.urbach.dev/cli/q/src/codegen.(*Function).markAlive

/home/user/q/src/codegen/markAlive.go

  Total:        50ms      410ms (flat, cum)  0.86%
     43            .          .           				return 
     44            .          .           			} 
     45            .          .           		} 
     46         10ms       10ms           	} 
     47            .          .            
     48         30ms       30ms           	for _, pre := range block.Predecessors { 
     49            .          .           		if pre == block { 
     50            .          .           			continue 
     51            .          .           		} 
     52            .          .            
     53            .       80ms           		if !pre.CanReachPredecessor(live.Block) { // inlined: Block.go:155
     54            .          .           			continue 
     55            .          .           		} 
     56            .          .            
     57         10ms      290ms           		f.markAlive(live, pre, use, false) 
     58            .          .           	} 
     59            .          .           } 

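Stripped of the q-specific region and phi handling, markAlive is a backward liveness walk: mark the value live from the use back to its definition, recursing into predecessors that can still reach it. A self-contained sketch under that assumption (block and the other names here are illustrative, not the real ssa/codegen types):

    package main

    import "fmt"

    type block struct {
    	name  string
    	preds []*block
    	live  map[string]bool
    }

    // markAlive propagates liveness of value v from a use in b back toward
    // its defining block def, visiting each block at most once.
    func markAlive(v string, b, def *block, seen map[*block]bool) {
    	if seen[b] {
    		return
    	}
    	seen[b] = true
    	b.live[v] = true
    	if b == def {
    		return // reached the definition; stop walking upward
    	}
    	for _, p := range b.preds {
    		markAlive(v, p, def, seen)
    	}
    }

    func main() {
    	entry := &block{name: "entry", live: map[string]bool{}}
    	a := &block{name: "a", preds: []*block{entry}, live: map[string]bool{}}
    	b := &block{name: "b", preds: []*block{entry}, live: map[string]bool{}}
    	join := &block{name: "join", preds: []*block{a, b}, live: map[string]bool{}}
    	markAlive("x", join, entry, map[*block]bool{})
    	for _, blk := range []*block{entry, a, b, join} {
    		fmt.Println(blk.name, blk.live["x"]) // all true
    	}
    }
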
git.urbach.dev/cli/q/src/core.(*Environment).parseParameters

/home/user/q/src/core/parseParameters.go

  Total:       250ms      520ms (flat, cum)  1.09%
      8            .          .           	"git.urbach.dev/cli/q/src/types" 
      9            .          .           ) 
     10            .          .            
     11            .          .           // parseParameters parses the tokens of the input and output types. 
     12            .          .           func (env *Environment) parseParameters(functions iter.Seq[*Function]) error { 
     13        250ms      520ms           	for f := range functions { // inlined: Environment.go:66-68, Function.go:92, parseParameters.go:14-55, builder.go:114

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).parseParameters-range3

/home/user/q/src/core/parseParameters.go

  Total:       240ms      510ms (flat, cum)  1.07%
     14            .       50ms           		f.Type = &types.Function{ 
     15         50ms       90ms           			Input:  make([]types.Type, len(f.Input)), 
     16            .       10ms           			Output: make([]types.Type, len(f.Output)), 
     17            .          .           		} 
     18            .          .            
     19         10ms       10ms           		for i, input := range f.Input { 
     20        130ms      130ms           			input.Name = input.Tokens[0].StringFrom(f.File.Bytes) 
     21            .       90ms           			typ, err := env.TypeFromTokens(input.Tokens[1:], f.File) 
     22            .          .            
     23            .          .           			if err != nil { 
     24            .          .           				return err 
     25            .          .           			} 
     26            .          .            
     27         10ms       10ms           			input.Typ = typ 
     28         10ms       10ms           			f.Type.Input[i] = input.Typ 
     29            .          .           		} 
     30            .          .            
     31            .          .           		for i, output := range f.Output { 
     32            .          .           			typeTokens := output.Tokens 
     33            .          .            
     34         20ms       20ms           			if len(output.Tokens) > 1 && output.Tokens[0].Kind == token.Identifier && output.Tokens[1].Kind != token.Or { 
     35            .          .           				output.Name = output.Tokens[0].StringFrom(f.File.Bytes) 
     36            .          .           				output.SetEnd(output.Tokens[0].End()) 
     37            .          .           				typeTokens = typeTokens[1:] 
     38            .          .           			} 
     39            .          .            
     40            .       60ms           			typ, err := env.TypeFromTokens(typeTokens, f.File) 
     41            .          .            
     42            .          .           			if err != nil { 
     43            .          .           				return err 
     44            .          .           			} 
     45            .          .            
     46            .          .           			output.Typ = typ 
     47            .          .           			f.Type.Output[i] = output.Typ 
     48            .          .           		} 
     49            .          .            
     50            .          .           		if f.Previous != nil || f.Next != nil { 
     51            .          .           			suffix := strings.Builder{} 
     52            .          .           			suffix.WriteByte('[') 
     53            .          .            
     54            .          .           			for i, input := range f.Input { 
     55         10ms       30ms           				suffix.WriteString(input.Typ.Name()) // inlined: builder.go:114
     56            .          .            
     57            .          .           				if i != len(f.Input)-1 { 
     58            .          .           					suffix.WriteByte(',') 
     59            .          .           				} 
     60            .          .           			} 

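The suffix loop above tags a function variant with its input types; assuming the closing ']' is written just past the end of this listing, a variant taking (int, string) would be tagged "[int,string]". A sketch of the same construction:

    package main

    import (
    	"fmt"
    	"strings"
    )

    // suffix mirrors the loop above over plain type-name strings.
    func suffix(inputTypes []string) string {
    	b := strings.Builder{}
    	b.WriteByte('[')
    	for i, t := range inputTypes {
    		b.WriteString(t)
    		if i != len(inputTypes)-1 {
    			b.WriteByte(',')
    		}
    	}
    	b.WriteByte(']') // assumed: written after the listing cuts off
    	return b.String()
    }

    func main() {
    	fmt.Println(suffix([]string{"int", "string"})) // [int,string]
    }
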
sync/atomic.(*Pointer[go.shape.interface { Chdir(string); Getenv(string); Open(string); Stat(string) }]).Load

/usr/lib/go/src/sync/atomic/type.go

  Total:        10ms       10ms (flat, cum) 0.021%
     53            .          .           	_ noCopy 
     54            .          .           	v unsafe.Pointer 
     55            .          .           } 
     56            .          .            
     57            .          .           // Load atomically loads and returns the value stored in x. 
     58         10ms       10ms           func (x *Pointer[T]) Load() *T { return (*T)(LoadPointer(&x.v)) } 
     59            .          .            

sync/atomic.(*Pointer[go.shape.struct { internal/sync.isEntry bool }]).Store

/usr/lib/go/src/sync/atomic/type.go

  Total:        10ms       10ms (flat, cum) 0.021%
     60            .          .           // Store atomically stores val into x. 
     61         10ms       10ms           func (x *Pointer[T]) Store(val *T) { StorePointer(&x.v, unsafe.Pointer(val)) } 
     62            .          .            
     63            .          .           // Swap atomically stores new into x and returns the previous value. 
     64            .          .           func (x *Pointer[T]) Swap(new *T) (old *T) { return (*T)(SwapPointer(&x.v, unsafe.Pointer(new))) } 
     65            .          .            
     66            .          .           // CompareAndSwap executes the compare-and-swap operation for x. 

sync/atomic.(*Uint32).Load

/usr/lib/go/src/sync/atomic/type.go

  Total:        10ms       10ms (flat, cum) 0.021%
    142            .          .           	_ noCopy 
    143            .          .           	v uint32 
    144            .          .           } 
    145            .          .            
    146            .          .           // Load atomically loads and returns the value stored in x. 
    147         10ms       10ms           func (x *Uint32) Load() uint32 { return LoadUint32(&x.v) } 
    148            .          .            
    149            .          .           // Store atomically stores val into x. 
    150            .          .           func (x *Uint32) Store(val uint32) { StoreUint32(&x.v, val) } 
    151            .          .            
    152            .          .           // Swap atomically stores new into x and returns the previous value. 

sync/atomic.(*Uint64).CompareAndSwap

/usr/lib/go/src/sync/atomic/type.go

  Total:        10ms       10ms (flat, cum) 0.021%
    186            .          .           // Swap atomically stores new into x and returns the previous value. 
    187            .          .           func (x *Uint64) Swap(new uint64) (old uint64) { return SwapUint64(&x.v, new) } 
    188            .          .            
    189            .          .           // CompareAndSwap executes the compare-and-swap operation for x. 
    190            .          .           func (x *Uint64) CompareAndSwap(old, new uint64) (swapped bool) { 
    191         10ms       10ms           	return CompareAndSwapUint64(&x.v, old, new) 
    192            .          .           } 

sync/atomic.(*Uint64).Add

/usr/lib/go/src/sync/atomic/type.go

  Total:       200ms      200ms (flat, cum)  0.42%
    194            .          .           // Add atomically adds delta to x and returns the new value. 
    195        200ms      200ms           func (x *Uint64) Add(delta uint64) (new uint64) { return AddUint64(&x.v, delta) } 
    196            .          .            
    197            .          .           // And atomically performs a bitwise AND operation on x using the bitmask 
    198            .          .           // provided as mask and returns the old value. 
    199            .          .           func (x *Uint64) And(mask uint64) (old uint64) { return AndUint64(&x.v, mask) } 
    200            .          .            

sync.(*WaitGroup).Add

/usr/lib/go/src/sync/waitgroup.go

  Total:       210ms      690ms (flat, cum)  1.44%
     99            .          .           				// Add has been called from outside this bubble. 
    100            .          .           				fatal("sync: WaitGroup.Add called from inside and outside synctest bubble") 
    101            .          .           			} 
    102            .          .           		} 
    103            .          .           	} 
    104        180ms      180ms           	state := wg.state.Add(uint64(delta) << 32) // inlined: type.go:195
    105            .          .           	if state&waitGroupBubbleFlag != 0 && !bubbled { 
    106            .          .           		// Add has been called from within a synctest bubble (and we aren't in one). 
    107            .          .           		fatal("sync: WaitGroup.Add called from inside and outside synctest bubble") 
    108            .          .           	} 
    109            .          .           	v := int32(state >> 32) 
    110            .          .           	w := uint32(state & 0x7fffffff) 
    111            .          .           	if race.Enabled && delta > 0 && v == int32(delta) { 
    112            .          .           		// The first increment must be synchronized with Wait. 
    113            .          .           		// Need to model this as a read, because there can be 
    114            .          .           		// several concurrent wg.counter transitions from 0. 
    115            .          .           		race.Read(unsafe.Pointer(&wg.sema)) 
    116            .          .           	} 
    117            .          .           	if v < 0 { 
    118            .          .           		panic("sync: negative WaitGroup counter") 
    119            .          .           	} 
    120            .          .           	if w != 0 && delta > 0 && v == int32(delta) { 
    121            .          .           		panic("sync: WaitGroup misuse: Add called concurrently with Wait") 
    122            .          .           	} 
    123            .          .           	if v > 0 || w == 0 { 
    124         20ms       20ms           		return 
    125            .          .           	} 
    126            .          .           	// This goroutine has set counter to 0 when waiters > 0. 
    127            .          .           	// Now there can't be concurrent mutations of state: 
    128            .          .           	// - Adds must not happen concurrently with Wait, 
    129            .          .           	// - Wait does not increment waiters if it sees counter == 0. 
    130            .          .           	// Still do a cheap sanity check to detect WaitGroup misuse. 
    131            .          .           	if wg.state.Load() != state { 
    132            .          .           		panic("sync: WaitGroup misuse: Add called concurrently with Wait") 
    133            .          .           	} 
    134            .          .           	// Reset waiters count to 0. 
    135            .          .           	wg.state.Store(0) 
    136            .          .           	if bubbled { 
    137            .          .           		// Adds must not happen concurrently with wait when counter is 0, 
    138            .          .           		// so we can safely disassociate wg from its current bubble. 
    139            .          .           		synctest.Disassociate(wg) 
    140            .          .           	} 
    141            .          .           	for ; w != 0; w-- { 
    142            .      480ms           		runtime_Semrelease(&wg.sema, false, 0) 
    143            .          .           	} 
    144         10ms       10ms           } 
    145            .          .            
    146            .          .           // Done decrements the [WaitGroup] task counter by one. 
    147            .          .           // It is equivalent to Add(-1). 
    148            .          .           // 
    149            .          .           // Callers should prefer [WaitGroup.Go]. 

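The decode on lines 109-110 above shows the packing Add relies on: the task counter lives in the high 32 bits of one word and the waiter count in the low bits, so Add(delta) is a single atomic add of delta<<32. A worked example of that arithmetic (plain integers standing in for the atomic state word):

    package main

    import "fmt"

    func main() {
    	const delta = 3
    	var state uint64
    	state += uint64(delta) << 32 // what wg.state.Add(uint64(delta) << 32) does
    	state += 2                   // two goroutines blocked in Wait
    	v := int32(state >> 32)         // counter, decoded as above
    	w := uint32(state & 0x7fffffff) // waiters (bit 31 is the bubble flag)
    	fmt.Println(v, w) // 3 2
    }
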
sync.(*WaitGroup).Done

/usr/lib/go/src/sync/waitgroup.go

  Total:           0      620ms (flat, cum)  1.30%
    151            .          .           // In the terminology of [the Go memory model], a call to Done 
    152            .          .           // "synchronizes before" the return of any Wait call that it unblocks. 
    153            .          .           // 
    154            .          .           // [the Go memory model]: https://go.dev/ref/mem 
    155            .          .           func (wg *WaitGroup) Done() { 
    156            .      620ms           	wg.Add(-1) 
    157            .          .           } 
    158            .          .            
    159            .          .           // Wait blocks until the [WaitGroup] task counter is zero. 
    160            .          .           func (wg *WaitGroup) Wait() { 
    161            .          .           	if race.Enabled { 

sync.(*WaitGroup).Wait

/usr/lib/go/src/sync/waitgroup.go

  Total:        10ms       10ms (flat, cum) 0.021%
    179            .          .           				} 
    180            .          .           			} 
    181            .          .           			return 
    182            .          .           		} 
    183            .          .           		// Increment waiters count. 
    184         10ms       10ms           		if wg.state.CompareAndSwap(state, state+1) { // inlined: type.go:191
    185            .          .           			if race.Enabled && w == 0 { 
    186            .          .           				// Wait must be synchronized with the first Add. 
    187            .          .           				// Need to model this as a write to race with the read in Add. 
    188            .          .           				// As a consequence, can do the write only for the first waiter, 
    189            .          .           				// otherwise concurrent Waits will race with each other. 

sync.(*WaitGroup).Wait

/usr/lib/go/src/sync/waitgroup.go

  Total:        10ms      100ms (flat, cum)  0.21%
    201            .          .           				} 
    202            .          .           				if race.Enabled { 
    203            .          .           					race.Disable() 
    204            .          .           				} 
    205            .          .           			} 
    206            .       90ms           			runtime_SemacquireWaitGroup(&wg.sema, synctestDurable) 
    207         10ms       10ms           			if wg.state.Load() != 0 { 
    208            .          .           				panic("sync: WaitGroup is reused before previous Wait has returned") 
    209            .          .           			} 
    210            .          .           			if race.Enabled { 
    211            .          .           				race.Enable() 
    212            .          .           				race.Acquire(unsafe.Pointer(wg)) 

git.urbach.dev/cli/q/src/token.List.Instructions

/home/user/q/src/token/Instructions.go

  Total:       230ms      6.34s (flat, cum) 13.27%
      1            .          .           package token 
      2            .          .            
      3            .          .           // Instructions yields each AST node. 
      4         10ms       10ms           func (list List) Instructions(yield func(List) bool) { 
      5            .          .           	start := 0 
      6            .          .           	groupLevel := 0 
      7            .          .           	blockLevel := 0 
      8            .          .            
      9         60ms       60ms           	for i, t := range list { 
     10         90ms       90ms           		switch t.Kind { 
     11            .          .           		case NewLine: 
     12         10ms       10ms           			if start == i { 
     13            .          .           				start = i + 1 
     14            .          .           				continue 
     15            .          .           			} 
     16            .          .            
     17         10ms       10ms           			if groupLevel > 0 || blockLevel > 0 { 
     18            .          .           				continue 
     19            .          .           			} 
     20            .          .            
     21            .      4.07s           			if !yield(list[start:i]) { 
     22            .          .           				return 
     23            .          .           			} 
     24            .          .            
     25            .          .           			start = i + 1 
     26            .          .            
     27            .          .           		case GroupStart: 
     28         10ms       10ms           			groupLevel++ 
     29            .          .            
     30         20ms       20ms           		case GroupEnd: 
     31            .          .           			groupLevel-- 
     32            .          .            
     33         10ms       10ms           		case BlockStart: 
     34            .          .           			blockLevel++ 
     35            .          .            
     36            .          .           		case BlockEnd: 
     37            .          .           			blockLevel-- 
     38            .          .            
     39            .          .           			if groupLevel > 0 || blockLevel > 0 { 
     40            .          .           				continue 
     41            .          .           			} 
     42            .          .            
     43            .          .           			if !list[start].Kind.IsBlock() { 
     44            .          .           				continue 
     45            .          .           			} 
     46            .          .            
     47            .         2s           			if !yield(list[start : i+1]) { 
     48            .          .           				return 
     49            .          .           			} 
     50            .          .            
     51            .          .           			start = i + 1 
     52            .          .            
     53         10ms       10ms           		case EOF: 
     54            .          .           			if start < i { 
     55            .          .           				yield(list[start:i]) 
     56            .          .           			} 
     57            .          .            
     58            .          .           			return 
     59            .          .           		} 
     60            .          .           	} 
     61            .          .            
     62            .          .           	if start < len(list) { 
     63            .       40ms           		yield(list[start:]) 
     64            .          .           	} 
     65            .          .           } 

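Instructions splits a flat token list into instructions: a NewLine ends one only at nesting level zero, and a BlockEnd emits the whole block. A sketch of the same state machine over plain bytes ('(' ')' '{' '}' standing in for the Group/Block token kinds; the IsBlock check is omitted for brevity, and ASCII input is assumed):

    package main

    import "fmt"

    func instructions(src string, yield func(string) bool) {
    	start, group, block := 0, 0, 0
    	for i, c := range src {
    		switch c {
    		case '\n':
    			if start == i {
    				start = i + 1 // skip empty lines
    				continue
    			}
    			if group > 0 || block > 0 {
    				continue // newline inside a group/block: not a boundary
    			}
    			if !yield(src[start:i]) {
    				return
    			}
    			start = i + 1
    		case '(':
    			group++
    		case ')':
    			group--
    		case '{':
    			block++
    		case '}':
    			block--
    			if group == 0 && block == 0 {
    				if !yield(src[start : i+1]) { // emit the whole block
    					return
    				}
    				start = i + 1
    			}
    		}
    	}
    	if start < len(src) {
    		yield(src[start:]) // trailing instruction without a newline
    	}
    }

    func main() {
    	src := "a := 1\nif x {\n  y()\n}\nb := 2"
    	instructions(src, func(s string) bool {
    		fmt.Printf("%q\n", s) // "a := 1", "if x {\n  y()\n}", "b := 2"
    		return true
    	})
    }
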
runtime.ReadMemStats

/usr/lib/go/src/runtime/mstats.go

  Total:           0       20ms (flat, cum) 0.042%
    355            .          .           // collection cycle. 
    356            .          .           func ReadMemStats(m *MemStats) { 
    357            .          .           	_ = m.Alloc // nil check test before we switch stacks, see issue 61158 
    358            .          .           	stw := stopTheWorld(stwReadMemStats) 
    359            .          .            
    360            .       20ms           	systemstack(func() { 

runtime.ReadMemStats.func1

/usr/lib/go/src/runtime/mstats.go

  Total:           0       20ms (flat, cum) 0.042%
    361            .       20ms           		readmemstats_m(m) 
    362            .          .           	}) 
    363            .          .            
    364            .          .           	startTheWorld(stw) 
    365            .          .           } 
    366            .          .            

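ReadMemStats is a stop-the-world operation, which is why even 20ms here is worth noticing. Typical usage of the public API:

    package main

    import (
    	"fmt"
    	"runtime"
    )

    func main() {
    	// Calling this in a hot loop (as a benchmark harness might)
    	// shows up in profiles like this one.
    	var m runtime.MemStats
    	runtime.ReadMemStats(&m)
    	fmt.Println(m.Alloc, m.NumGC)
    }
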
runtime.readmemstats_m

/usr/lib/go/src/runtime/mstats.go

  Total:           0       20ms (flat, cum) 0.042%
    377            .          .            
    378            .          .           	// Flush mcaches to mcentral before doing anything else. 
    379            .          .           	// 
    380            .          .           	// Flushing to the mcentral may in general cause stats to 
    381            .          .           	// change as mcentral data structures are manipulated. 
    382            .       20ms           	systemstack(flushallmcaches) 
    383            .          .            
    384            .          .           	// Calculate memory allocator stats. 
    385            .          .           	// During program execution we only count number of frees and amount of freed memory. 
    386            .          .           	// Current number of alive objects in the heap and amount of alive heap memory 
    387            .          .           	// are calculated by scanning all spans. 

runtime.flushmcache

/usr/lib/go/src/runtime/mstats.go

  Total:           0       20ms (flat, cum) 0.042%
    618            .          .           	p := allp[i] 
    619            .          .           	c := p.mcache 
    620            .          .           	if c == nil { 
    621            .          .           		return 
    622            .          .           	} 
    623            .       20ms           	c.releaseAll() 
    624            .          .           	stackcache_clear(c) 
    625            .          .           } 
    626            .          .            
    627            .          .           // flushallmcaches flushes the mcaches of all Ps. 
    628            .          .           // 

runtime.flushallmcaches

/usr/lib/go/src/runtime/mstats.go

  Total:           0       20ms (flat, cum) 0.042%
    631            .          .           //go:nowritebarrier 
    632            .          .           func flushallmcaches() { 
    633            .          .           	assertWorldStopped() 
    634            .          .            
    635            .          .           	for i := 0; i < int(gomaxprocs); i++ { 
    636            .       20ms           		flushmcache(i) 
    637            .          .           	} 
    638            .          .           } 
    639            .          .            
    640            .          .           // sysMemStat represents a global system statistic that is managed atomically. 
    641            .          .           // 

runtime.(*sysMemStat).add

/usr/lib/go/src/runtime/mstats.go

  Total:       100ms      100ms (flat, cum)  0.21%
    655            .          .           // 
    656            .          .           // Must be nosplit as it is called in runtime initialization, e.g. newosproc0. 
    657            .          .           // 
    658            .          .           //go:nosplit 
    659            .          .           func (s *sysMemStat) add(n int64) { 
    660        100ms      100ms           	val := atomic.Xadd64((*uint64)(s), n) 
    661            .          .           	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) { 
    662            .          .           		print("runtime: val=", val, " n=", n, "\n") 
    663            .          .           		throw("sysMemStat overflow") 
    664            .          .           	} 
    665            .          .           } 

runtime.(*consistentHeapStats).acquire

/usr/lib/go/src/runtime/mstats.go

  Total:        70ms       70ms (flat, cum)  0.15%
    773            .          .           // function. 
    774            .          .           // 
    775            .          .           //go:nosplit 
    776            .          .           func (m *consistentHeapStats) acquire() *heapStatsDelta { 
    777            .          .           	if pp := getg().m.p.ptr(); pp != nil { 
    778         50ms       50ms           		seq := pp.statsSeq.Add(1) // inlined: types.go:291
    779            .          .           		if seq%2 == 0 { 
    780            .          .           			// Should have been incremented to odd. 
    781            .          .           			print("runtime: seq=", seq, "\n") 
    782            .          .           			throw("bad sequence number") 
    783            .          .           		} 
    784            .          .           	} else { 
    785            .          .           		lock(&m.noPLock) 
    786            .          .           	} 
    787         20ms       20ms           	gen := m.gen.Load() % 3 // inlined: types.go:194
    788            .          .           	return &m.stats[gen] 
    789            .          .           } 
    790            .          .            
    791            .          .           // release indicates that the writer is done modifying 
    792            .          .           // the delta. The value returned by the corresponding 

runtime.(*consistentHeapStats).release

/usr/lib/go/src/runtime/mstats.go

  Total:        50ms       50ms (flat, cum)   0.1%
    800            .          .           // nosplit because a stack growth in this function could 
    801            .          .           // lead to a stack allocation that causes another acquire 
    802            .          .           // before this operation has completed. 
    803            .          .           // 
    804            .          .           //go:nosplit 
    805         10ms       10ms           func (m *consistentHeapStats) release() { 
    806            .          .           	if pp := getg().m.p.ptr(); pp != nil { 
    807         20ms       20ms           		seq := pp.statsSeq.Add(1) // inlined: types.go:291
    808            .          .           		if seq%2 != 0 { 
    809            .          .           			// Should have been incremented to even. 
    810            .          .           			print("runtime: seq=", seq, "\n") 
    811            .          .           			throw("bad sequence number") 
    812            .          .           		} 
    813            .          .           	} else { 
    814            .          .           		unlock(&m.noPLock) 
    815            .          .           	} 
    816         20ms       20ms           } 
    817            .          .            
    818            .          .           // unsafeRead aggregates the delta for this shard into out. 
    819            .          .           // 
    820            .          .           // Unsafe because it does so without any synchronization. The 
    821            .          .           // world must be stopped. 

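acquire and release bracket writes with an odd/even per-P sequence number, the same idea as a seqlock: an odd sequence means a writer is mid-update, so readers retry. A minimal sketch of that protocol (ignoring the memory-ordering care the runtime version needs; seqCell is illustrative):

    package main

    import (
    	"fmt"
    	"sync/atomic"
    )

    type seqCell struct {
    	seq atomic.Uint64
    	val uint64
    }

    func (c *seqCell) write(v uint64) {
    	c.seq.Add(1) // odd: writer active
    	c.val = v
    	c.seq.Add(1) // even: consistent again
    }

    func (c *seqCell) read() uint64 {
    	for {
    		s := c.seq.Load()
    		v := c.val // racy in a real program; fine for the sketch
    		if s%2 == 0 && c.seq.Load() == s {
    			return v // sequence was even and unchanged: v is consistent
    		}
    	}
    }

    func main() {
    	var c seqCell
    	c.write(42)
    	fmt.Println(c.read()) // 42
    }
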
internal/runtime/maps.h2

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        30ms       30ms (flat, cum) 0.063%
    186            .          .            
    187            .          .           // Extracts the H2 portion of a hash: the 7 bits not used for h1. 
    188            .          .           // 
    189            .          .           // These are used as an occupied control byte. 
    190            .          .           func h2(h uintptr) uintptr { 
    191         30ms       30ms           	return h & 0x7f 
    192            .          .           } 
    193            .          .            
    194            .          .           // Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType. 
    195            .          .           type Map struct { 
    196            .          .           	// The number of filled slots (i.e. the number of elements in all 

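For context, the complementary h1 in this package is the remaining high bits (h >> 7); h2 is what gets stored in a full slot's control byte. A one-liner showing the split:

    package main

    import "fmt"

    func main() {
    	h := uintptr(0xdeadbeef)
    	fmt.Printf("h1=%#x h2=%#x\n", h>>7, h&0x7f) // h2 fits in 7 bits
    }
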
internal/runtime/maps.NewMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0      150ms (flat, cum)  0.31%
    257            .          .           // maxAlloc should be runtime.maxAlloc. 
    258            .          .           // 
    259            .          .           // TODO(prattmic): Put maxAlloc somewhere accessible. 
    260            .          .           func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map { 
    261            .          .           	if m == nil { 
    262            .      150ms           		m = new(Map) 
    263            .          .           	} 
    264            .          .            
    265            .          .           	m.seed = uintptr(rand()) 
    266            .          .            
    267            .          .           	if hint <= abi.SwissMapGroupSlots { 

internal/runtime/maps.NewMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0      320ms (flat, cum)  0.67%
    309            .          .           	} 
    310            .          .            
    311            .          .           	m.globalDepth = uint8(sys.TrailingZeros64(dirSize)) 
    312            .          .           	m.globalShift = depthToShift(m.globalDepth) 
    313            .          .            
    314            .       30ms           	directory := make([]*table, dirSize) 
    315            .          .            
    316            .          .           	for i := range directory { 
    317            .          .           		// TODO: Think more about initial table capacity. 
    318            .      290ms           		directory[i] = newTable(mt, uint64(targetCapacity)/dirSize, i, m.globalDepth) 
    319            .          .           	} 
    320            .          .            
    321            .          .           	m.dirPtr = unsafe.Pointer(&directory[0]) 
    322            .          .           	m.dirLen = len(directory) 

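The directory built above is extendible hashing: 1<<globalDepth table pointers, with the top bits of the hash selecting a table. An illustrative sketch (depthToShift is not shown in the listing; 64-bit hashes assumed):

    package main

    import "fmt"

    func main() {
    	const globalDepth = 2
    	const dirSize = 1 << globalDepth    // 4 table pointers
    	const globalShift = 64 - globalDepth // assumed shift for 64-bit hashes
    	hash := uint64(0xc000000000000000)
    	fmt.Println(dirSize, hash>>globalShift) // 4 3: top bits pick table 3
    }
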
internal/runtime/maps.NewEmptyMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms      260ms (flat, cum)  0.54%
    324            .          .           	return m 
    325            .          .           } 
    326            .          .            
    327            .          .           func NewEmptyMap() *Map { 
    328            .      190ms           	m := new(Map) 
    329         10ms       70ms           	m.seed = uintptr(rand()) 
    330            .          .           	// See comment in NewMap. No need to eager allocate a group. 
    331            .          .           	return m 
    332            .          .           } 
    333            .          .            
    334            .          .           func (m *Map) directoryIndex(hash uintptr) uintptr { 

internal/runtime/maps.(*Map).getWithKeySmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        50ms       70ms (flat, cum)  0.15%
    442            .          .           func (m *Map) getWithKeySmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) (unsafe.Pointer, unsafe.Pointer, bool) { 
    443            .          .           	g := groupReference{ 
    444            .          .           		data: m.dirPtr, 
    445            .          .           	} 
    446            .          .            
    447         10ms       10ms           	match := g.ctrls().matchH2(h2(hash)) // inlined: map.go:191
    448            .          .            
    449         10ms       10ms           	for match != 0 { 
    450            .          .           		i := match.first() 
    451            .          .            
    452         10ms       10ms           		slotKey := g.key(typ, i) // inlined: group.go:287
    453            .          .           		if typ.IndirectKey() { 
    454            .          .           			slotKey = *((*unsafe.Pointer)(slotKey)) 
    455            .          .           		} 
    456            .          .            
    457         20ms       40ms           		if typ.Key.Equal(key, slotKey) { 
    458            .          .           			slotElem := g.elem(typ, i) 
    459            .          .           			if typ.IndirectElem() { 
    460            .          .           				slotElem = *((*unsafe.Pointer)(slotElem)) 
    461            .          .           			} 
    462            .          .           			return slotKey, slotElem, true 

internal/runtime/maps.(*Map).putSlotSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        40ms       40ms (flat, cum) 0.084%
    527            .          .            
    528            .          .           		return elem 
    529            .          .           	} 
    530            .          .           } 
    531            .          .            
    532         10ms       10ms           func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer { 
    533            .          .           	g := groupReference{ 
    534            .          .           		data: m.dirPtr, 
    535            .          .           	} 
    536            .          .            
    537         30ms       30ms           	match := g.ctrls().matchH2(h2(hash)) // inlined: group.go:154-171, map.go:191
    538            .          .            
    539            .          .           	// Look for an existing slot containing this key. 
    540            .          .           	for match != 0 { 
    541            .          .           		i := match.first() 
    542            .          .            

internal/runtime/maps.(*Map).putSlotSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        30ms       60ms (flat, cum)  0.13%
    560            .          .           	} 
    561            .          .            
    562            .          .           	// There can't be deleted slots, small maps can't have them 
    563            .          .           	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit 
    564            .          .           	// more efficient than matchEmpty. 
    565         10ms       10ms           	match = g.ctrls().matchEmptyOrDeleted() // inlined: group.go:280
    566            .          .           	if match == 0 { 
    567            .          .           		fatal("small map with no empty slot (concurrent map writes?)") 
    568            .          .           		return nil 
    569            .          .           	} 
    570            .          .            
    571            .          .           	i := match.first() 
    572            .          .            
    573         10ms       10ms           	slotKey := g.key(typ, i) // inlined: group.go:285
    574            .          .           	if typ.IndirectKey() { 
    575            .          .           		kmem := newobject(typ.Key) 
    576            .          .           		*(*unsafe.Pointer)(slotKey) = kmem 
    577            .          .           		slotKey = kmem 
    578            .          .           	} 
    579            .       30ms           	typedmemmove(typ.Key, slotKey, key) 
    580            .          .            
    581         10ms       10ms           	slotElem := g.elem(typ, i) // inlined: group.go:292
    582            .          .           	if typ.IndirectElem() { 
    583            .          .           		emem := newobject(typ.Elem) 
    584            .          .           		*(*unsafe.Pointer)(slotElem) = emem 
    585            .          .           		slotElem = emem 
    586            .          .           	} 

internal/runtime/maps.(*Map).growToSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0      850ms (flat, cum)  1.78%
    590            .          .            
    591            .          .           	return slotElem 
    592            .          .           } 
    593            .          .            
    594            .          .           func (m *Map) growToSmall(typ *abi.SwissMapType) { 
    595            .      850ms           	grp := newGroups(typ, 1) // inlined: group.go:316
    596            .          .           	m.dirPtr = grp.data 
    597            .          .            
    598            .          .           	g := groupReference{ 
    599            .          .           		data: m.dirPtr, 

internal/runtime/maps.(*Map).growToTable

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0       20ms (flat, cum) 0.042%
    601            .          .           	g.ctrls().setEmpty() 
    602            .          .           } 
    603            .          .            
    604            .          .           func (m *Map) growToTable(typ *abi.SwissMapType) { 
    605            .       20ms           	tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0) 
    606            .          .            
    607            .          .           	g := groupReference{ 
    608            .          .           		data: m.dirPtr, 
    609            .          .           	} 
    610            .          .            

internal/runtime/maps.(*Map).growToTable

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0       10ms (flat, cum) 0.021%
    627            .          .           		hash := typ.Hasher(key, m.seed) 
    628            .          .            
    629            .          .           		tab.uncheckedPutSlot(typ, hash, key, elem) 
    630            .          .           	} 
    631            .          .            
    632            .       10ms           	directory := make([]*table, 1) 
    633            .          .            
    634            .          .           	directory[0] = tab 
    635            .          .            
    636            .          .           	m.dirPtr = unsafe.Pointer(&directory[0]) 
    637            .          .           	m.dirLen = len(directory) 

internal/runtime/maps.(*Map).Delete

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms       10ms (flat, cum) 0.021%
    638            .          .            
    639            .          .           	m.globalDepth = 0 
    640            .          .           	m.globalShift = depthToShift(m.globalDepth) 
    641            .          .           } 
    642            .          .            
    643         10ms       10ms           func (m *Map) Delete(typ *abi.SwissMapType, key unsafe.Pointer) { 
    644            .          .           	if m == nil || m.Used() == 0 { 
    645            .          .           		if err := mapKeyError(typ, key); err != nil { 
    646            .          .           			panic(err) // see issue 23734 
    647            .          .           		} 
    648            .          .           		return 

internal/runtime/maps.(*Map).Delete

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms       30ms (flat, cum) 0.063%
    660            .          .            
    661            .          .           	if m.dirLen == 0 { 
    662            .          .           		m.deleteSmall(typ, hash, key) 
    663            .          .           	} else { 
    664            .          .           		idx := m.directoryIndex(hash) 
    665            .       20ms           		if m.directoryAt(idx).Delete(typ, m, hash, key) { 
    666            .          .           			m.tombstonePossible = true 
    667            .          .           		} 
    668            .          .           	} 
    669            .          .            
    670         10ms       10ms           	if m.used == 0 { 
    671            .          .           		// Reset the hash seed to make it more difficult for attackers 
    672            .          .           		// to repeatedly trigger hash collisions. See 
    673            .          .           		// https://go.dev/issue/25237. 
    674            .          .           		m.seed = uintptr(rand()) 
    675            .          .           	} 
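
As the source shows, Delete reseeds the hash once the map is empty (the issue 25237 comment above), so a map that is repeatedly drained and refilled pays that reseed each round. For emptying a map wholesale, the clear builtin (Go 1.21+) does it in one runtime call rather than one Delete per key; a sketch:

    // Two ways to empty a map; both end with m.used == 0 and a
    // reseed, but clear avoids the per-key Delete overhead.
    func drainSlow(m map[string]int) {
        for k := range m {
            delete(m, k)
        }
    }

    func drainFast(m map[string]int) {
        clear(m)
    }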

internal/runtime/maps.mapKeyError

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        30ms       60ms (flat, cum)  0.13%
    820            .          .           	} 
    821            .          .           	return mapKeyError2(t.Key, p) 
    822            .          .           } 
    823            .          .            
    824            .          .           func mapKeyError(t *abi.SwissMapType, p unsafe.Pointer) error { 
    825         30ms       30ms           	if !t.HashMightPanic() {                                                       return mt.Flags&SwissMapHashMightPanic != 0                          map_swiss.go:57
    826            .          .           		return nil 
    827            .          .           	} 
    828            .       30ms           	return mapKeyError2(t.Key, p) 
    829            .          .           } 

internal/runtime/maps.mapKeyError2

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        20ms       20ms (flat, cum) 0.042%
    830            .          .            
    831         20ms       20ms           func mapKeyError2(t *abi.Type, p unsafe.Pointer) error { 
    832            .          .           	if t.TFlag&abi.TFlagRegularMemory != 0 { 
    833            .          .           		return nil 
    834            .          .           	} 
    835            .          .           	switch t.Kind() { 
    836            .          .           	case abi.Float32, abi.Float64, abi.Complex64, abi.Complex128, abi.String: 

internal/runtime/maps.mapKeyError2

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms       10ms (flat, cum) 0.021%
    858            .          .           		if t.Equal == nil { 
    859            .          .           			return unhashableTypeError{t} 
    860            .          .           		} 
    861            .          .            
    862            .          .           		if t.Kind_&abi.KindDirectIface != 0 { 
    863         10ms       10ms           			return mapKeyError2(t, unsafe.Pointer(pdata)) 
    864            .          .           		} else { 
    865            .          .           			return mapKeyError2(t, *pdata) 
    866            .          .           		} 
    867            .          .           	case abi.Array: 
    868            .          .           		a := (*abi.ArrayType)(unsafe.Pointer(t)) 
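
mapKeyError/mapKeyError2 only run for key types that might panic when hashed: interface keys whose dynamic value is uncomparable. A one-line reproduction (not from this profile) of the case they report:

    // An interface-keyed map accepts any dynamic type at compile
    // time; hashing an uncomparable one panics at the access.
    m := map[any]int{}
    m[[]int{1, 2}] = 3 // panic: runtime error: hash of unhashable type []int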

runtime.nanotime

/usr/lib/go/src/runtime/time_nofake.go

  Total:       210ms      210ms (flat, cum)  0.44%
     28            .          .           // See go.dev/issue/67401. 
     29            .          .           // 
     30            .          .           //go:linkname nanotime 
     31            .          .           //go:nosplit 
     32            .          .           func nanotime() int64 { 
     33        210ms      210ms           	return nanotime1() 
     34            .          .           } 
     35            .          .            
     36            .          .           // overrideWrite allows write to be redirected externally, by 
     37            .          .           // linkname'ing this and set it to a write function. 
     38            .          .           // 
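
nanotime backs the monotonic clock. The usual route to it from user code is time.Now/time.Since, whose monotonic reading makes elapsed-time measurement immune to wall-clock adjustments; a sketch (doWork is a placeholder, not from this profile):

    // time.Since subtracts monotonic readings, which bottom out in
    // runtime.nanotime on this platform.
    start := time.Now()
    doWork() // placeholder for the code being timed
    elapsed := time.Since(start)
    _ = elapsed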

runtime.rand

/usr/lib/go/src/runtime/rand.go

  Total:        90ms      120ms (flat, cum)  0.25%
    155            .          .           // 
    156            .          .           // Do not change signature: used via linkname from other packages. 
    157            .          .           // 
    158            .          .           //go:nosplit 
    159            .          .           //go:linkname rand 
    160         10ms       10ms           func rand() uint64 { 
    161            .          .           	// Note: We avoid acquirem here so that in the fast path 
    162            .          .           	// there is just a getg, an inlined c.Next, and a return. 
    163            .          .           	// The performance difference on a 16-core AMD is 
    164            .          .           	// 3.7ns/call this way versus 4.3ns/call with acquirem (+16%). 
    165            .          .           	mp := getg().m 
    166            .          .           	c := &mp.chacha8 
    167            .          .           	for { 
    168            .          .           		// Note: c.Next is marked nosplit, 
    169            .          .           		// so we don't need to use mp.locks 
    170            .          .           		// on the fast path, which is that the 
    171            .          .           		// first attempt succeeds. 
    172         40ms       40ms           		x, ok := c.Next()                                                               s.i = i + 1                                                  chacha8.go:58                    i := s.i                                                     chacha8.go:54

    173            .          .           		if ok { 
    174         40ms       40ms           			return x 
    175            .          .           		} 
    176            .          .           		mp.locks++ // hold m even though c.Refill may do stack split checks 
    177            .       30ms           		c.Refill() 
    178            .          .           		mp.locks-- 
    179            .          .           	} 
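
runtime.rand is the per-M ChaCha8 generator; the package-level functions of math/rand/v2 draw from it via linkname, which is why they need no locking on the fast path shown above. Typical use:

    package main

    import (
        "fmt"
        "math/rand/v2"
    )

    func main() {
        fmt.Println(rand.Uint64())  // raw 64-bit draw from the per-M state
        fmt.Println(rand.IntN(100)) // bounded draw
    }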

internal/runtime/maps.rand

/usr/lib/go/src/runtime/rand.go

  Total:        30ms      140ms (flat, cum)  0.29%
    181            .          .            
    182            .          .           //go:linkname maps_rand internal/runtime/maps.rand 
    183         10ms       10ms           func maps_rand() uint64 { 
    184         20ms      130ms           	return rand() 
    185            .          .           } 
    186            .          .            
    187            .          .           // mrandinit initializes the random state of an m. 
    188            .          .           func mrandinit(mp *m) { 
    189            .          .           	var seed [4]uint64 

runtime.cheaprand

/usr/lib/go/src/runtime/rand.go

  Total:        70ms       70ms (flat, cum)  0.15%
    223            .          .           // See go.dev/issue/67401. 
    224            .          .           // 
    225            .          .           //go:linkname cheaprand 
    226            .          .           //go:nosplit 
    227            .          .           func cheaprand() uint32 { 
    228         10ms       10ms           	mp := getg().m 
    229            .          .           	// Implement wyrand: https://github.com/wangyi-fudan/wyhash 
    230            .          .           	// Only the platform that math.Mul64 can be lowered 
    231            .          .           	// by the compiler should be in this list. 
    232            .          .           	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64| 
    233            .          .           		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le| 
    234            .          .           		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 { 
    235         40ms       40ms           		mp.cheaprand += 0xa0761d6478bd642f 
    236         10ms       10ms           		hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db) 
    237         10ms       10ms           		return uint32(hi ^ lo) 
    238            .          .           	} 
    239            .          .            
    240            .          .           	// Implement xorshift64+: 2 32-bit xorshift sequences added together. 
    241            .          .           	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's 
    242            .          .           	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf 
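
The fast path above is wyrand: bump a 64-bit state by an odd constant, take the full 128-bit product of the state with a xored copy, and fold the halves. A user-space equivalent with math/bits, using the same constants as the source:

    import "math/bits"

    type wyrand uint64 // per-goroutine or per-shard state

    func (s *wyrand) next() uint32 {
        *s += 0xa0761d6478bd642f
        hi, lo := bits.Mul64(uint64(*s), uint64(*s)^0xe7037ed1a0b428db)
        return uint32(hi ^ lo)
    }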

runtime.cheaprandn

/usr/lib/go/src/runtime/rand.go

  Total:        40ms       40ms (flat, cum) 0.084%
    288            .          .           // 
    289            .          .           //go:linkname cheaprandn 
    290            .          .           //go:nosplit 
    291            .          .           func cheaprandn(n uint32) uint32 { 
    292            .          .           	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ 
    293         40ms       40ms           	return uint32((uint64(cheaprand()) * uint64(n)) >> 32)                                                       mp.cheaprand += 0xa0761d6478bd642f                                   rand.go:235            hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db)  rand.go:236
    294            .          .           } 
    295            .          .            
    296            .          .           // Too much legacy code has go:linkname references 
    297            .          .           // to runtime.fastrand and friends, so keep these around for now. 
    298            .          .           // Code should migrate to math/rand/v2.Uint64, 
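
cheaprandn's one-liner is Lemire's multiply-shift reduction: the high 32 bits of a 32x32-bit product are near-uniform over [0, n), replacing a modulo with one multiply. Standalone form:

    // Lemire reduction: maps a uniform 32-bit x into [0, n) without
    // division. Slightly biased when n is not a power of two, which
    // the runtime accepts for its internal uses.
    func reduce(x, n uint32) uint32 {
        return uint32((uint64(x) * uint64(n)) >> 32)
    }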

runtime.(*gcControllerState).update

/usr/lib/go/src/runtime/mgcpacer.go

  Total:        70ms       70ms (flat, cum)  0.15%
    890            .          .           	default: 
    891            .          .           		throw("markWorkerStop: unknown mark worker mode") 
    892            .          .           	} 
    893            .          .           } 
    894            .          .            
    895         10ms       10ms           func (c *gcControllerState) update(dHeapLive, dHeapScan int64) { 
    896         20ms       20ms           	if dHeapLive != 0 { 
    897         10ms       10ms           		trace := traceAcquire()                                                               if !traceEnabled() {                                         traceruntime.go:188
                                                                  return trace.enabled                                     traceruntime.go:151

    898         30ms       30ms           		live := gcController.heapLive.Add(dHeapLive)                                                               return Xadd64(&u.value, delta)                               types.go:344

    899            .          .           		if trace.ok() { 
    900            .          .           			// gcController.heapLive changed. 
    901            .          .           			trace.HeapAlloc(live) 
    902            .          .           			traceRelease(trace) 
    903            .          .           		} 

runtime.(*gcControllerState).addScannableStack

/usr/lib/go/src/runtime/mgcpacer.go

  Total:        80ms       80ms (flat, cum)  0.17%
    913            .          .           		c.revise() 
    914            .          .           	} 
    915            .          .           } 
    916            .          .            
    917            .          .           func (c *gcControllerState) addScannableStack(pp *p, amount int64) { 
    918         10ms       10ms           	if pp == nil { 
    919            .          .           		c.maxStackScan.Add(amount) 
    920            .          .           		return 
    921            .          .           	} 
    922            .          .           	pp.maxStackScanDelta += amount 
    923            .          .           	if pp.maxStackScanDelta >= maxStackScanSlack || pp.maxStackScanDelta <= -maxStackScanSlack { 
    924         70ms       70ms           		c.maxStackScan.Add(pp.maxStackScanDelta)                                                               return Xadd64(&u.value, delta)                               types.go:344

    925            .          .           		pp.maxStackScanDelta = 0 
    926            .          .           	} 
    927            .          .           } 
    928            .          .            
    929            .          .           func (c *gcControllerState) addGlobals(amount int64) { 
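
addScannableStack shows the runtime's per-P batching pattern: accumulate into a P-local delta and flush to the shared atomic only once the delta exceeds a slack threshold, so the contended word is touched rarely. The same idea in user code (a sketch; the slack value is illustrative):

    import "sync/atomic"

    const slack = 8 << 10 // illustrative flush threshold

    type batchedCounter struct {
        global atomic.Int64
        local  int64 // owned by a single goroutine
    }

    func (c *batchedCounter) add(n int64) {
        c.local += n
        if c.local >= slack || c.local <= -slack {
            c.global.Add(c.local) // one atomic op per ~slack units
            c.local = 0
        }
    }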

runtime.(*gcControllerState).heapGoalInternal

/usr/lib/go/src/runtime/mgcpacer.go

  Total:        10ms       10ms (flat, cum) 0.021%
    940            .          .           // information that is necessary for computing the trigger. 
    941            .          .           // 
    942            .          .           // The returned minTrigger is always <= goal. 
    943            .          .           func (c *gcControllerState) heapGoalInternal() (goal, minTrigger uint64) { 
    944            .          .           	// Start with the goal calculated for gcPercent. 
    945         10ms       10ms           	goal = c.gcPercentHeapGoal.Load()                                                       return Load64(&u.value)                                              types.go:309

    946            .          .            
    947            .          .           	// Check if the memory-limit-based goal is smaller, and if so, pick that. 
    948            .          .           	if newGoal := c.memoryLimitHeapGoal(); newGoal < goal { 
    949            .          .           		goal = newGoal 
    950            .          .           	} else { 

runtime.(*gcControllerState).trigger

/usr/lib/go/src/runtime/mgcpacer.go

  Total:        20ms       30ms (flat, cum) 0.063%
   1126            .          .           // 
   1127            .          .           // The returned value may be compared against heapLive to determine whether 
   1128            .          .           // the GC should trigger. Thus, the GC trigger condition should be (but may 
   1129            .          .           // not be, in the case of small movements for efficiency) checked whenever 
   1130            .          .           // the heap goal may change. 
   1131         20ms       20ms           func (c *gcControllerState) trigger() (uint64, uint64) { 
   1132            .       10ms           	goal, minTrigger := c.heapGoalInternal() 
   1133            .          .            
   1134            .          .           	// Invariant: the trigger must always be less than the heap goal. 
   1135            .          .           	// 
   1136            .          .           	// Note that the memory limit sets a hard maximum on our heap goal, 
   1137            .          .           	// but the live heap may grow beyond it. 

runtime.(*gcControllerState).trigger

/usr/lib/go/src/runtime/mgcpacer.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1192            .          .           	if trigger > goal { 
   1193            .          .           		print("trigger=", trigger, " heapGoal=", goal, "\n") 
   1194            .          .           		print("minTrigger=", minTrigger, " maxTrigger=", maxTrigger, "\n") 
   1195            .          .           		throw("produced a trigger greater than the heap goal") 
   1196            .          .           	} 
   1197         20ms       20ms           	return trigger, goal 
   1198            .          .           } 
   1199            .          .            
   1200            .          .           // commit recomputes all pacing parameters needed to derive the 
   1201            .          .           // trigger and the heap goal. Namely, the gcPercent-based heap goal, 
   1202            .          .           // and the amount of runway we want to give the GC this cycle. 
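
heapGoalInternal takes the smaller of the GOGC-based goal and the memory-limit goal, and trigger derives the trigger point from it. Both inputs are settable from user code; roughly, with GOGC=100 the goal is about twice the live heap, and SetMemoryLimit caps it. A sketch:

    import "runtime/debug"

    func tunePacer() {
        debug.SetGCPercent(100)       // GOGC: goal ≈ live heap * (1 + 100/100)
        debug.SetMemoryLimit(4 << 30) // soft cap; memoryLimitHeapGoal may win
    }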

runtime.(*mheap).nextSpanForSweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       70ms (flat, cum)  0.15%
     92            .          .            
     93            .          .           // nextSpanForSweep finds and pops the next span for sweeping from the 
     94            .          .           // central sweep buffers. It returns ownership of the span to the caller. 
     95            .          .           // Returns nil if no such span exists. 
     96            .          .           func (h *mheap) nextSpanForSweep() *mspan { 
     97         10ms       10ms           	sg := h.sweepgen 
     98            .          .           	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ { 
     99            .          .           		spc, full := sc.split() 
    100            .          .           		c := &h.central[spc].mcentral 
    101            .          .           		var s *mspan 
    102            .          .           		if full { 
    103            .       60ms           			s = c.fullUnswept(sg).pop() 
    104            .          .           		} else { 
    105            .          .           			s = c.partialUnswept(sg).pop() 
    106            .          .           		} 
    107            .          .           		if s != nil { 
    108            .          .           			// Write down that we found something so future sweepers 

runtime.(*activeSweep).begin

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms       20ms (flat, cum) 0.042%
    146            .          .           // 
    147            .          .           // Even if the sweepLocker is invalid, its sweepGen is always valid. 
    148            .          .           func (a *activeSweep) begin() sweepLocker { 
    149            .          .           	for { 
    150            .          .           		state := a.state.Load() 
    151         20ms       20ms           		if state&sweepDrainedMask != 0 { 
    152            .          .           			return sweepLocker{mheap_.sweepgen, false} 
    153            .          .           		} 
    154            .          .           		if a.state.CompareAndSwap(state, state+1) { 
    155            .          .           			return sweepLocker{mheap_.sweepgen, true} 
    156            .          .           		} 

runtime.(*activeSweep).end

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms       20ms (flat, cum) 0.042%
    166            .          .           	for { 
    167            .          .           		state := a.state.Load() 
    168            .          .           		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask { 
    169            .          .           			throw("mismatched begin/end of activeSweep") 
    170            .          .           		} 
    171         20ms       20ms           		if a.state.CompareAndSwap(state, state-1) {                                                               return Cas(&u.value, old, new)                               types.go:236

    172            .          .           			if state-1 != sweepDrainedMask { 
    173            .          .           				return 
    174            .          .           			} 
    175            .          .           			// We're the last sweeper, and there's nothing left to sweep. 
    176            .          .           			if debug.gcpacertrace > 0 { 
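
begin and end pack an active-sweeper count and a drained bit into a single atomic word and update it with a load/CAS retry loop. The same shape in user code with sync/atomic (a sketch, not the runtime's type):

    import "sync/atomic"

    const drainedMask = 1 << 31 // high bit: no more work will be added

    var state atomic.Uint32 // low bits: number of active workers

    func tryBegin() bool {
        for {
            s := state.Load()
            if s&drainedMask != 0 {
                return false // drained: refuse new workers
            }
            if state.CompareAndSwap(s, s+1) {
                return true // registered as an active worker
            }
            // lost the race; reload and retry
        }
    }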

runtime.(*activeSweep).isDone

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    209            .          .            
    210            .          .           // isDone returns true if all sweep work has been drained and no more 
    211            .          .           // outstanding sweepers exist. That is, when the sweep phase is 
    212            .          .           // completely done. 
    213            .          .           func (a *activeSweep) isDone() bool { 
    214         10ms       10ms           	return a.state.Load() == sweepDrainedMask 
    215            .          .           } 
    216            .          .            
    217            .          .           // reset sets up the activeSweep for the next sweep cycle. 
    218            .          .           // 
    219            .          .           // The world must be stopped. 

runtime.bgsweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0      490ms (flat, cum)  1.03%
    295            .          .           		// isn't spare idle time available on other cores. If there's available idle 
    296            .          .           		// time, helping to sweep can reduce allocation latencies by getting ahead of 
    297            .          .           		// the proportional sweeper and having spans ready to go for allocation. 
    298            .          .           		const sweepBatchSize = 10 
    299            .          .           		nSwept := 0 
    300            .      490ms           		for sweepone() != ^uintptr(0) { 
    301            .          .           			nSwept++ 
    302            .          .           			if nSwept%sweepBatchSize == 0 { 
    303            .          .           				goschedIfBusy() 
    304            .          .           			} 
    305            .          .           		} 
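
bgsweep drains spans through sweepone in batches of sweepBatchSize (10), yielding between batches when the machine is busy; that loop is where this profile's background sweep time sits. Sweep load tracks allocation volume, which MemStats can confirm (a sketch):

    import (
        "fmt"
        "runtime"
    )

    func gcReport() {
        var ms runtime.MemStats
        runtime.ReadMemStats(&ms)
        fmt.Printf("GC cycles: %d, heap alloc: %d MiB\n",
            ms.NumGC, ms.HeapAlloc>>20)
    }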

runtime.(*sweepLocker).tryAcquire

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        30ms       30ms (flat, cum) 0.063%
    341            .          .           func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) { 
    342            .          .           	if !l.valid { 
    343            .          .           		throw("use of invalid sweepLocker") 
    344            .          .           	} 
    345            .          .           	// Check before attempting to CAS. 
    346         10ms       10ms           	if atomic.Load(&s.sweepgen) != l.sweepGen-2 { 
    347            .          .           		return sweepLocked{}, false 
    348            .          .           	} 
    349            .          .           	// Attempt to acquire sweep ownership of s. 
    350         10ms       10ms           	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) { 
    351            .          .           		return sweepLocked{}, false 
    352            .          .           	} 
    353         10ms       10ms           	return sweepLocked{s}, true 
    354            .          .           } 
    355            .          .            
    356            .          .           // sweepone sweeps some unswept heap span and returns the number of pages returned 
    357            .          .           // to the heap, or ^uintptr(0) if there was nothing to sweep. 
    358            .          .           func sweepone() uintptr { 

runtime.sweepone

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        40ms      750ms (flat, cum)  1.57%
    372            .          .            
    373            .          .           	// Find a span to sweep. 
    374            .          .           	npages := ^uintptr(0) 
    375            .          .           	var noMoreWork bool 
    376            .          .           	for { 
    377            .       70ms           		s := mheap_.nextSpanForSweep() 
    378            .          .           		if s == nil { 
    379            .          .           			noMoreWork = sweep.active.markDrained() 
    380            .          .           			break 
    381            .          .           		} 
    382         20ms       20ms           		if state := s.state.get(); state != mSpanInUse { 
    383            .          .           			// This can happen if direct sweeping already 
    384            .          .           			// swept this span, but in that case the sweep 
    385            .          .           			// generation should always be up-to-date. 
    386            .          .           			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) { 
    387            .          .           				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n") 
    388            .          .           				throw("non in-use span in unswept list") 
    389            .          .           			} 
    390            .          .           			continue 
    391            .          .           		} 
    392            .       30ms           		if s, ok := sl.tryAcquire(s); ok { 
    393            .          .           			// Sweep the span we found. 
    394            .          .           			npages = s.npages 
    395            .      590ms           			if s.sweep(false) { 
    396            .          .           				// Whole span was freed. Count it toward the 
    397            .          .           				// page reclaimer credit since these pages can 
    398            .          .           				// now be used for span allocation. 
    399         20ms       20ms           				mheap_.reclaimCredit.Add(npages)                                                                               return Xadduintptr(&u.value, delta)          types.go:420

    400            .          .           			} else { 
    401            .          .           				// Span is still in-use, so this returned no 
    402            .          .           				// pages to the heap and the span needs to 
    403            .          .           				// move to the swept in-use list. 
    404            .          .           				npages = 0 
    405            .          .           			} 
    406            .          .           			break 
    407            .          .           		} 
    408            .          .           	} 
    409            .       20ms           	sweep.active.end(sl) 
    410            .          .            
    411            .          .           	if noMoreWork { 
    412            .          .           		// The sweep list is empty. There may still be 
    413            .          .           		// concurrent sweeps running, but we're at least very 
    414            .          .           		// close to done sweeping. 

runtime.isSweepDone

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    453            .          .           // Note that this condition may transition from false to true at any 
    454            .          .           // time as the sweeper runs. It may transition from true to false if a 
    455            .          .           // GC runs; to prevent that the caller must be non-preemptible or must 
    456            .          .           // somehow block GC progress. 
    457            .          .           func isSweepDone() bool { 
    458         10ms       10ms           	return sweep.active.isDone()                                                       return a.state.Load() == sweepDrainedMask                            mgcsweep.go:214

    459            .          .           } 
    460            .          .            
    461            .          .           // Returns only when span s has been swept. 
    462            .          .           // 
    463            .          .           //go:nowritebarrier 

runtime.(*mspan).ensureSwept

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms       20ms (flat, cum) 0.042%
    471            .          .           	} 
    472            .          .            
    473            .          .           	// If this operation fails, then that means that there are 
    474            .          .           	// no more spans to be swept. In this case, either s has already 
    475            .          .           	// been swept, or is about to be acquired for sweeping and swept. 
    476         20ms       20ms           	sl := sweep.active.begin()                                                       if state&sweepDrainedMask != 0 {                                     mgcsweep.go:151

    477            .          .           	if sl.valid { 
    478            .          .           		// The caller must be sure that the span is a mSpanInUse span. 
    479            .          .           		if s, ok := sl.tryAcquire(s); ok { 
    480            .          .           			s.sweep(false) 
    481            .          .           			sweep.active.end(sl) 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    551            .          .           	// Both 1 and 2 are possible at the same time. 
    552            .          .           	hadSpecials := s.specials != nil 
    553            .          .           	siter := newSpecialsIter(s) 
    554            .          .           	for siter.valid() { 
    555            .          .           		// A finalizer can be set for an inner byte of an object, find object beginning. 
    556         10ms       10ms           		objIndex := uintptr(siter.s.offset) / size 
    557            .          .           		p := s.base() + objIndex*size 
    558            .          .           		mbits := s.markBitsForIndex(objIndex) 
    559            .          .           		if !mbits.isMarked() { 
    560            .          .           			// This object is not marked and has at least one special record. 
    561            .          .           			// Pass 1: see if it has a finalizer. 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0       60ms (flat, cum)  0.13%
    650            .          .           		} 
    651            .          .           	} 
    652            .          .            
    653            .          .           	// Copy over and clear the inline mark bits if necessary. 
    654            .          .           	if gcUsesSpanInlineMarkBits(s.elemsize) { 
    655            .       60ms           		s.moveInlineMarks(s.gcmarkBits) 
    656            .          .           	} 
    657            .          .            
    658            .          .           	// Check for zombie objects. 
    659            .          .           	if s.freeindex < s.nelems { 
    660            .          .           		// Everything < freeindex is allocated and hence 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       60ms (flat, cum)  0.13%
    675            .          .           	} 
    676            .          .            
    677            .          .           	// Count the number of free objects in this span. 
    678            .          .           	nalloc := uint16(s.countAlloc()) 
    679            .          .           	nfreed := s.allocCount - nalloc 
    680         10ms       10ms           	if nalloc > s.allocCount { 
    681            .          .           		// The zombie check above should have caught this in 
    682            .          .           		// more detail. 
    683            .          .           		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n") 
    684            .          .           		throw("sweep increased allocation count") 
    685            .          .           	} 
    686            .          .            
    687            .          .           	s.allocCount = nalloc 
    688            .          .           	s.freeindex = 0 // reset allocation index to start of span. 
    689            .          .           	s.freeIndexForScan = 0 
    690            .          .           	if traceEnabled() { 
    691            .          .           		getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize 
    692            .          .           	} 
    693            .          .            
    694            .          .           	// gcmarkBits becomes the allocBits. 
    695            .          .           	// get a fresh cleared gcmarkBits in preparation for next GC 
    696            .          .           	s.allocBits = s.gcmarkBits 
    697            .       50ms           	s.gcmarkBits = newMarkBits(uintptr(s.nelems)) 
    698            .          .            
    699            .          .           	// refresh pinnerBits if they exist 
    700            .          .           	if s.pinnerBits != nil { 
    701            .          .           		s.refreshPinnerBits() 
    702            .          .           	} 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        30ms      460ms (flat, cum)  0.96%
    768            .          .           			// objects, because a fresh span that had been allocated into, 
    769            .          .           			// wasn't totally filled, but then swept, still has all of its 
    770            .          .           			// free slots zeroed. 
    771            .          .           			s.needzero = 1 
    772            .          .           			stats := memstats.heapStats.acquire() 
    773         10ms       10ms           			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed)) 
    774            .       10ms           			memstats.heapStats.release() 
    775            .          .            
    776            .          .           			// Count the frees in the inconsistent, internal stats. 
    777         20ms       20ms           			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize))                                                                       return Xadd64(&u.value, delta)                       types.go:344

    778            .          .           		} 
    779            .          .           		if !preserve { 
    780            .          .           			// The caller may not have removed this span from whatever 
    781            .          .           			// unswept set it's on but taken ownership of the span for 
    782            .          .           			// sweeping by updating sweepgen. If this span still is in 
    783            .          .           			// an unswept set, then the mcentral will pop it off the 
    784            .          .           			// set, check its sweepgen, and ignore it. 
    785            .          .           			if nalloc == 0 { 
    786            .          .           				// Free totally free span directly back to the heap. 
    787            .      420ms           				mheap_.freeSpan(s)                                                                               systemstack(func() {                         mheap.go:1633

    788            .          .           				return true 
    789            .          .           			} 
    790            .          .           			// Return span back to the right mcentral list. 
    791            .          .           			if nalloc == s.nelems { 
    792            .          .           				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s) 

runtime.deductSweepCredit

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms       30ms (flat, cum) 0.063%
    907            .          .           // It uses statistics gathered by the garbage collector to perform 
    908            .          .           // enough sweeping so that all pages are swept during the concurrent 
    909            .          .           // sweep phase between GC cycles. 
    910            .          .           // 
    911            .          .           // mheap_ must NOT be locked. 
    912         10ms       20ms           func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) { 
    913            .          .           	if mheap_.sweepPagesPerByte == 0 { 
    914            .          .           		// Proportional sweep is done or disabled. 
    915            .          .           		return 
    916            .          .           	} 
    917            .          .            
    918            .          .           	trace := traceAcquire() 
    919            .          .           	if trace.ok() { 
    920            .          .           		trace.GCSweepStart() 
    921            .          .           		traceRelease(trace) 
    922            .          .           	} 
    923            .          .            
    924            .          .           	// Fix debt if necessary. 
    925            .          .           retry: 
    926            .          .           	sweptBasis := mheap_.pagesSweptBasis.Load() 
    927            .          .           	live := gcController.heapLive.Load() 
    928         10ms       10ms           	liveBasis := mheap_.sweepHeapLiveBasis 
    929            .          .           	newHeapLive := spanBytes 
    930            .          .           	if liveBasis < live { 
    931            .          .           		// Only do this subtraction when we don't overflow. Otherwise, pagesTarget 
    932            .          .           		// might be computed as something really huge, causing us to get stuck 
    933            .          .           		// sweeping here until the next mark phase. 

runtime.memequal

/usr/lib/go/src/internal/bytealg/equal_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
     27            .          .           	CMP     R0, R1 
     28            .          .           	BEQ     equal 
     29            .          .           	CMP	$1, R2 
     30            .          .           	// handle 1-byte special case for better performance 
     31            .          .           	BEQ	one 
     32         10ms       10ms           	CMP	$16, R2 
     33            .          .           	// handle specially if length < 16 
     34            .          .           	BLO	tail 
     35            .          .           	BIC	$0x3f, R2, R3 
     36            .          .           	CBZ	R3, chunk16 
     37            .          .           	// work with 64-byte chunks 

runtime.memequal

/usr/lib/go/src/internal/bytealg/equal_arm64.s

  Total:       190ms      190ms (flat, cum)   0.4%
     59            .          .           	BIC	$0xf, R2, R3 
     60            .          .           	CBZ	R3, tail 
     61            .          .           	ADD	R3, R0, R6	// end of chunks 
     62            .          .           chunk16_loop: 
     63            .          .           	LDP.P	16(R0), (R4, R5) 
     64         10ms       10ms           	LDP.P	16(R1), (R7, R9) 
     65            .          .           	EOR	R4, R7 
     66            .          .           	CBNZ	R7, not_equal 
     67            .          .           	EOR	R5, R9 
     68            .          .           	CBNZ	R9, not_equal 
     69            .          .           	CMP	R0, R6 
     70            .          .           	BNE	chunk16_loop 
     71            .          .           	AND	$0xf, R2, R2 
     72            .          .           	CBZ	R2, equal 
     73            .          .           tail: 
     74            .          .           	// special compare of tail with length < 16 
     75            .          .           	TBZ	$3, R2, lt_8 
     76            .          .           	MOVD	(R0), R4 
     77            .          .           	MOVD	(R1), R5 
     78         10ms       10ms           	EOR	R4, R5 
     79            .          .           	CBNZ	R5, not_equal 
     80            .          .           	SUB	$8, R2, R6	// offset of the last 8 bytes 
     81            .          .           	MOVD	(R0)(R6), R4 
     82            .          .           	MOVD	(R1)(R6), R5 
     83            .          .           	EOR	R4, R5 
     84            .          .           	CBNZ	R5, not_equal 
     85            .          .           	B	equal 
     86            .          .           	PCALIGN	$16 
     87            .          .           lt_8: 
     88         10ms       10ms           	TBZ	$2, R2, lt_4 
     89         20ms       20ms           	MOVWU	(R0), R4 
     90            .          .           	MOVWU	(R1), R5 
     91         60ms       60ms           	EOR	R4, R5 
     92            .          .           	CBNZ	R5, not_equal 
     93            .          .           	SUB	$4, R2, R6	// offset of the last 4 bytes 
     94            .          .           	MOVWU	(R0)(R6), R4 
     95            .          .           	MOVWU	(R1)(R6), R5 
     96            .          .           	EOR	R4, R5 
     97            .          .           	CBNZ	R5, not_equal 
     98            .          .           	B	equal 
     99            .          .           	PCALIGN	$16 
    100            .          .           lt_4: 
    101            .          .           	TBZ	$1, R2, lt_2 
    102            .          .           	MOVHU.P	2(R0), R4 
    103         50ms       50ms           	MOVHU.P	2(R1), R5 
    104         30ms       30ms           	CMP	R4, R5 
    105            .          .           	BNE	not_equal 
    106            .          .           lt_2: 
    107            .          .           	TBZ	$0, R2, equal 
    108            .          .           one: 
    109            .          .           	MOVBU	(R0), R4 
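
memequal is the routine behind == on strings and arrays and behind the final key comparison in map lookups on arm64. The samples cluster in the lt_8/lt_4 tail paths, which handle operands shorter than 8 bytes, consistent with short map keys; a lookup like the sketch below is where such a comparison would come from:

    // A map hit hashes the key, then memequal confirms the match;
    // for keys under 8 bytes that is the lt_8/lt_4 tail above.
    func lookup(m map[string]int, key string) (int, bool) {
        v, ok := m[key]
        return v, ok
    }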

git.urbach.dev/cli/q/src/codegen.(*Function).fixRegisterConflicts

/home/user/q/src/codegen/fixRegisterConflicts.go

  Total:       190ms      480ms (flat, cum)  1.00%
     11            .          .           ) 
     12            .          .            
     13            .          .           // fixRegisterConflicts checks for conflicts where 2 values that are live at the same time use the same register. 
     14            .          .           // It then assigns a new register to the value that was defined earlier. 
     15            .          .           func (f *Function) fixRegisterConflicts() { 
     16         20ms       20ms           	for _, step := range f.Steps { 
     17            .          .           		var clobbered []cpu.Register 
     18            .          .            
     19         30ms       30ms           		switch instr := step.Value.(type) { 
     20         10ms       10ms           		case *ssa.BinaryOp: 
     21            .          .           			switch instr.Op { 
     22            .          .           			case token.Div, token.Mod: 
     23            .          .           				clobbered = f.CPU.DivisionClobbered 
     24            .          .           			case token.Shl, token.Shr: 
     25            .          .           				clobbered = f.CPU.ShiftRestricted 
     26            .          .            
     27            .          .           				if slices.Contains(f.CPU.ShiftRestricted, step.Register) { 
     28            .          .           					f.assignFreeRegister(step) 
     29            .          .           				} 
     30            .          .           			} 
     31            .          .            
     32            .          .           			if step.Register != -1 { 
     33            .       40ms           				right := f.ValueToStep[instr.Right] 
     34            .          .            
     35            .          .           				if step.Register == right.Register { 
     36            .          .           					f.assignFreeRegister(right) 
     37            .          .           				} 
     38            .          .            
     39            .          .           				left := f.ValueToStep[instr.Left] 
     40            .          .            
     41            .          .           				if instr.Op == token.Mod && step.Register == left.Register { 
     42            .          .           					f.assignFreeRegister(left) 
     43            .          .           				} 
     44            .          .           			} 
     45         10ms       10ms           		case *ssa.Call: 
     46            .          .           			clobbered = f.CPU.Call.Clobbered 
     47            .          .           		case *ssa.CallExtern: 
     48            .          .           			clobbered = f.CPU.ExternCall.Clobbered 
     49            .          .           		case *ssa.Register: 
     50            .          .           			if f.build.Arch == config.ARM && step.Register == arm.SP { 
     51            .          .           				f.assignFreeRegister(step) 
     52            .          .           			} 
     53            .          .           		case *ssa.Syscall: 
     54            .          .           			clobbered = f.CPU.Syscall.Clobbered 
     55            .          .           		} 
     56            .          .            
     57         20ms       20ms           		for i, live := range step.Live { 
     58            .          .           			if live.Register == -1 { 
     59            .          .           				continue 
     60            .          .           			} 
     61            .          .            
     62         40ms       40ms           			if live.Value != step.Value && slices.Contains(clobbered, live.Register) {                                                                       return Index(s, v) >= 0                              slices.go:118
                                                                          if v == s[i] {                                   slices.go:98

     63            .      100ms           				f.assignFreeRegister(live)                                                                               step.Register = f.findFreeRegister(step)     assignFreeRegister.go:6

     64            .          .           				continue 
     65            .          .           			} 
     66            .          .            
     67         10ms       10ms           			for _, previous := range step.Live[:i] { 
     68         30ms       30ms           				if previous.Register == -1 { 
     69            .          .           					continue 
     70            .          .           				} 
     71            .          .            
     72         20ms       20ms           				if previous.Register != live.Register { 
     73            .          .           					continue 
     74            .          .           				} 
     75            .          .            
     76            .          .           				if previous.Index < live.Index { 
     77            .          .           					f.assignFreeRegister(previous) 
     78            .          .           				} else { 
     79            .      150ms           					f.assignFreeRegister(live)                                                                                       step.Register = f.findFreeRegister(step) assignFreeRegister.go:6

     80            .          .           					break 
     81            .          .           				} 
     82            .          .           			} 
     83            .          .           		} 
     84            .          .           	}