pprof

tests.test cpu
File: tests.test
Build ID: e7126a48dfa644bc13d3357e84708130593b5567
Type: cpu
Time: 2025-12-15 10:10:39 UTC
Duration: 18.82s, Total samples = 47.62s (253.01%)
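
Total samples (47.62s) exceed wall time (18.82s) because the process kept roughly 2.5 cores busy on average (253.01%). A minimal sketch of collecting a CPU profile like this with runtime/pprof; the file name and workload are placeholders. For a test binary such as tests.test, go test -cpuprofile cpu.pprof produces the same kind of file, and the result can be browsed with go tool pprof -http=: cpu.pprof.

    package main

    import (
        "log"
        "os"
        "runtime/pprof"
        "time"
    )

    func main() {
        f, err := os.Create("cpu.pprof")
        if err != nil {
            log.Fatal(err)
        }
        defer f.Close()

        if err := pprof.StartCPUProfile(f); err != nil {
            log.Fatal(err)
        }
        defer pprof.StopCPUProfile()

        time.Sleep(100 * time.Millisecond) // stand-in for the workload under test
    }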

internal/runtime/syscall.Syscall6

/usr/lib/go/src/internal/runtime/syscall/asm_linux_arm64.s

  Total:       6.40s      6.40s (flat, cum) 13.44%
     12            .          .           	MOVD	a3+24(FP), R2 
     13            .          .           	MOVD	a4+32(FP), R3 
     14            .          .           	MOVD	a5+40(FP), R4 
     15            .          .           	MOVD	a6+48(FP), R5 
     16            .          .           	SVC 
     17        6.39s      6.39s           	CMN	$4095, R0 
     18         10ms       10ms           	BCC	ok 
     19            .          .           	MOVD	$-1, R4 
     20            .          .           	MOVD	R4, r1+56(FP) 
     21            .          .           	MOVD	ZR, r2+64(FP) 
     22            .          .           	NEG	R0, R0 
     23            .          .           	MOVD	R0, errno+72(FP) 
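
Nearly all of the 6.40s lands on the CMN that follows SVC: when the profiling signal fires during a system call, the recorded user-space PC is the return address, i.e. the first instruction after SVC, so time spent in the kernel is attributed there. A minimal (Linux-only) sketch of reaching this path from user code; getpid is just a harmless example of a raw syscall, which ultimately funnels through assembly like the Syscall6 above:

    package main

    import (
        "fmt"
        "syscall"
    )

    func main() {
        pid, _, errno := syscall.Syscall(syscall.SYS_GETPID, 0, 0, 0)
        if errno != 0 {
            fmt.Println("errno:", errno)
            return
        }
        fmt.Println("pid:", pid)
    }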

runtime.usleep

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       160ms      160ms (flat, cum)  0.34%
    117            .          .           	MOVW	$SYS_pipe2, R8 
    118            .          .           	SVC 
    119            .          .           	MOVW	R0, errno+16(FP) 
    120            .          .           	RET 
    121            .          .            
    122         10ms       10ms           TEXT runtime·usleep(SB),NOSPLIT,$24-4 
    123            .          .           	MOVWU	usec+0(FP), R3 
    124            .          .           	MOVD	R3, R5 
    125            .          .           	MOVW	$1000000, R4 
    126            .          .           	UDIV	R4, R3 
    127            .          .           	MOVD	R3, 8(RSP) 
    128            .          .           	MUL	R3, R4 
    129            .          .           	SUB	R4, R5 
    130            .          .           	MOVW	$1000, R4 
    131            .          .           	MUL	R4, R5 
    132            .          .           	MOVD	R5, 16(RSP) 
    133            .          .            
    134            .          .           	// nanosleep(&ts, 0) 
    135            .          .           	ADD	$8, RSP, R0 
    136            .          .           	MOVD	$0, R1 
    137            .          .           	MOVD	$SYS_nanosleep, R8 
    138            .          .           	SVC 
    139        150ms      150ms           	RET 
    140            .          .            
    141            .          .           TEXT runtime·gettid(SB),NOSPLIT,$0-4 
    142            .          .           	MOVD	$SYS_gettid, R8 
    143            .          .           	SVC 
    144            .          .           	MOVW	R0, ret+0(FP) 
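
The arithmetic above splits a microsecond count into a timespec: UDIV computes usec/1e6 (seconds), MUL and SUB recover the remainder, and the final MUL by 1000 scales it to nanoseconds. The same computation in Go, with an illustrative input:

    package main

    import "fmt"

    func main() {
        const usec = 1_234_567
        sec := usec / 1_000_000     // UDIV above
        rem := usec - sec*1_000_000 // MUL + SUB above
        nsec := rem * 1000          // MOVW $1000 + MUL above
        fmt.Println(sec, nsec)      // 1 234567000
    }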

runtime.madvise

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:        50ms       50ms (flat, cum)   0.1%
    633            .          .           	MOVD	addr+0(FP), R0 
    634            .          .           	MOVD	n+8(FP), R1 
    635            .          .           	MOVW	flags+16(FP), R2 
    636            .          .           	MOVD	$SYS_madvise, R8 
    637            .          .           	SVC 
    638         50ms       50ms           	MOVW	R0, ret+24(FP) 
    639            .          .           	RET 
    640            .          .            
    641            .          .           // int64 futex(int32 *uaddr, int32 op, int32 val, 

runtime.futex

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       4.32s      4.32s (flat, cum)  9.07%
    642            .          .           //	struct timespec *timeout, int32 *uaddr2, int32 val2); 
    643            .          .           TEXT runtime·futex(SB),NOSPLIT|NOFRAME,$0 
    644            .          .           	MOVD	addr+0(FP), R0 
    645         10ms       10ms           	MOVW	op+8(FP), R1 
    646            .          .           	MOVW	val+12(FP), R2 
    647            .          .           	MOVD	ts+16(FP), R3 
    648            .          .           	MOVD	addr2+24(FP), R4 
    649            .          .           	MOVW	val3+32(FP), R5 
    650            .          .           	MOVD	$SYS_futex, R8 
    651            .          .           	SVC 
    652        4.30s      4.30s           	MOVW	R0, ret+40(FP) 
    653         10ms       10ms           	RET 
    654            .          .            
    655            .          .           // int64 clone(int32 flags, void *stk, M *mp, G *gp, void (*fn)(void)); 
    656            .          .           TEXT runtime·clone(SB),NOSPLIT|NOFRAME,$0 
    657            .          .           	MOVW	flags+0(FP), R0 
    658            .          .           	MOVD	stk+8(FP), R1 
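
The 4.30s on the MOVW after SVC is time attributed to the futex call itself; the runtime's park/unpark primitives (notesleep/notewakeup, see mPark and stopm below) funnel through this entry point. A hedged sketch of issuing the same futex(2) operations via the public syscall package; FUTEX_WAIT=0 and FUTEX_WAKE=1 are the standard Linux values, and this is illustrative rather than the runtime's actual code:

    package main

    import (
        "fmt"
        "syscall"
        "unsafe"
    )

    const (
        futexWait = 0 // FUTEX_WAIT
        futexWake = 1 // FUTEX_WAKE
    )

    var word int32 = 1

    // wait blocks while word == val; with word != val it returns
    // immediately with EAGAIN, so this demo does not hang.
    func wait(val int32) syscall.Errno {
        _, _, errno := syscall.Syscall6(syscall.SYS_FUTEX,
            uintptr(unsafe.Pointer(&word)), futexWait, uintptr(val), 0, 0, 0)
        return errno
    }

    // wake releases up to n waiters blocked on word.
    func wake(n int32) {
        syscall.Syscall6(syscall.SYS_FUTEX,
            uintptr(unsafe.Pointer(&word)), futexWake, uintptr(n), 0, 0, 0)
    }

    func main() {
        fmt.Println(wait(0)) // EAGAIN: word is 1, not 0
        wake(1)              // no waiters; returns immediately
    }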

runtime.osyield

/usr/lib/go/src/runtime/sys_linux_arm64.s

  Total:       200ms      200ms (flat, cum)  0.42%
    732            .          .           	MOVD	R0, (R0)	// crash 
    733            .          .           ok: 
    734            .          .           	RET 
    735            .          .            
    736            .          .           TEXT runtime·osyield(SB),NOSPLIT|NOFRAME,$0 
    737         10ms       10ms           	MOVD	$SYS_sched_yield, R8 
    738            .          .           	SVC 
    739        190ms      190ms           	RET 
    740            .          .            
    741            .          .           TEXT runtime·sched_getaffinity(SB),NOSPLIT|NOFRAME,$0 
    742            .          .           	MOVD	pid+0(FP), R0 
    743            .          .           	MOVD	len+8(FP), R1 
    744            .          .           	MOVD	buf+16(FP), R2 

runtime.(*moduledata).textAddr

/usr/lib/go/src/runtime/symtab.go

  Total:       320ms      320ms (flat, cum)  0.67%
    683            .          .           // relocated baseaddr to compute the function address. 
    684            .          .           // 
    685            .          .           // It is nosplit because it is part of the findfunc implementation. 
    686            .          .           // 
    687            .          .           //go:nosplit 
    688         30ms       30ms           func (md *moduledata) textAddr(off32 uint32) uintptr { 
    689         10ms       10ms           	off := uintptr(off32) 
    690        110ms      110ms           	res := md.text + off 
    691         50ms       50ms           	if len(md.textsectmap) > 1 { 
    692            .          .           		for i, sect := range md.textsectmap { 
    693            .          .           			// For the last section, include the end address (etext), as it is included in the functab. 
    694            .          .           			if off >= sect.vaddr && off < sect.end || (i == len(md.textsectmap)-1 && off == sect.end) { 
    695            .          .           				res = sect.baseaddr + off - sect.vaddr 
    696            .          .           				break 
    697            .          .           			} 
    698            .          .           		} 
    699            .          .           		if res > md.etext && GOARCH != "wasm" { // on wasm, functions do not live in the same address space as the linear memory 
    700            .          .           			println("runtime: textAddr", hex(res), "out of range", hex(md.text), "-", hex(md.etext)) 
    701            .          .           			throw("runtime: text offset out of range") 
    702            .          .           		} 
    703            .          .           	} 
    704            .          .           	if GOARCH == "wasm" { 
    705            .          .           		// On Wasm, a text offset (e.g. in the method table) is function index, whereas 
    706            .          .           		// the "PC" is function index << 16 + block index. 
    707            .          .           		res <<= 16 
    708            .          .           	} 
    709        120ms      120ms           	return res 
    710            .          .           } 
    711            .          .            
    712            .          .           // textOff is the opposite of textAddr. It converts a PC to a (virtual) offset 
    713            .          .           // to md.text, and returns if the PC is in any Go text section. 
    714            .          .           // 
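
textAddr resolves a 32-bit text offset to a virtual address, falling back to a section search when the text segment was split at link time. A simplified sketch of just that mapping, with an invented two-section layout and without the wasm and range-check paths:

    package main

    import "fmt"

    type textsect struct {
        vaddr, end, baseaddr uintptr
    }

    func textAddr(text uintptr, sects []textsect, off uintptr) uintptr {
        res := text + off
        if len(sects) > 1 {
            for i, s := range sects {
                // The last section also accepts its end address (etext).
                if off >= s.vaddr && off < s.end || (i == len(sects)-1 && off == s.end) {
                    res = s.baseaddr + off - s.vaddr
                    break
                }
            }
        }
        return res
    }

    func main() {
        sects := []textsect{
            {vaddr: 0x0000, end: 0x1000, baseaddr: 0x400000},
            {vaddr: 0x1000, end: 0x2000, baseaddr: 0x500000}, // relocated
        }
        fmt.Printf("%#x\n", textAddr(0x400000, sects, 0x1800)) // 0x500800
    }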

runtime.(*moduledata).funcName

/usr/lib/go/src/runtime/symtab.go

  Total:           0       40ms (flat, cum) 0.084%
    749            .          .           // funcName returns the string at nameOff in the function name table. 
    750            .          .           func (md *moduledata) funcName(nameOff int32) string { 
    751            .          .           	if nameOff == 0 { 
    752            .          .           		return "" 
    753            .          .           	} 
    754            .       40ms           	return gostringnocopy(&md.funcnametab[nameOff])	// inlined: string.go:538
    755            .          .           } 
    756            .          .            
    757            .          .           // Despite being an exported symbol, 
    758            .          .           // FuncForPC is linknamed by widely used packages. 
    759            .          .           // Notable members of the hall of shame include: 

runtime.findmoduledatap

/usr/lib/go/src/runtime/symtab.go

  Total:       100ms      100ms (flat, cum)  0.21%
    854            .          .           // implementation. 
    855            .          .           // 
    856            .          .           //go:nosplit 
    857            .          .           func findmoduledatap(pc uintptr) *moduledata { 
    858            .          .           	for datap := &firstmoduledata; datap != nil; datap = datap.next { 
    859        100ms      100ms           		if datap.minpc <= pc && pc < datap.maxpc { 
    860            .          .           			return datap 
    861            .          .           		} 
    862            .          .           	} 
    863            .          .           	return nil 
    864            .          .           } 

runtime.funcInfo.entry

/usr/lib/go/src/runtime/symtab.go

  Total:       240ms      560ms (flat, cum)  1.18%
    889            .          .           //   - github.com/phuslu/log 
    890            .          .           // 
    891            .          .           // Do not remove or change the type signature. 
    892            .          .           // See go.dev/issue/67401. 
    893            .          .           func (f funcInfo) entry() uintptr { 
    894        240ms      560ms           	return f.datap.textAddr(f.entryOff) 
    895            .          .           } 
    896            .          .            
    897            .          .           //go:linkname badFuncInfoEntry runtime.funcInfo.entry 
    898            .          .           func badFuncInfoEntry(funcInfo) uintptr 
    899            .          .            

runtime.findfunc

/usr/lib/go/src/runtime/symtab.go

  Total:       680ms      680ms (flat, cum)  1.43%
    911            .          .           // See go.dev/issue/67401. 
    912            .          .           // 
    913            .          .           //go:nosplit 
    914            .          .           //go:linkname findfunc 
    915            .          .           func findfunc(pc uintptr) funcInfo { 
    916        100ms      100ms           	datap := findmoduledatap(pc)	// inlined: symtab.go:859
    917         10ms       10ms           	if datap == nil { 
    918            .          .           		return funcInfo{} 
    919            .          .           	} 
    920            .          .           	const nsub = uintptr(len(findfuncbucket{}.subbuckets)) 
    921            .          .            
    922         10ms       10ms           	pcOff, ok := datap.textOff(pc) 
    923            .          .           	if !ok { 
    924            .          .           		return funcInfo{} 
    925            .          .           	} 
    926            .          .            
    927         40ms       40ms           	x := uintptr(pcOff) + datap.text - datap.minpc // TODO: are datap.text and datap.minpc always equal? 
    928            .          .           	if GOARCH == "wasm" { 
    929            .          .           		// On Wasm, pcOff is the function index, whereas 
    930            .          .           		// the "PC" is function index << 16 + block index. 
    931            .          .           		x = uintptr(pcOff)<<16 + datap.text - datap.minpc 
    932            .          .           	} 
    933            .          .           	b := x / abi.FuncTabBucketSize 
    934            .          .           	i := x % abi.FuncTabBucketSize / (abi.FuncTabBucketSize / nsub) 
    935            .          .            
    936         10ms       10ms           	ffb := (*findfuncbucket)(add(unsafe.Pointer(datap.findfunctab), b*unsafe.Sizeof(findfuncbucket{}))) 
    937        200ms      200ms           	idx := ffb.idx + uint32(ffb.subbuckets[i]) 
    938            .          .            
    939            .          .           	// Find the ftab entry. 
    940        260ms      260ms           	for datap.ftab[idx+1].entryoff <= pcOff { 
    941            .          .           		idx++ 
    942            .          .           	} 
    943            .          .            
    944         30ms       30ms           	funcoff := datap.ftab[idx].funcoff 
    945         20ms       20ms           	return funcInfo{(*_func)(unsafe.Pointer(&datap.pclntable[funcoff])), datap} 
    946            .          .           } 
    947            .          .            
    948            .          .           // A srcFunc represents a logical function in the source code. This may 
    949            .          .           // correspond to an actual symbol in the binary text, or it may correspond to a 
    950            .          .           // source function that has been inlined. 
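
findfunc narrows its search with a two-level index before the linear ftab scan (the 260ms loop above): x picks a bucket, then one of its sub-buckets, and only the short tail after that start index is walked. A sketch of the index arithmetic; the constants mirror the current runtime (4096-byte buckets, 16 sub-buckets) but should be read as illustrative:

    package main

    import "fmt"

    const (
        bucketSize = 4096 // abi.FuncTabBucketSize
        nsub       = 16   // len(findfuncbucket{}.subbuckets)
    )

    func bucketIndex(x uintptr) (b, i uintptr) {
        b = x / bucketSize                       // which bucket
        i = x % bucketSize / (bucketSize / nsub) // which sub-bucket within it
        return
    }

    func main() {
        fmt.Println(bucketIndex(10000)) // 2 7
    }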

runtime.pcvalue

/usr/lib/go/src/runtime/symtab.go

  Total:        30ms       30ms (flat, cum) 0.063%
   1000            .          .           func pcvalueCacheKey(targetpc uintptr) uintptr { 
   1001            .          .           	return (targetpc / goarch.PtrSize) % uintptr(len(pcvalueCache{}.entries)) 
   1002            .          .           } 
   1003            .          .            
   1004            .          .           // Returns the PCData value, and the PC where this value starts. 
   1005         30ms       30ms           func pcvalue(f funcInfo, off uint32, targetpc uintptr, strict bool) (int32, uintptr) { 
   1006            .          .           	// If true, when we get a cache hit, still look up the data and make sure it 
   1007            .          .           	// matches the cached contents. 
   1008            .          .           	const debugCheckCache = false 
   1009            .          .            
   1010            .          .           	// If true, skip checking the cache entirely. 

runtime.pcvalue

/usr/lib/go/src/runtime/symtab.go

  Total:       1.51s      2.82s (flat, cum)  5.92%
   1025            .          .           		cache := &mp.pcvalueCache 
   1026            .          .           		// The cache can be used by the signal handler on this M. Avoid 
   1027            .          .           		// re-entrant use of the cache. The signal handler can also write inUse, 
   1028            .          .           		// but will always restore its value, so we can use a regular increment 
   1029            .          .           		// even if we get signaled in the middle of it. 
   1030         20ms       20ms           		cache.inUse++ 
   1031            .          .           		if cache.inUse == 1 { 
   1032        190ms      190ms           			for i := range cache.entries[ck] { 
   1033            .          .           				// We check off first because we're more 
   1034            .          .           				// likely to have multiple entries with 
   1035            .          .           				// different offsets for the same targetpc 
   1036            .          .           				// than the other way around, so we'll usually 
   1037            .          .           				// fail in the first clause. 
   1038         20ms       20ms           				ent := &cache.entries[ck][i] 
   1039        180ms      180ms           				if ent.off == off && ent.targetpc == targetpc { 
   1040            .          .           					val, pc := ent.val, ent.valPC 
   1041            .          .           					if debugCheckCache { 
   1042            .          .           						checkVal, checkPC = ent.val, ent.valPC 
   1043            .          .           						break 
   1044            .          .           					} else { 
   1045         10ms       10ms           						cache.inUse-- 
   1046            .          .           						releasem(mp) 
   1047            .          .           						return val, pc 
   1048            .          .           					} 
   1049            .          .           				} 
   1050            .          .           			} 
   1051            .          .           		} else if debugCheckCache && (cache.inUse < 1 || cache.inUse > 2) { 
   1052            .          .           			// Catch accounting errors or deeply reentrant use. In principle 
   1053            .          .           			// "inUse" should never exceed 2. 
   1054            .          .           			throw("cache.inUse out of range") 
   1055            .          .           		} 
   1056            .          .           		cache.inUse-- 
   1057         20ms       20ms           		releasem(mp)	// inlined: runtime1.go:637
   1058            .          .           	} 
   1059            .          .            
   1060            .          .           	if !f.valid() { 
   1061            .          .           		if strict && panicking.Load() == 0 { 
   1062            .          .           			println("runtime: no module data for", hex(f.entry())) 
   1063            .          .           			throw("no module data") 
   1064            .          .           		} 
   1065            .          .           		return -1, 0 
   1066            .          .           	} 
   1067            .          .           	datap := f.datap 
   1068         50ms       50ms           	p := datap.pctab[off:] 
   1069            .       90ms           	pc := f.entry()	// inlined: symtab.go:894 (textAddr)
   1070            .          .           	prevpc := pc 
   1071            .          .           	val := int32(-1) 
   1072         20ms       20ms           	for { 
   1073            .          .           		var ok bool 
   1074        420ms      1.64s           		p, ok = step(p, &pc, &val, pc == f.entry())	// inlined: symtab.go:894 (f.entry → textAddr)
   1075            .          .           		if !ok { 
   1076            .          .           			break 
   1077            .          .           		} 
   1078        140ms      140ms           		if targetpc < pc { 
   1079            .          .           			// Replace a random entry in the cache. Random 
   1080            .          .           			// replacement prevents a performance cliff if 
   1081            .          .           			// a recursive stack's cycle is slightly 
   1082            .          .           			// larger than the cache. 
   1083            .          .           			// Put the new element at the beginning, 
   1084            .          .           			// since it is the most likely to be newly used. 
   1085            .          .           			if debugCheckCache && checkPC != 0 { 
   1086            .          .           				if checkVal != val || checkPC != prevpc { 
   1087            .          .           					print("runtime: table value ", val, "@", prevpc, " != cache value ", checkVal, "@", checkPC, " at PC ", targetpc, " off ", off, "\n") 
   1088            .          .           					throw("bad pcvalue cache") 
   1089            .          .           				} 
   1090            .          .           			} else { 
   1091         50ms       50ms           				mp := acquirem()	// inlined: runtime1.go:630
   1092            .          .           				cache := &mp.pcvalueCache 
   1093         10ms       10ms           				cache.inUse++ 
   1094            .          .           				if cache.inUse == 1 { 
   1095            .          .           					e := &cache.entries[ck] 
   1096         80ms       80ms           					ci := cheaprandn(uint32(len(cache.entries[ck])))	// inlined: rand.go:293, rand.go:235-236 (cheaprand)
   1097        250ms      250ms           					e[ci] = e[0] 
   1098            .          .           					e[0] = pcvalueCacheEnt{ 
   1099            .          .           						targetpc: targetpc, 
   1100            .          .           						off:      off, 
   1101            .          .           						val:      val, 
   1102            .          .           						valPC:    prevpc, 
   1103            .          .           					} 
   1104            .          .           				} 
   1105         30ms       30ms           				cache.inUse-- 
   1106         20ms       20ms           				releasem(mp)	// inlined: runtime1.go:638
   1107            .          .           			} 
   1108            .          .            
   1109            .          .           			return val, prevpc 
   1110            .          .           		} 
   1111            .          .           		prevpc = pc 
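
The insert above moves the current front entry to a random slot and installs the new entry at the front, so the most recently added entry is probed first while eviction stays random (avoiding the cliff the comment describes for recursion cycles slightly larger than the cache). A standalone sketch of that policy, with simplified types and math/rand standing in for cheaprandn:

    package main

    import (
        "fmt"
        "math/rand"
    )

    type ent struct {
        targetpc, valPC uintptr
        off             uint32
        val             int32
    }

    // insert mirrors e[ci] = e[0]; e[0] = <new entry> above.
    func insert(e []ent, n ent) {
        ci := rand.Intn(len(e)) // cheaprandn in the runtime
        e[ci] = e[0]            // demote old front to a random slot
        e[0] = n                // newest entry is checked first
    }

    func main() {
        e := make([]ent, 8)
        insert(e, ent{targetpc: 0x1000, off: 4, val: 16, valPC: 0xff0})
        fmt.Println(e[0].val) // 16
    }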

runtime.funcname

/usr/lib/go/src/runtime/symtab.go

  Total:        10ms       50ms (flat, cum)   0.1%
   1137            .          .            
   1138            .          .           func funcname(f funcInfo) string { 
   1139            .          .           	if !f.valid() { 
   1140            .          .           		return "" 
   1141            .          .           	} 
   1142         10ms       50ms           	return f.datap.funcName(f.nameOff) 
   1143            .          .           } 
   1144            .          .            
   1145            .          .           func funcpkgpath(f funcInfo) string { 
   1146            .          .           	name := funcNameForPrint(funcname(f)) 
   1147            .          .           	i := len(name) - 1 

runtime.funcspdelta

/usr/lib/go/src/runtime/symtab.go

  Total:        10ms      1.69s (flat, cum)  3.55%
   1198            .          .           func funcline(f funcInfo, targetpc uintptr) (file string, line int32) { 
   1199            .          .           	return funcline1(f, targetpc, true) 
   1200            .          .           } 
   1201            .          .            
   1202            .          .           func funcspdelta(f funcInfo, targetpc uintptr) int32 { 
   1203         10ms      1.69s           	x, _ := pcvalue(f, f.pcsp, targetpc, true) 
   1204            .          .           	if debugPcln && x&(goarch.PtrSize-1) != 0 { 
   1205            .          .           		print("invalid spdelta ", funcname(f), " ", hex(f.entry()), " ", hex(targetpc), " ", hex(f.pcsp), " ", x, "\n") 
   1206            .          .           		throw("bad spdelta") 
   1207            .          .           	} 
   1208            .          .           	return x 

runtime.funcMaxSPDelta

/usr/lib/go/src/runtime/symtab.go

  Total:        80ms      260ms (flat, cum)  0.55%
   1209            .          .           } 
   1210            .          .            
   1211            .          .           // funcMaxSPDelta returns the maximum spdelta at any point in f. 
   1212            .          .           func funcMaxSPDelta(f funcInfo) int32 { 
   1213            .          .           	datap := f.datap 
   1214         60ms       60ms           	p := datap.pctab[f.pcsp:] 
   1215            .          .           	pc := f.entry() 
   1216            .          .           	val := int32(-1) 
   1217            .          .           	most := int32(0) 
   1218            .          .           	for { 
   1219            .          .           		var ok bool 
   1220         20ms      200ms           		p, ok = step(p, &pc, &val, pc == f.entry())	// inlined: symtab.go:894 (f.entry → textAddr)
   1221            .          .           		if !ok { 
   1222            .          .           			return most 
   1223            .          .           		} 
   1224            .          .           		most = max(most, val) 

runtime.pcdatastart

/usr/lib/go/src/runtime/symtab.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1225            .          .           	} 
   1226            .          .           } 
   1227            .          .            
   1228            .          .           func pcdatastart(f funcInfo, table uint32) uint32 { 
   1229         10ms       10ms           	return *(*uint32)(add(unsafe.Pointer(&f.nfuncdata), unsafe.Sizeof(f.nfuncdata)+uintptr(table)*4)) 
   1230            .          .           } 

runtime.pcdatavalue

/usr/lib/go/src/runtime/symtab.go

  Total:        70ms      1.21s (flat, cum)  2.54%
   1231            .          .            
   1232         10ms       10ms           func pcdatavalue(f funcInfo, table uint32, targetpc uintptr) int32 { 
   1233            .          .           	if table >= f.npcdata { 
   1234            .          .           		return -1 
   1235            .          .           	} 
   1236         40ms      1.18s           	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, true)	// inlined: symtab.go:1229 (pcdatastart)
   1237         20ms       20ms           	return r 
   1238            .          .           } 
   1239            .          .            
   1240            .          .           func pcdatavalue1(f funcInfo, table uint32, targetpc uintptr, strict bool) int32 { 

runtime.pcdatavalue1

/usr/lib/go/src/runtime/symtab.go

  Total:           0       30ms (flat, cum) 0.063%
   1241            .          .           	if table >= f.npcdata { 
   1242            .          .           		return -1 
   1243            .          .           	} 
   1244            .       30ms           	r, _ := pcvalue(f, pcdatastart(f, table), targetpc, strict) 
   1245            .          .           	return r 
   1246            .          .           } 
   1247            .          .            
   1248            .          .           // Like pcdatavalue, but also return the start PC of this PCData value. 
   1249            .          .           func pcdatavalue2(f funcInfo, table uint32, targetpc uintptr) (int32, uintptr) { 

runtime.funcdata

/usr/lib/go/src/runtime/symtab.go

  Total:       160ms      160ms (flat, cum)  0.34%
   1254            .          .           } 
   1255            .          .            
   1256            .          .           // funcdata returns a pointer to the ith funcdata for f. 
   1257            .          .           // funcdata should be kept in sync with cmd/link:writeFuncs. 
   1258            .          .           func funcdata(f funcInfo, i uint8) unsafe.Pointer { 
   1259         40ms       40ms           	if i < 0 || i >= f.nfuncdata { 
   1260            .          .           		return nil 
   1261            .          .           	} 
   1262         10ms       10ms           	base := f.datap.gofunc // load gofunc address early so that we calculate during cache misses 
   1263         20ms       20ms           	p := uintptr(unsafe.Pointer(&f.nfuncdata)) + unsafe.Sizeof(f.nfuncdata) + uintptr(f.npcdata)*4 + uintptr(i)*4 
   1264            .          .           	off := *(*uint32)(unsafe.Pointer(p)) 
   1265            .          .           	// Return off == ^uint32(0) ? 0 : f.datap.gofunc + uintptr(off), but without branches. 
   1266            .          .           	// The compiler calculates mask on most architectures using conditional assignment. 
   1267            .          .           	var mask uintptr 
   1268         10ms       10ms           	if off == ^uint32(0) { 
   1269            .          .           		mask = 1 
   1270            .          .           	} 
   1271            .          .           	mask-- 
   1272         70ms       70ms           	raw := base + uintptr(off) 
   1273         10ms       10ms           	return unsafe.Pointer(raw & mask) 
   1274            .          .           } 
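
The mask manipulation above is a branchless select: a stored offset of ^uint32(0) means "no funcdata"; mask-- turns the 0/1 flag into all-ones or zero, and the final AND forces the result to nil in the absent case without a branch. The same trick in isolation:

    package main

    import "fmt"

    func sel(base, off uint32) uint32 {
        var mask uint32
        if off == ^uint32(0) { // compiled to a conditional assignment
            mask = 1
        }
        mask-- // present: 0-1 = all ones; absent: 1-1 = 0
        raw := base + off
        return raw & mask
    }

    func main() {
        fmt.Printf("%#x\n", sel(0x1000, 0x40))       // 0x1040
        fmt.Printf("%#x\n", sel(0x1000, ^uint32(0))) // 0x0: masked to nil
    }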

runtime.step

/usr/lib/go/src/runtime/symtab.go

  Total:       1.20s      1.20s (flat, cum)  2.52%
   1276            .          .           // step advances to the next pc, value pair in the encoded table. 
   1277        110ms      110ms           func step(p []byte, pc *uintptr, val *int32, first bool) (newp []byte, ok bool) { 
   1278            .          .           	// For both uvdelta and pcdelta, the common case (~70%) 
   1279            .          .           	// is that they are a single byte. If so, avoid calling readvarint. 
   1280         20ms       20ms           	uvdelta := uint32(p[0]) 
   1281        580ms      580ms           	if uvdelta == 0 && !first { 
   1282            .          .           		return nil, false 
   1283            .          .           	} 
   1284            .          .           	n := uint32(1) 
   1285         40ms       40ms           	if uvdelta&0x80 != 0 { 
   1286        120ms      120ms           		n, uvdelta = readvarint(p)	// inlined: symtab.go:1304-1308
   1287            .          .           	} 
   1288         20ms       20ms           	*val += int32(-(uvdelta & 1) ^ (uvdelta >> 1)) 
   1289         40ms       40ms           	p = p[n:] 
   1290            .          .            
   1291         50ms       50ms           	pcdelta := uint32(p[0]) 
   1292            .          .           	n = 1 
   1293         40ms       40ms           	if pcdelta&0x80 != 0 { 
   1294         30ms       30ms           		n, pcdelta = readvarint(p)	// inlined: symtab.go:1304-1307
   1295            .          .           	} 
   1296         80ms       80ms           	p = p[n:] 
   1297         30ms       30ms           	*pc += uintptr(pcdelta * sys.PCQuantum) 
   1298         40ms       40ms           	return p, true 
   1299            .          .           } 
   1300            .          .            
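
The expression int32(-(uvdelta & 1) ^ (uvdelta >> 1)) above is a zig-zag decode: encoded values 0, 1, 2, 3, 4, ... become signed deltas 0, -1, 1, -2, 2, ..., which keeps small deltas of either sign to one byte. A standalone check:

    package main

    import "fmt"

    func zigzag(u uint32) int32 {
        return int32(-(u & 1) ^ (u >> 1))
    }

    func main() {
        for u := uint32(0); u < 5; u++ {
            fmt.Print(zigzag(u), " ") // 0 -1 1 -2 2
        }
        fmt.Println()
    }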

runtime.readvarint

/usr/lib/go/src/runtime/symtab.go

  Total:       150ms      150ms (flat, cum)  0.31%
   1302            .          .           func readvarint(p []byte) (read uint32, val uint32) { 
   1303            .          .           	var v, shift, n uint32 
   1304         50ms       50ms           	for { 
   1305         20ms       20ms           		b := p[n] 
   1306            .          .           		n++ 
   1307         70ms       70ms           		v |= uint32(b&0x7F) << (shift & 31) 
   1308         10ms       10ms           		if b&0x80 == 0 { 
   1309            .          .           			break 
   1310            .          .           		} 
   1311            .          .           		shift += 7 
   1312            .          .           	} 
   1313            .          .           	return n, v 
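
readvarint is a LEB128-style decode: each byte contributes 7 payload bits, least-significant group first, and the high bit marks continuation. The function above made standalone, with a classic worked input:

    package main

    import "fmt"

    func readvarint(p []byte) (read, val uint32) {
        var v, shift, n uint32
        for {
            b := p[n]
            n++
            v |= uint32(b&0x7F) << (shift & 31)
            if b&0x80 == 0 {
                break
            }
            shift += 7
        }
        return n, v
    }

    func main() {
        // 0x65 | 0x0E<<7 | 0x26<<14 = 624485, spread over three bytes
        n, v := readvarint([]byte{0xE5, 0x8E, 0x26})
        fmt.Println(n, v) // 3 624485
    }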

runtime.stackmapdata

/usr/lib/go/src/runtime/symtab.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1325            .          .           	// The invariant is already checked by many of stackmapdata's callers, 
   1326            .          .           	// and disabling it by default allows stackmapdata to be inlined. 
   1327            .          .           	if stackDebug > 0 && (n < 0 || n >= stkmap.n) { 
   1328            .          .           		throw("stackmapdata: index out of range") 
   1329            .          .           	} 
   1330         10ms       10ms           	return bitvector{stkmap.nbit, addb(&stkmap.bytedata[0], uintptr(n*((stkmap.nbit+7)>>3)))} 
   1331            .          .           } 

runtime.gopark

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
    445            .          .           		checkTimeouts() // timeouts may expire while two goroutines keep the scheduler busy 
    446            .          .           	} 
    447            .          .           	mp := acquirem() 
    448            .          .           	gp := mp.curg 
    449            .          .           	status := readgstatus(gp) 
    450         20ms       20ms           	if status != _Grunning && status != _Gscanrunning { 
    451            .          .           		throw("gopark: bad g status") 
    452            .          .           	} 
    453            .          .           	mp.waitlock = lock 
    454            .          .           	mp.waitunlockf = unlockf 
    455            .          .           	gp.waitreason = reason 

runtime.goready

/usr/lib/go/src/runtime/proc.go

  Total:           0      720ms (flat, cum)  1.51%
    475            .          .           // Do not remove or change the type signature. 
    476            .          .           // See go.dev/issue/67401. 
    477            .          .           // 
    478            .          .           //go:linkname goready 
    479            .          .           func goready(gp *g, traceskip int) { 
    480            .      720ms           	systemstack(func() { 

runtime.send.goready.func1

/usr/lib/go/src/runtime/proc.go

  Total:           0      720ms (flat, cum)  1.51%
    481            .      720ms           		ready(gp, traceskip, true) 
    482            .          .           	}) 
    483            .          .           } 
    484            .          .            
    485            .          .           //go:nosplit 
    486            .          .           func acquireSudog() *sudog { 

runtime.acquireSudog

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
    509            .          .           			pp.sudogcache = append(pp.sudogcache, new(sudog)) 
    510            .          .           		} 
    511            .          .           	} 
    512            .          .           	n := len(pp.sudogcache) 
    513            .          .           	s := pp.sudogcache[n-1] 
    514         10ms       10ms           	pp.sudogcache[n-1] = nil 
    515            .          .           	pp.sudogcache = pp.sudogcache[:n-1] 
    516         20ms       20ms           	if s.elem != nil { 
    517            .          .           		throw("acquireSudog: found s.elem != nil in cache") 
    518            .          .           	} 
    519            .          .           	releasem(mp) 
    520            .          .           	return s 
    521            .          .           } 

runtime.releaseSudog

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
    544            .          .           	if gp.param != nil { 
    545            .          .           		throw("runtime: releaseSudog with non-nil gp.param") 
    546            .          .           	} 
    547            .          .           	mp := acquirem() // avoid rescheduling to another P 
    548            .          .           	pp := mp.p.ptr() 
    549         10ms       10ms           	if len(pp.sudogcache) == cap(pp.sudogcache) { 
    550            .          .           		// Transfer half of local cache to the central cache. 
    551            .          .           		var first, last *sudog 
    552            .          .           		for len(pp.sudogcache) > cap(pp.sudogcache)/2 { 
    553            .          .           			n := len(pp.sudogcache) 
    554            .          .           			p := pp.sudogcache[n-1] 

runtime.releaseSudog

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
    566            .          .           		sched.sudogcache = first 
    567            .          .           		unlock(&sched.sudoglock) 
    568            .          .           	} 
    569            .          .           	pp.sudogcache = append(pp.sudogcache, s) 
    570            .          .           	releasem(mp) 
    571         20ms       20ms           } 
    572            .          .            
    573            .          .           // called from assembly. 
    574            .          .           func badmcall(fn func(*g)) { 
    575            .          .           	throw("runtime: mcall called on m->g0 stack") 
    576            .          .           } 
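
acquireSudog and releaseSudog maintain a two-level free list: a per-P slice serves as a lock-free local cache, and when it fills, half of it moves to a central list under sched.sudoglock. A simplified sketch of the release side, with invented types and sizes (the real runtime links sudogs into a list rather than appending to a slice):

    package main

    import (
        "fmt"
        "sync"
    )

    type item struct{ v int }

    var (
        centralMu sync.Mutex
        central   []*item // sched.sudogcache analogue
    )

    // release returns it to the local cache, spilling half to the
    // central list when the local cache is full.
    func release(local *[]*item, it *item) {
        if len(*local) == cap(*local) {
            centralMu.Lock()
            for len(*local) > cap(*local)/2 {
                n := len(*local)
                central = append(central, (*local)[n-1])
                *local = (*local)[:n-1]
            }
            centralMu.Unlock()
        }
        *local = append(*local, it)
    }

    func main() {
        local := make([]*item, 0, 8)
        for i := 0; i < 9; i++ {
            release(&local, &item{v: i})
        }
        fmt.Println(len(local), len(central)) // 5 4
    }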

runtime.(*m).becomeSpinning

/usr/lib/go/src/runtime/proc.go

  Total:        60ms       60ms (flat, cum)  0.13%
   1067            .          .           //go:linkname pprof_makeProfStack 
   1068            .          .           func pprof_makeProfStack() []uintptr { return makeProfStack() } 
   1069            .          .            
   1070            .          .           func (mp *m) becomeSpinning() { 
   1071            .          .           	mp.spinning = true 
   1072         60ms       60ms           	sched.nmspinning.Add(1)	// inlined: types.go:56 (Xaddint32)
   1073            .          .           	sched.needspinning.Store(0) 
   1074            .          .           } 
   1075            .          .            
   1076            .          .           // Take a snapshot of allp, for use after dropping the P. 
   1077            .          .           // 

runtime.(*m).clearAllpSnapshot

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1089            .          .           // no longer required. 
   1090            .          .           // 
   1091            .          .           // Must be called after reacquiring a P, as it requires a write barrier. 
   1092            .          .           // 
   1093            .          .           //go:yeswritebarrierrec 
   1094         20ms       20ms           func (mp *m) clearAllpSnapshot() { 
   1095            .          .           	mp.allpSnapshot = nil 
   1096            .          .           } 
   1097            .          .            
   1098            .          .           func (mp *m) hasCgoOnStack() bool { 
   1099            .          .           	return mp.ncgo > 0 || mp.isextra 

runtime.ready

/usr/lib/go/src/runtime/proc.go

  Total:        60ms      720ms (flat, cum)  1.51%
   1112            .          .           	// low resolution, typically on the order of 1 ms or more. 
   1113            .          .           	osHasLowResClock = osHasLowResClockInt > 0 
   1114            .          .           ) 
   1115            .          .            
   1116            .          .           // Mark gp ready to run. 
   1117         10ms       10ms           func ready(gp *g, traceskip int, next bool) { 
   1118         30ms       30ms           	status := readgstatus(gp)	// inlined: proc.go:1205, types.go:194
   1119            .          .            
   1120            .          .           	// Mark runnable. 
   1121            .          .           	mp := acquirem() // disable preemption because it can be holding p in a local var 
   1122         10ms       10ms           	if status&^_Gscan != _Gwaiting { 
   1123            .          .           		dumpgstatus(gp) 
   1124            .          .           		throw("bad g->status in ready") 
   1125            .          .           	} 
   1126            .          .            
   1127            .          .           	// status is Gwaiting or Gscanwaiting, make Grunnable and put on runq 
   1128            .          .           	trace := traceAcquire() 
   1129            .       50ms           	casgstatus(gp, _Gwaiting, _Grunnable) 
   1130            .          .           	if trace.ok() { 
   1131            .          .           		trace.GoUnpark(gp, traceskip) 
   1132            .          .           		traceRelease(trace) 
   1133            .          .           	} 
   1134            .       10ms           	runqput(mp.p.ptr(), gp, next) 
   1135            .      600ms           	wakep() 
   1136            .          .           	releasem(mp) 
   1137         10ms       10ms           } 
   1138            .          .            
   1139            .          .           // freezeStopWait is a large value that freezetheworld sets 
   1140            .          .           // sched.stopwait to in order to request that all Gs permanently stop. 
   1141            .          .           const freezeStopWait = 0x7fffffff 
   1142            .          .            

runtime.readgstatus

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   1200            .          .           // All reads and writes of g's status go through readgstatus, casgstatus 
   1201            .          .           // castogscanstatus, casfrom_Gscanstatus. 
   1202            .          .           // 
   1203            .          .           //go:nosplit 
   1204            .          .           func readgstatus(gp *g) uint32 { 
   1205         30ms       30ms           	return gp.atomicstatus.Load()	// inlined: types.go:194
   1206            .          .           } 
   1207            .          .            
   1208            .          .           // The Gscanstatuses are acting like locks and this releases them. 
   1209            .          .           // If it proves to be a performance hit we should be able to make these 
   1210            .          .           // simple atomic stores but for now we are going to throw if 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:       320ms      320ms (flat, cum)  0.67%
   1265            .          .           // and casfrom_Gscanstatus instead. 
   1266            .          .           // casgstatus will loop if the g->atomicstatus is in a Gscan status until the routine that 
   1267            .          .           // put it in the Gscan state is finished. 
   1268            .          .           // 
   1269            .          .           //go:nosplit 
   1270         10ms       10ms           func casgstatus(gp *g, oldval, newval uint32) { 
   1271            .          .           	if (oldval&_Gscan != 0) || (newval&_Gscan != 0) || oldval == newval { 
   1272            .          .           		systemstack(func() { 
   1273            .          .           			// Call on the systemstack to prevent print and throw from counting 
   1274            .          .           			// against the nosplit stack reservation. 
   1275            .          .           			print("runtime: casgstatus: oldval=", hex(oldval), " newval=", hex(newval), "\n") 
   1276            .          .           			throw("casgstatus: bad incoming values") 
   1277            .          .           		}) 
   1278            .          .           	} 
   1279            .          .            
   1280            .          .           	lockWithRankMayAcquire(nil, lockRankGscan) 
   1281            .          .            
   1282            .          .           	// See https://golang.org/cl/21503 for justification of the yield delay. 
   1283            .          .           	const yieldDelay = 5 * 1000 
   1284            .          .           	var nextYield int64 
   1285            .          .            
   1286            .          .           	// loop if gp->atomicstatus is in a scan state giving 
   1287            .          .           	// GC time to finish and change the state to oldval. 
   1288        310ms      310ms           	for i := 0; !gp.atomicstatus.CompareAndSwap(oldval, newval); i++ {	// inlined: types.go:236 (Cas)
   1289            .          .           		if oldval == _Gwaiting && gp.atomicstatus.Load() == _Grunnable { 
   1290            .          .           			systemstack(func() { 
   1291            .          .           				// Call on the systemstack to prevent throw from counting 
   1292            .          .           				// against the nosplit stack reservation. 
   1293            .          .           				throw("casgstatus: waiting for Gwaiting but is Grunnable") 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:       130ms      130ms (flat, cum)  0.27%
   1304            .          .           			osyield() 
   1305            .          .           			nextYield = nanotime() + yieldDelay/2 
   1306            .          .           		} 
   1307            .          .           	} 
   1308            .          .            
   1309         80ms       80ms           	if gp.bubble != nil { 
   1310            .          .           		systemstack(func() { 
   1311            .          .           			gp.bubble.changegstatus(gp, oldval, newval) 
   1312            .          .           		}) 
   1313            .          .           	} 
   1314            .          .            
   1315            .          .           	if oldval == _Grunning { 
   1316            .          .           		// Track every gTrackingPeriod time a goroutine transitions out of running. 
   1317         10ms       10ms           		if casgstatusAlwaysTrack || gp.trackingSeq%gTrackingPeriod == 0 { 
   1318            .          .           			gp.tracking = true 
   1319            .          .           		} 
   1320            .          .           		gp.trackingSeq++ 
   1321            .          .           	} 
   1322            .          .           	if !gp.tracking { 
   1323         20ms       20ms           		return 
   1324            .          .           	} 
   1325            .          .            
   1326            .          .           	// Handle various kinds of tracking. 
   1327            .          .           	// 
   1328            .          .           	// Currently: 
   1329            .          .           	// - Time spent in runnable. 
   1330            .          .           	// - Time spent blocked on a sync.Mutex or sync.RWMutex. 
   1331            .          .           	switch oldval { 
   1332            .          .           	case _Grunnable: 
   1333            .          .           		// We transitioned out of runnable, so measure how much 
   1334            .          .           		// time we spent in this state and add it to 
   1335            .          .           		// runnableTime. 
   1336         20ms       20ms           		now := nanotime()	// inlined: time_nofake.go:33
   1337            .          .           		gp.runnableTime += now - gp.trackingStamp 
   1338            .          .           		gp.trackingStamp = 0 
   1339            .          .           	case _Gwaiting: 
   1340            .          .           		if !gp.waitreason.isMutexWait() { 
   1341            .          .           			// Not blocking on a lock. 

runtime.casgstatus

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      120ms (flat, cum)  0.25%
   1357            .          .           			break 
   1358            .          .           		} 
   1359            .          .           		// Blocking on a lock. Write down the timestamp. 
   1360            .          .           		now := nanotime() 
   1361            .          .           		gp.trackingStamp = now 
   1362         20ms       20ms           	case _Grunnable: 
   1363            .          .           		// We just transitioned into runnable, so record what 
   1364            .          .           		// time that happened. 
   1365         20ms       20ms           		now := nanotime()	// inlined: time_nofake.go:33
   1366            .          .           		gp.trackingStamp = now 
   1367            .          .           	case _Grunning: 
   1368            .          .           		// We're transitioning into running, so turn off 
   1369            .          .           		// tracking and record how much time we spent in 
   1370            .          .           		// runnable. 
   1371            .          .           		gp.tracking = false 
   1372         10ms       80ms           		sched.timeToRun.record(gp.runnableTime) 
   1373            .          .           		gp.runnableTime = 0 
   1374            .          .           	} 
   1375            .          .           } 
   1376            .          .            
   1377            .          .           // casGToWaiting transitions gp from old to _Gwaiting, and sets the wait reason. 
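
casgstatus spins on CompareAndSwap and, once a deadline passes, calls osyield (the sched_yield samples shown earlier) so the thread holding the Gscan bit gets CPU time to finish. A sketch of that CAS-with-backoff shape using public APIs; runtime.Gosched stands in for osyield and the durations are illustrative:

    package main

    import (
        "fmt"
        "runtime"
        "sync/atomic"
        "time"
    )

    func casWithYield(p *atomic.Uint32, old, val uint32) {
        const yieldDelay = 5 * time.Microsecond
        var nextYield time.Time
        for i := 0; !p.CompareAndSwap(old, val); i++ {
            if i == 0 {
                nextYield = time.Now().Add(yieldDelay)
            }
            if time.Now().Before(nextYield) {
                continue // spin briefly first
            }
            runtime.Gosched() // osyield in the runtime
            nextYield = time.Now().Add(yieldDelay / 2)
        }
    }

    func main() {
        var status atomic.Uint32
        status.Store(1)
        casWithYield(&status, 1, 2)
        fmt.Println(status.Load()) // 2
    }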

runtime.startTheWorldWithSema

/usr/lib/go/src/runtime/proc.go

  Total:           0       10ms (flat, cum) 0.021%
   1820            .          .           	} 
   1821            .          .            
   1822            .          .           	// Wakeup an additional proc in case we have excessive runnable goroutines 
   1823            .          .           	// in local queues or in the global queue. If we don't, the proc will park itself. 
   1824            .          .           	// If we have lots of excessive work, resetspinning will unpark additional procs as necessary. 
   1825            .       10ms           	wakep() 
   1826            .          .            
   1827            .          .           	releasem(mp) 
   1828            .          .            
   1829            .          .           	return now 
   1830            .          .           } 

runtime.mPark

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.36s (flat, cum)  2.86%
   1969            .          .           // mPark causes a thread to park itself, returning once woken. 
   1970            .          .           // 
   1971            .          .           //go:nosplit 
   1972            .          .           func mPark() { 
   1973            .          .           	gp := getg() 
   1974            .      1.36s           	notesleep(&gp.m.park) 
   1975            .          .           	noteclear(&gp.m.park) 
   1976            .          .           } 
   1977            .          .            
   1978            .          .           // mexit tears down and exits the current thread. 
   1979            .          .           // 

runtime.stopm

/usr/lib/go/src/runtime/proc.go

  Total:        40ms      1.57s (flat, cum)  3.30%
   2995            .          .           // Stops execution of the current m until new work is available. 
   2996            .          .           // Returns with acquired P. 
   2997            .          .           func stopm() { 
   2998            .          .           	gp := getg() 
   2999            .          .            
   3000         10ms       10ms           	if gp.m.locks != 0 { 
   3001            .          .           		throw("stopm holding locks") 
   3002            .          .           	} 
   3003            .          .           	if gp.m.p != 0 { 
   3004            .          .           		throw("stopm holding p") 
   3005            .          .           	} 
   3006            .          .           	if gp.m.spinning { 
   3007            .          .           		throw("stopm spinning") 
   3008            .          .           	} 
   3009            .          .            
   3010            .       50ms           	lock(&sched.lock)	// inlined: lock_spinbit.go:152, lockrank_off.go:24
   3011            .       20ms           	mput(gp.m) 
   3012            .       10ms           	unlock(&sched.lock)	// inlined: lock_spinbit.go:261, lockrank_off.go:35
   3013            .      1.36s           	mPark()	// inlined: proc.go:1974 (notesleep)
   3014         30ms      120ms           	acquirep(gp.m.nextp.ptr()) 
   3015            .          .           	gp.m.nextp = 0 
   3016            .          .           } 
   3017            .          .            
   3018            .          .           func mspinning() { 
   3019            .          .           	// startm's caller incremented nmspinning. Set the new M's spinning. 

runtime.startm

/usr/lib/go/src/runtime/proc.go

  Total:        60ms       70ms (flat, cum)  0.15%
   3054            .          .           	// context, otherwise such preemption could occur on function entry to 
   3055            .          .           	// startm. Callers passing a nil P may be preemptible, so we must 
   3056            .          .           	// disable preemption before acquiring a P from pidleget below. 
   3057            .          .           	mp := acquirem() 
   3058            .          .           	if !lockheld { 
   3059            .       10ms           		lock(&sched.lock) 
   3060            .          .           	} 
   3061            .          .           	if pp == nil { 
   3062            .          .           		if spinning { 
   3063            .          .           			// TODO(prattmic): All remaining calls to this function 
   3064            .          .           			// with _p_ == nil could be cleaned up to find a P 
   3065            .          .           			// before calling startm. 
   3066            .          .           			throw("startm: P required for spinning=true") 
   3067            .          .           		} 
   3068            .          .           		pp, _ = pidleget(0) 
   3069            .          .           		if pp == nil { 
   3070            .          .           			if !lockheld { 
   3071            .          .           				unlock(&sched.lock) 
   3072            .          .           			} 
   3073            .          .           			releasem(mp) 
   3074            .          .           			return 
   3075            .          .           		} 
   3076            .          .           	} 
   3077         60ms       60ms           	nmp := mget() 
   3078            .          .           	if nmp == nil { 
   3079            .          .           		// No M is available, we must drop sched.lock and call newm. 
   3080            .          .           		// However, we already own a P to assign to the M. 
   3081            .          .           		// 
   3082            .          .           		// Once sched.lock is released, another G (e.g., in a syscall), 

runtime.startm

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      3.09s (flat, cum)  6.49%
   3107            .          .           		// Preemption is now safe. 
   3108            .          .           		releasem(mp) 
   3109            .          .           		return 
   3110            .          .           	} 
   3111            .          .           	if !lockheld { 
   3112            .       10ms           		unlock(&sched.lock) 
   3113            .          .           	} 
   3114         20ms       20ms           	if nmp.spinning { 
   3115            .          .           		throw("startm: m is spinning") 
   3116            .          .           	} 
   3117            .          .           	if nmp.nextp != 0 { 
   3118            .          .           		throw("startm: m has p") 
   3119            .          .           	} 
   3120            .          .           	if spinning && !runqempty(pp) { 
   3121            .          .           		throw("startm: p has runnable gs") 
   3122            .          .           	} 
   3123            .          .           	// The caller incremented nmspinning, so set m.spinning in the new M. 
   3124         10ms       10ms           	nmp.spinning = spinning 
   3125            .          .           	nmp.nextp.set(pp) 
   3126            .      3.03s           	notewakeup(&nmp.park) 
   3127            .          .           	// Ownership transfer of pp committed by wakeup. Preemption is now 
   3128            .          .           	// safe. 
   3129         20ms       20ms           	releasem(mp) 
   3130            .          .           } 
   3131            .          .            
   3132            .          .           // Hands off P from syscall or locked M. 
   3133            .          .           // Always runs without a P, so write barriers are not allowed. 
   3134            .          .           // 
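
stopm and startm form a park/hand-off pair: the stopping M puts itself on sched.midle and sleeps on its note, while startm pops an idle M, stores the P in m.nextp, and wakes the note; as the comment above says, the wakeup itself commits the transfer of P ownership. A toy sketch of that hand-off with the same lock/park ordering (hypothetical types; a string stands in for the P):

	package main

	import (
		"fmt"
		"sync"
	)

	// m stands in for a worker thread with a park note and a hand-off slot.
	type m struct {
		park  chan struct{} // the note the worker sleeps on
		nextp string        // work handed off before the wakeup, like m.nextp
	}

	var (
		mu    sync.Mutex
		midle []*m // idle list, like sched.midle
	)

	// stopmToy: register on the idle list, park, then pick up the handed-off P.
	func stopmToy(mp *m) {
		mu.Lock()
		midle = append(midle, mp) // mput
		mu.Unlock()
		<-mp.park // mPark
		fmt.Println("resumed with", mp.nextp)
	}

	// startmToy: pop an idle worker, hand it work, then wake its note. The
	// channel send orders the nextp write before the worker's read of it.
	func startmToy(pp string) {
		for {
			mu.Lock()
			if n := len(midle); n > 0 {
				mp := midle[n-1] // mget
				midle = midle[:n-1]
				mu.Unlock()
				mp.nextp = pp
				mp.park <- struct{}{} // notewakeup: ownership of pp transfers here
				return
			}
			mu.Unlock() // no idle worker yet; the real startm would newm()
		}
	}

	func main() {
		mp := &m{park: make(chan struct{}, 1)}
		done := make(chan struct{})
		go func() { stopmToy(mp); close(done) }()
		startmToy("p0")
		<-done
	}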

runtime.wakep

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      3.57s (flat, cum)  7.50%
   3212            .          .           // 
   3213            .          .           // Do not remove or change the type signature. 
   3214            .          .           // See go.dev/issue/67401. 
   3215            .          .           // 
   3216            .          .           //go:linkname wakep 
   3217         10ms       10ms           func wakep() { 
   3218            .          .           	// Be conservative about spinning threads, only start one if none exist 
   3219            .          .           	// already. 
   3220         70ms       70ms           	if sched.nmspinning.Load() != 0 || !sched.nmspinning.CompareAndSwap(0, 1) { 
   3221            .          .           		return 
   3222            .          .           	} 
   3223            .          .            
   3224            .          .           	// Disable preemption until ownership of pp transfers to the next M in 
   3225            .          .           	// startm. Otherwise preemption here would leave pp stuck waiting to 
   3226            .          .           	// enter _Pgcstop. 
   3227            .          .           	// 
   3228            .          .           	// See preemption comment on acquirem in startm for more details. 
   3229            .          .           	mp := acquirem() 
   3230            .          .            
   3231            .          .           	var pp *p 
   3232            .       90ms           	lock(&sched.lock) 
   3233            .      190ms           	pp, _ = pidlegetSpinning(0) 
   3234            .          .           	if pp == nil { 
   3235            .          .           		if sched.nmspinning.Add(-1) < 0 { 
   3236            .          .           			throw("wakep: negative nmspinning") 
   3237            .          .           		} 
   3238            .       20ms           		unlock(&sched.lock) 
   3239            .          .           		releasem(mp) 
   3240            .          .           		return 
   3241            .          .           	} 
   3242            .          .           	// Since we always have a P, the race in the "No M is available" 
   3243            .          .           	// comment in startm doesn't apply during the small window between the 
   3244            .          .           	// unlock here and lock in startm. A checkdead in between will always 
   3245            .          .           	// see at least one running M (ours). 
   3246            .       30ms           	unlock(&sched.lock) 
   3247            .          .            
   3248            .      3.16s           	startm(pp, true, false) 
   3249            .          .            
   3250            .          .           	releasem(mp) 
   3251            .          .           } 
   3252            .          .            
   3253            .          .           // Stops execution of the current m that is locked to a g until the g is runnable again. 
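
The guard at the top of wakep is the entire admission policy: only one caller can win the 0-to-1 CompareAndSwap on nmspinning, so at most one new spinning M is started at a time, which is why the 3.16s of startm sits behind a cheap 70ms check. A minimal sketch of that guard (hypothetical names):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var nmspinning atomic.Int32

	// tryWake reports whether this caller won the right to start a spinning
	// worker. Losers return immediately, avoiding a thundering herd.
	func tryWake() bool {
		if nmspinning.Load() != 0 || !nmspinning.CompareAndSwap(0, 1) {
			return false
		}
		// ...start a worker here; it must Add(-1) once it stops spinning.
		return true
	}

	func main() {
		fmt.Println(tryWake(), tryWake()) // true false: the second caller loses
	}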

runtime.execute

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      210ms (flat, cum)  0.44%
   3342            .          .           		// the world. 
   3343            .          .           		tryRecordGoroutineProfile(gp, nil, osyield) 
   3344            .          .           	} 
   3345            .          .            
   3346            .          .           	// Assign gp.m before entering _Grunning so running Gs have an M. 
   3347         10ms       10ms           	mp.curg = gp 
   3348            .          .           	gp.m = mp 
   3349            .          .           	gp.syncSafePoint = false // Clear the flag, which may have been set by morestack. 
   3350            .      170ms           	casgstatus(gp, _Grunnable, _Grunning) 
   3351            .          .           	gp.waitsince = 0 
   3352            .          .           	gp.preempt = false 
   3353            .          .           	gp.stackguard0 = gp.stack.lo + stackGuard 
   3354            .          .           	if !inheritTime { 
   3355         10ms       10ms           		mp.p.ptr().schedtick++ 
   3356            .          .           	} 
   3357            .          .            
   3358            .          .           	// Check whether the profiler needs to be turned on or off. 
   3359            .          .           	hz := sched.profilehz 
   3360            .          .           	if mp.profilehz != hz { 
   3361            .          .           		setThreadCPUProfiler(hz) 
   3362            .          .           	} 
   3363            .          .            
   3364         10ms       10ms           	trace := traceAcquire() 
   3365            .          .           	if trace.ok() { 
   3366            .          .           		trace.GoStart() 
   3367            .          .           		traceRelease(trace) 
   3368            .          .           	} 
   3369            .          .            
   3370            .       10ms           	gogo(&gp.sched) 
   3371            .          .           } 
   3372            .          .            
   3373            .          .           // Finds a runnable goroutine to execute. 
   3374            .          .           // Tries to steal from other P's, get g from local or global queue, poll network. 
   3375            .          .           // tryWakeP indicates that the returned goroutine is not normal (GC worker, trace 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:       320ms      360ms (flat, cum)  0.76%
   3383            .          .            
   3384            .          .           top: 
   3385            .          .           	// We may have collected an allp snapshot below. The snapshot is only 
   3386            .          .           	// required in each loop iteration. Clear it to allow GC to collect the 
   3387            .          .           	// slice. 
   3388         10ms       30ms           	mp.clearAllpSnapshot() 
   3389            .          .            
   3390         10ms       10ms           	pp := mp.p.ptr() 
   3391            .          .           	if sched.gcwaiting.Load() { 
   3392            .          .           		gcstopm() 
   3393            .          .           		goto top 
   3394            .          .           	} 
   3395         20ms       20ms           	if pp.runSafePointFn != 0 { 
   3396            .          .           		runSafePointFn() 
   3397            .          .           	} 
   3398            .          .            
   3399            .          .           	// now and pollUntil are saved for work stealing later, 
   3400            .          .           	// which may steal timers. It's important that between now 
   3401            .          .           	// and then, nothing blocks, so these numbers remain mostly 
   3402            .          .           	// relevant. 
   3403            .       20ms           	now, pollUntil, _ := pp.timers.check(0, nil) 
   3404            .          .            
   3405            .          .           	// Try to schedule the trace reader. 
   3406         10ms       10ms           	if traceEnabled() || traceShuttingDown() { 
   3407            .          .           		gp := traceReader() 
   3408            .          .           		if gp != nil { 
   3409            .          .           			trace := traceAcquire() 
   3410            .          .           			casgstatus(gp, _Gwaiting, _Grunnable) 
   3411            .          .           			if trace.ok() { 
   3412            .          .           				trace.GoUnpark(gp, 0) 
   3413            .          .           				traceRelease(trace) 
   3414            .          .           			} 
   3415            .          .           			return gp, false, true 
   3416            .          .           		} 
   3417            .          .           	} 
   3418            .          .            
   3419            .          .           	// Try to schedule a GC worker. 
   3420         20ms       20ms           	if gcBlackenEnabled != 0 { 
   3421            .          .           		gp, tnow := gcController.findRunnableGCWorker(pp, now) 
   3422            .          .           		if gp != nil { 
   3423            .          .           			return gp, false, true 
   3424            .          .           		} 
   3425            .          .           		now = tnow 
   3426            .          .           	} 
   3427            .          .            
   3428            .          .           	// Check the global runnable queue once in a while to ensure fairness. 
   3429            .          .           	// Otherwise two goroutines can completely occupy the local runqueue 
   3430            .          .           	// by constantly respawning each other. 
   3431         10ms       10ms           	if pp.schedtick%61 == 0 && !sched.runq.empty() { 
   3432            .          .           		lock(&sched.lock) 
   3433            .          .           		gp := globrunqget() 
   3434            .          .           		unlock(&sched.lock) 
   3435            .          .           		if gp != nil { 
   3436            .          .           			return gp, false, false 
   3437            .          .           		} 
   3438            .          .           	} 
   3439            .          .            
   3440            .          .           	// Wake up the finalizer G. 
   3441         20ms       20ms           	if fingStatus.Load()&(fingWait|fingWake) == fingWait|fingWake { 
   3442            .          .           		if gp := wakefing(); gp != nil { 
   3443            .          .           			ready(gp, 0, true) 
   3444            .          .           		} 
   3445            .          .           	} 
   3446            .          .            
   3447            .          .           	// Wake up one or more cleanup Gs. 
   3448         40ms       40ms           	if gcCleanups.needsWake() { 
   3449            .          .           		gcCleanups.wake() 
   3450            .          .           	} 
   3451            .          .            
   3452         20ms       20ms           	if *cgo_yield != nil { 
   3453            .          .           		asmcgocall(*cgo_yield, nil) 
   3454            .          .           	} 
   3455            .          .            
   3456            .          .           	// local runq 
   3457        110ms      110ms           	if gp, inheritTime := runqget(pp); gp != nil { 
   3458            .          .           		return gp, inheritTime, false 
   3459            .          .           	} 
   3460            .          .            
   3461            .          .           	// global runq 
   3462         50ms       50ms           	if !sched.runq.empty() { 
   3463            .          .           		lock(&sched.lock) 
   3464            .          .           		gp, q := globrunqgetbatch(int32(len(pp.runq)) / 2) 
   3465            .          .           		unlock(&sched.lock) 
   3466            .          .           		if gp != nil { 
   3467            .          .           			if runqputbatch(pp, &q); !q.empty() { 
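
The pp.schedtick%61 == 0 branch above is the fairness valve the comment describes: roughly every 61st tick the global queue is consulted before the local one, so two goroutines respawning each other in a local runqueue cannot starve everything else (61 is prime, which avoids resonating with other scheduling periods). A toy model with slices standing in for the runqueues:

	package main

	import "fmt"

	// pick models "check the global queue every 61st tick, otherwise prefer
	// the local queue". It returns the chosen work and the updated queues.
	func pick(tick int, local, global []int) (g int, l, gl []int) {
		if tick%61 == 0 && len(global) > 0 {
			return global[0], local, global[1:] // fairness: global first
		}
		if len(local) > 0 {
			return local[0], local[1:], global
		}
		if len(global) > 0 {
			return global[0], local, global[1:]
		}
		return -1, local, global // nothing runnable
	}

	func main() {
		local, global := []int{1, 2, 3}, []int{100}
		for tick := 60; tick <= 62; tick++ {
			var g int
			g, local, global = pick(tick, local, global)
			fmt.Println("tick", tick, "ran g", g) // tick 61 runs g 100
		}
	}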

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        70ms      1.28s (flat, cum)  2.69%
   3500            .          .           	// Spinning Ms: steal work from other Ps. 
   3501            .          .           	// 
   3502            .          .           	// Limit the number of spinning Ms to half the number of busy Ps. 
   3503            .          .           	// This is necessary to prevent excessive CPU consumption when 
   3504            .          .           	// GOMAXPROCS>>1 but the program parallelism is low. 
   3505         10ms       10ms           	if mp.spinning || 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() { 
   3506            .          .           		if !mp.spinning { 
   3507         50ms       50ms           			mp.becomeSpinning() 
   3508            .          .           		} 
   3509            .          .            
   3510            .      1.21s           		gp, inheritTime, tnow, w, newWork := stealWork(now) 
   3511            .          .           		if gp != nil { 
   3512            .          .           			// Successfully stole. 
   3513         10ms       10ms           			return gp, inheritTime, false 
   3514            .          .           		} 
   3515            .          .           		if newWork { 
   3516            .          .           			// There may be new timer or GC work; restart to 
   3517            .          .           			// discover. 
   3518            .          .           			goto top 
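
The condition 2*sched.nmspinning.Load() < gomaxprocs-sched.npidle.Load() caps spinning Ms at half the number of busy Ps (busy being GOMAXPROCS minus idle Ps). Restated as a pure function (hypothetical wrapper):

	package main

	import "fmt"

	// shouldSpin restates the throttle: spin only while spinning Ms number
	// fewer than half the busy Ps.
	func shouldSpin(nmspinning, gomaxprocs, npidle int32) bool {
		return 2*nmspinning < gomaxprocs-npidle
	}

	func main() {
		fmt.Println(shouldSpin(0, 8, 6)) // true: no spinners, 2 busy Ps
		fmt.Println(shouldSpin(1, 8, 6)) // false: 1 spinner already covers them
	}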

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:           0      100ms (flat, cum)  0.21%
   3577            .          .           	// len to change out from under us. 
   3578            .          .           	idlepMaskSnapshot := idlepMask 
   3579            .          .           	timerpMaskSnapshot := timerpMask 
   3580            .          .            
   3581            .          .           	// return P and block 
   3582            .      100ms           	lock(&sched.lock) 
   3583            .          .           	if sched.gcwaiting.Load() || pp.runSafePointFn != 0 { 
   3584            .          .           		unlock(&sched.lock) 
   3585            .          .           		goto top 
   3586            .          .           	} 
   3587            .          .           	if !sched.runq.empty() { 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        10ms      180ms (flat, cum)  0.38%
   3599            .          .           		// See "Delicate dance" comment below. 
   3600            .          .           		mp.becomeSpinning() 
   3601            .          .           		unlock(&sched.lock) 
   3602            .          .           		goto top 
   3603            .          .           	} 
   3604         10ms       20ms           	if releasep() != pp { 
   3605            .          .           		throw("findrunnable: wrong p") 
   3606            .          .           	} 
   3607            .      110ms           	now = pidleput(pp, now) 
   3608            .       50ms           	unlock(&sched.lock) 
   3609            .          .            
   3610            .          .           	// Delicate dance: thread transitions from spinning to non-spinning 
   3611            .          .           	// state, potentially concurrently with submission of new work. We must 
   3612            .          .           	// drop nmspinning first and then check all sources again (with 
   3613            .          .           	// #StoreLoad memory barrier in between). If we do it the other way 
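
The "delicate dance" is the classic sleep/wakeup race: the parking thread publishes that it is no longer spinning before re-checking the work sources, while a submitter publishes work before checking for spinners; with a #StoreLoad barrier between the two steps on each side, at least one side must notice the other. A sketch of the two mirrored orders using Go's sequentially consistent sync/atomic (all names hypothetical):

	package main

	import (
		"fmt"
		"sync/atomic"
	)

	var (
		work     atomic.Int32 // pending work items
		spinners atomic.Int32 // threads still searching for work
	)

	// parkSoon is the findRunnable side: drop the spinner count first (store),
	// then re-check for work (load) before actually sleeping.
	func parkSoon() (foundLateWork bool) {
		spinners.Add(-1)
		return work.Load() > 0 // if true, go back to the top instead of sleeping
	}

	// submit is the producer side: publish the work first (store), then check
	// for spinners (load); if none remain, it must wake a thread itself.
	func submit() (mustWake bool) {
		work.Add(1)
		return spinners.Load() == 0
	}

	func main() {
		spinners.Store(1)
		fmt.Println(submit())   // false: a spinner is still looking
		fmt.Println(parkSoon()) // true: the late re-check catches the work
	}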

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       80ms (flat, cum)  0.17%
   3676            .          .           				acquirep(pp) 
   3677            .          .           				mp.becomeSpinning() 
   3678            .          .           				return gp, false, false 
   3679            .          .           			} 
   3680            .          .           		} 
   3681            .       30ms           		unlock(&sched.lock) 
   3682            .          .            
   3683            .       30ms           		pp := checkRunqsNoP(allpSnapshot, idlepMaskSnapshot) 
   3684            .          .           		if pp != nil { 
   3685            .          .           			acquirep(pp) 
   3686         10ms       10ms           			mp.becomeSpinning() 
   3687            .          .           			goto top 
   3688            .          .           		} 
   3689            .          .            
   3690            .          .           		// Check for idle-priority GC work again. 
   3691            .       10ms           		pp, gp := checkIdleGCNoP() 
   3692            .          .           		if pp != nil { 
   3693            .          .           			acquirep(pp) 
   3694            .          .           			mp.becomeSpinning() 
   3695            .          .            
   3696            .          .           			// Run the idle worker. 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       60ms (flat, cum)  0.13%
   3708            .          .           		// transitioning from spinning to non-spinning. 
   3709            .          .           		// 
   3710            .          .           		// Note that we cannot use checkTimers here because it calls 
   3711            .          .           		// adjusttimers which may need to allocate memory, and that isn't 
   3712            .          .           		// allowed when we don't have an active P. 
   3713            .       40ms           		pollUntil = checkTimersNoP(allpSnapshot, timerpMaskSnapshot, pollUntil) 
   3714            .          .           	} 
   3715            .          .            
   3716            .          .           	// We don't need allp anymore at this point, but can't clear the 
   3717            .          .           	// snapshot without a P for the write barrier. 
   3718            .          .            
   3719            .          .           	// Poll network until next timer. 
   3720         20ms       20ms           	if netpollinited() && (netpollAnyWaiters() || pollUntil != 0) && sched.lastpoll.Swap(0) != 0 { 
   3721            .          .           		sched.pollUntil.Store(pollUntil) 
   3722            .          .           		if mp.p != 0 { 
   3723            .          .           			throw("findrunnable: netpoll with p") 
   3724            .          .           		} 
   3725            .          .           		if mp.spinning { 

runtime.findRunnable

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      1.60s (flat, cum)  3.36%
   3768            .          .           					trace.GoUnpark(gp, 0) 
   3769            .          .           					traceRelease(trace) 
   3770            .          .           				} 
   3771            .          .           				return gp, false, false 
   3772            .          .           			} 
   3773         10ms       10ms           			if wasSpinning { 
   3774            .          .           				mp.becomeSpinning() 
   3775            .          .           			} 
   3776            .          .           			goto top 
   3777            .          .           		} 
   3778            .          .           	} else if pollUntil != 0 && netpollinited() { 
   3779            .          .           		pollerPollUntil := sched.pollUntil.Load() 
   3780         10ms       10ms           		if pollerPollUntil == 0 || pollerPollUntil > pollUntil { 
   3781            .          .           			netpollBreak() 
   3782            .          .           		} 
   3783            .          .           	} 
   3784            .      1.57s           	stopm() 
   3785         10ms       10ms           	goto top 
   3786            .          .           } 
   3787            .          .            
   3788            .          .           // pollWork reports whether there is non-background work this P could 
   3789            .          .           // be doing. This is a fairly lightweight check to be used for 
   3790            .          .           // background work loops, like idle GC. It checks a subset of the 

runtime.stealWork

/usr/lib/go/src/runtime/proc.go

  Total:       320ms      410ms (flat, cum)  0.86%
   3817            .          .           	pp := getg().m.p.ptr() 
   3818            .          .            
   3819            .          .           	ranTimer := false 
   3820            .          .            
   3821            .          .           	const stealTries = 4 
   3822         10ms       10ms           	for i := 0; i < stealTries; i++ { 
   3823         40ms       40ms           		stealTimersOrRunNextG := i == stealTries-1 
   3824            .          .            
   3825        100ms      100ms           		for enum := stealOrder.start(cheaprand()); !enum.done(); enum.next() { 
   3826         20ms       20ms           			if sched.gcwaiting.Load() { 
   3827            .          .           				// GC work may be available. 
   3828            .          .           				return nil, false, now, pollUntil, true 
   3829            .          .           			} 
   3830         90ms       90ms           			p2 := allp[enum.position()] 
   3831         40ms       40ms           			if pp == p2 { 
   3832         10ms       10ms           				continue 
   3833            .          .           			} 
   3834            .          .            
   3835            .          .           			// Steal timers from p2. This call to checkTimers is the only place 
   3836            .          .           			// where we might hold a lock on a different P's timers. We do this 
   3837            .          .           			// once on the last pass before checking runnext because stealing 
   3838            .          .           			// from the other P's runnext should be the last resort, so if there 
   3839            .          .           			// are timers to steal do that first. 
   3840            .          .           			// 
   3841            .          .           			// We only check timers on one of the stealing iterations because 
   3842            .          .           			// the time stored in now doesn't change in this loop and checking 
   3843            .          .           			// the timers for each P more than once with the same value of now 
   3844            .          .           			// is probably a waste of time. 
   3845            .          .           			// 
   3846            .          .           			// timerpMask tells us whether the P may have timers at all. If it 
   3847            .          .           			// can't, no need to check at all. 
   3848         10ms       10ms           			if stealTimersOrRunNextG && timerpMask.read(enum.position()) { 
   3849            .       90ms           				tnow, w, ran := p2.timers.check(now, nil) 
   3850            .          .           				now = tnow 
   3851            .          .           				if w != 0 && (pollUntil == 0 || w < pollUntil) { 
   3852            .          .           					pollUntil = w 
   3853            .          .           				} 
   3854            .          .           				if ran { 

runtime.stealWork

/usr/lib/go/src/runtime/proc.go

  Total:       210ms      800ms (flat, cum)  1.68%
   3866            .          .           					ranTimer = true 
   3867            .          .           				} 
   3868            .          .           			} 
   3869            .          .            
   3870            .          .           			// Don't bother to attempt to steal if p2 is idle. 
   3871        170ms      170ms           			if !idlepMask.read(enum.position()) { 
   3872         20ms      610ms           				if gp := runqsteal(pp, p2, stealTimersOrRunNextG); gp != nil { 
   3873         10ms       10ms           					return gp, false, now, pollUntil, ranTimer 
   3874            .          .           				} 
   3875            .          .           			} 
   3876            .          .           		} 
   3877            .          .           	} 
   3878            .          .            
   3879            .          .           	// No goroutines found to steal. Regardless, running a timer may have 
   3880            .          .           	// made some goroutine ready that we missed. Indicate the next timer to 
   3881            .          .           	// wait for. 
   3882         10ms       10ms           	return nil, false, now, pollUntil, ranTimer 
   3883            .          .           } 
   3884            .          .            
   3885            .          .           // Check all Ps for a runnable G to steal. 
   3886            .          .           // 
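
stealOrder.start(cheaprand()) drives a randomized traversal: starting at a random position and stepping by an increment coprime to the P count visits every P exactly once, in a different order on each attempt, so victims are probed without any coordination. A sketch of that enumeration (hypothetical helper; the runtime precomputes a table of coprime increments):

	package main

	import "fmt"

	// enumOrder visits all count slots exactly once: because inc is coprime
	// to count, pos cycles through every residue before repeating.
	func enumOrder(count, start, inc uint32) []uint32 {
		order := make([]uint32, 0, count)
		pos := start % count
		for i := uint32(0); i < count; i++ {
			order = append(order, pos)
			pos = (pos + inc) % count
		}
		return order
	}

	func main() {
		fmt.Println(enumOrder(8, 5, 3)) // [5 0 3 6 1 4 7 2]: each P hit once
	}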

runtime.checkRunqsNoP

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   3887            .          .           // On entry we have no P. If a G is available to steal and a P is available, 
   3888            .          .           // the P is returned which the caller should acquire and attempt to steal the 
   3889            .          .           // work to. 
   3890            .          .           func checkRunqsNoP(allpSnapshot []*p, idlepMaskSnapshot pMask) *p { 
   3891         20ms       20ms           	for id, p2 := range allpSnapshot { 
   3892         10ms       10ms           		if !idlepMaskSnapshot.read(uint32(id)) && !runqempty(p2) { 
   3893            .          .           			lock(&sched.lock) 
   3894            .          .           			pp, _ := pidlegetSpinning(0) 
   3895            .          .           			if pp == nil { 
   3896            .          .           				// Can't get a P, don't bother checking remaining Ps. 
   3897            .          .           				unlock(&sched.lock) 

runtime.checkTimersNoP

/usr/lib/go/src/runtime/proc.go

  Total:        40ms       40ms (flat, cum) 0.084%
   3908            .          .            
   3909            .          .           // Check all Ps for a timer expiring sooner than pollUntil. 
   3910            .          .           // 
   3911            .          .           // Returns updated pollUntil value. 
   3912            .          .           func checkTimersNoP(allpSnapshot []*p, timerpMaskSnapshot pMask, pollUntil int64) int64 { 
   3913         10ms       10ms           	for id, p2 := range allpSnapshot { 
   3914            .          .           		if timerpMaskSnapshot.read(uint32(id)) { 
   3915         10ms       10ms           			w := p2.timers.wakeTime() 
   3916         10ms       10ms           			if w != 0 && (pollUntil == 0 || w < pollUntil) { 
   3917            .          .           				pollUntil = w 
   3918            .          .           			} 
   3919            .          .           		} 
   3920            .          .           	} 
   3921            .          .            
   3922         10ms       10ms           	return pollUntil 
   3923            .          .           } 
   3924            .          .            
   3925            .          .           // Check for idle-priority GC, without a P on entry. 
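
checkTimersNoP is a minimum-fold over the per-P timer wake times, where 0 means "no timer pending": keep the earliest nonzero deadline. The same fold on a plain slice (hypothetical helper):

	package main

	import "fmt"

	// earliest folds wake times into pollUntil, treating 0 as "no timer".
	func earliest(pollUntil int64, wakeTimes []int64) int64 {
		for _, w := range wakeTimes {
			if w != 0 && (pollUntil == 0 || w < pollUntil) {
				pollUntil = w
			}
		}
		return pollUntil
	}

	func main() {
		fmt.Println(earliest(0, []int64{0, 900, 250, 0, 400})) // 250
	}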

runtime.checkIdleGCNoP

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   3926            .          .           // 
   3927            .          .           // If some GC work, a P, and a worker G are all available, the P and G will be 
   3928            .          .           // returned. The returned P has not been wired yet. 
   3929         10ms       10ms           func checkIdleGCNoP() (*p, *g) { 
   3930            .          .           	// N.B. Since we have no P, gcBlackenEnabled may change at any time; we 
   3931            .          .           	// must check again after acquiring a P. As an optimization, we also check 
   3932            .          .           	// if an idle mark worker is needed at all. This is OK here, because if we 
   3933            .          .           	// observe that one isn't needed, at least one is currently running. Even if 
   3934            .          .           	// it stops running, its own journey into the scheduler should schedule it 

runtime.resetspinning

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      2.47s (flat, cum)  5.19%
   4004            .          .           			wakep() 
   4005            .          .           		} 
   4006            .          .           	} 
   4007            .          .           } 
   4008            .          .            
   4009         10ms       10ms           func resetspinning() { 
   4010            .          .           	gp := getg() 
   4011            .          .           	if !gp.m.spinning { 
   4012            .          .           		throw("resetspinning: not a spinning m") 
   4013            .          .           	} 
   4014            .          .           	gp.m.spinning = false 
   4015         40ms       40ms           	nmspinning := sched.nmspinning.Add(-1) 
   4016            .          .           	if nmspinning < 0 { 
   4017            .          .           		throw("findrunnable: negative nmspinning") 
   4018            .          .           	} 
   4019            .          .           	// M wakeup policy is deliberately somewhat conservative, so check if we 
   4020            .          .           	// need to wakeup another P here. See "Worker thread parking/unparking" 
   4021            .          .           	// comment at the top of the file for details. 
   4022            .      2.42s           	wakep() 
   4023            .          .           } 
   4024            .          .            
   4025            .          .           // injectglist adds each runnable G on the list to some run queue, 
   4026            .          .           // and clears glist. If there is no current P, they are added to the 
   4027            .          .           // global queue, and up to npidle M's are started to run them. 

runtime.schedule

/usr/lib/go/src/runtime/proc.go

  Total:        90ms      6.43s (flat, cum) 13.50%
   4147            .          .           	// goready to put a ready goroutine on the local run queue. 
   4148            .          .           	if mp.spinning && (pp.runnext != 0 || pp.runqhead != pp.runqtail) { 
   4149            .          .           		throw("schedule: spinning with local work") 
   4150            .          .           	} 
   4151            .          .            
   4152            .      3.66s           	gp, inheritTime, tryWakeP := findRunnable() // blocks until work is available 
   4153            .          .            
   4154            .          .           	// findRunnable may have collected an allp snapshot. The snapshot is 
   4155            .          .           	// only required within findRunnable. Clear it to allow GC to collect the 
   4156            .          .           	// slice. 
   4157            .          .           	mp.clearAllpSnapshot() 
   4158            .          .            
   4159         10ms       10ms           	if debug.dontfreezetheworld > 0 && freezing.Load() { 
   4160            .          .           		// See comment in freezetheworld. We don't want to perturb 
   4161            .          .           		// scheduler state, so we didn't gcstopm in findRunnable, but 
   4162            .          .           		// also don't want to allow new goroutines to run. 
   4163            .          .           		// 
   4164            .          .           		// Deadlock here rather than in the findRunnable loop so if 
   4165            .          .           		// findRunnable is stuck in a loop we don't perturb that 
   4166            .          .           		// either. 
   4167            .          .           		lock(&deadlock) 
   4168            .          .           		lock(&deadlock) 
   4169            .          .           	} 
   4170            .          .            
   4171            .          .           	// This thread is going to run a goroutine and is not spinning anymore, 
   4172            .          .           	// so if it was marked as spinning we need to reset it now and potentially 
   4173            .          .           	// start a new spinning M. 
   4174            .          .           	if mp.spinning { 
   4175            .      2.47s           		resetspinning() 
   4176            .          .           	} 
   4177            .          .            
   4178         30ms       30ms           	if sched.disable.user && !schedEnabled(gp) { 
   4179            .          .           		// Scheduling of this goroutine is disabled. Put it on 
   4180            .          .           		// the list of pending runnable goroutines for when we 
   4181            .          .           		// re-enable user scheduling and look again. 
   4182            .          .           		lock(&sched.lock) 
   4183            .          .           		if schedEnabled(gp) { 
   4184            .          .           			// Something re-enabled scheduling while we 
   4185            .          .           			// were acquiring the lock. 
   4186            .          .           			unlock(&sched.lock) 
   4187            .          .           		} else { 
   4188            .          .           			sched.disable.runnable.pushBack(gp) 
   4189            .          .           			unlock(&sched.lock) 
   4190            .          .           			goto top 
   4191            .          .           		} 
   4192            .          .           	} 
   4193            .          .            
   4194            .          .           	// If about to schedule a not-normal goroutine (a GCworker or tracereader), 
   4195            .          .           	// wake a P if there is one. 
   4196         10ms       10ms           	if tryWakeP { 
   4197            .          .           		wakep() 
   4198            .          .           	} 
   4199         40ms       40ms           	if gp.lockedm != 0 { 
   4200            .          .           		// Hands off own p to the locked m, 
   4201            .          .           		// then blocks waiting for a new p. 
   4202            .          .           		startlockedm(gp) 
   4203            .          .           		goto top 
   4204            .          .           	} 
   4205            .          .            
   4206            .      210ms           	execute(gp, inheritTime) 
   4207            .          .           } 
   4208            .          .            
   4209            .          .           // dropg removes the association between m and the current goroutine m->curg (gp for short). 
   4210            .          .           // Typically a caller sets gp's status away from Grunning and then 
   4211            .          .           // immediately calls dropg to finish the job. The caller is also responsible 
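
schedule's shape, as profiled above, is: block in findRunnable (3.66s), leave the spinning state and possibly wake a replacement spinner (2.47s), then hand the thread to the chosen goroutine via execute, which does not return. A heavily simplified, hypothetical skeleton of that loop:

	package main

	import "fmt"

	// scheduleToy caricatures the flow above: fetch runnable work (blocking
	// on the channel, like findRunnable), then run it (like execute).
	func scheduleToy(runq <-chan func()) {
		for g := range runq {
			g()
		}
	}

	func main() {
		runq := make(chan func(), 2)
		runq <- func() { fmt.Println("g1 ran") }
		runq <- func() { fmt.Println("g2 ran") }
		close(runq)
		scheduleToy(runq)
	}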

runtime.park_m

/usr/lib/go/src/runtime/proc.go

  Total:           0       30ms (flat, cum) 0.063%
   4254            .          .           	} 
   4255            .          .            
   4256            .          .           	dropg() 
   4257            .          .            
   4258            .          .           	if fn := mp.waitunlockf; fn != nil { 
   4259            .       30ms           		ok := fn(gp, mp.waitlock) 
   4260            .          .           		mp.waitunlockf = nil 
   4261            .          .           		mp.waitlock = nil 
   4262            .          .           		if !ok { 
   4263            .          .           			trace := traceAcquire() 
   4264            .          .           			casgstatus(gp, _Gwaiting, _Grunnable) 

runtime.park_m

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.22s (flat, cum)  2.56%
   4275            .          .            
   4276            .          .           	if bubble != nil { 
   4277            .          .           		bubble.decActive() 
   4278            .          .           	} 
   4279            .          .            
   4280            .      1.22s           	schedule() 
   4281            .          .           } 
   4282            .          .            
   4283            .          .           func goschedImpl(gp *g, preempted bool) { 
   4284            .          .           	trace := traceAcquire() 
   4285            .          .           	status := readgstatus(gp) 

runtime.goschedImpl

/usr/lib/go/src/runtime/proc.go

  Total:           0      360ms (flat, cum)  0.76%
   4301            .          .           	if trace.ok() { 
   4302            .          .           		traceRelease(trace) 
   4303            .          .           	} 
   4304            .          .            
   4305            .          .           	dropg() 
   4306            .       20ms           	lock(&sched.lock) 
   4307            .          .           	globrunqput(gp) 
   4308            .          .           	unlock(&sched.lock) 
   4309            .          .            
   4310            .          .           	if mainStarted { 
   4311            .      180ms           		wakep() 
   4312            .          .           	} 
   4313            .          .            
   4314            .      160ms           	schedule() 
   4315            .          .           } 
   4316            .          .            

runtime.gosched_m

/usr/lib/go/src/runtime/proc.go

  Total:           0      360ms (flat, cum)  0.76%
   4317            .          .           // Gosched continuation on g0. 
   4318            .          .           func gosched_m(gp *g) { 
   4319            .      360ms           	goschedImpl(gp, false) 
   4320            .          .           } 
   4321            .          .            
   4322            .          .           // goschedguarded is a forbidden-states-avoided version of gosched_m. 
   4323            .          .           func goschedguarded_m(gp *g) { 
   4324            .          .           	if !canPreemptM(gp.m) { 

runtime.goexit1

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4426            .          .           	runqput(pp, gp, false) 
   4427            .          .           	schedule() 
   4428            .          .           } 
   4429            .          .            
   4430            .          .           // Finishes execution of the current goroutine. 
   4431         10ms       10ms           func goexit1() { 
   4432            .          .           	if raceenabled { 
   4433            .          .           		if gp := getg(); gp.bubble != nil { 
   4434            .          .           			racereleasemergeg(gp, gp.bubble.raceaddr()) 
   4435            .          .           		} 
   4436            .          .           		racegoend() 

runtime.goexit0

/usr/lib/go/src/runtime/proc.go

  Total:        20ms      5.47s (flat, cum) 11.49%
   4442            .          .           	} 
   4443            .          .           	mcall(goexit0) 
   4444            .          .           } 
   4445            .          .            
   4446            .          .           // goexit continuation on g0. 
   4447         10ms       10ms           func goexit0(gp *g) { 
   4448            .      400ms           	gdestroy(gp) 
   4449         10ms      5.06s           	schedule() 
   4450            .          .           } 

runtime.gdestroy

/usr/lib/go/src/runtime/proc.go

  Total:       120ms      270ms (flat, cum)  0.57%
   4451            .          .            
   4452         10ms       10ms           func gdestroy(gp *g) { 
   4453            .          .           	mp := getg().m 
   4454            .          .           	pp := mp.p.ptr() 
   4455            .          .            
   4456         10ms       40ms           	casgstatus(gp, _Grunning, _Gdead) 
   4457         50ms       50ms           	gcController.addScannableStack(pp, -int64(gp.stack.hi-gp.stack.lo)) 
   4458            .      120ms           	if isSystemGoroutine(gp, false) { 
   4459            .          .           		sched.ngsys.Add(-1) 
   4460            .          .           	} 
   4461         20ms       20ms           	gp.m = nil 
   4462            .          .           	locked := gp.lockedm != 0 
   4463         20ms       20ms           	gp.lockedm = 0 
   4464            .          .           	mp.lockedg = 0 
   4465            .          .           	gp.preemptStop = false 
   4466            .          .           	gp.paniconfault = false 
   4467            .          .           	gp._defer = nil // should be true already but just in case. 
   4468            .          .           	gp._panic = nil // non-nil for Goexit during panic. points at stack-allocated data. 
   4469            .          .           	gp.writebuf = nil 
   4470            .          .           	gp.waitreason = waitReasonZero 
   4471            .          .           	gp.param = nil 
   4472            .          .           	gp.labels = nil 
   4473            .          .           	gp.timer = nil 
   4474            .          .           	gp.bubble = nil 
   4475            .          .            
   4476         10ms       10ms           	if gcBlackenEnabled != 0 && gp.gcAssistBytes > 0 { 
   4477            .          .           		// Flush assist credit to the global pool. This gives 
   4478            .          .           		// better information to pacing if the application is 
   4479            .          .           		// rapidly creating and exiting goroutines. 
   4480            .          .           		assistWorkPerByte := gcController.assistWorkPerByte.Load() 
   4481            .          .           		scanCredit := int64(assistWorkPerByte * float64(gp.gcAssistBytes)) 
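
The run of nil assignments in gdestroy is the reset half of a free-list protocol: every field that could pin memory is scrubbed before gfput returns the dead g for reuse by newproc. A loose analogue of the reset-then-pool pattern using sync.Pool (the runtime uses its own per-P free lists, not sync.Pool):

	package main

	import (
		"fmt"
		"sync"
	)

	// g is a hypothetical stand-in for a pooled object with reference fields.
	type g struct {
		param  any
		labels any
	}

	var gfree = sync.Pool{New: func() any { return new(g) }}

	// destroy scrubs references (so the pool retains no memory) and recycles.
	func destroy(gp *g) {
		gp.param = nil
		gp.labels = nil
		gfree.Put(gp)
	}

	func main() {
		gp := gfree.Get().(*g)
		gp.param = "result"
		destroy(gp)
		fmt.Println(gfree.Get().(*g).param) // <nil>: a recycled g carries no state
	}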

runtime.gdestroy

/usr/lib/go/src/runtime/proc.go

  Total:           0      130ms (flat, cum)  0.27%
   4495            .          .           		if mp.isextra { 
   4496            .          .           			throw("runtime.Goexit called in a thread that was not created by the Go runtime") 
   4497            .          .           		} 
   4498            .          .           		throw("exited a goroutine internally locked to the OS thread") 
   4499            .          .           	} 
   4500            .      130ms           	gfput(pp, gp) 
   4501            .          .           	if locked { 
   4502            .          .           		// The goroutine may have locked this thread because 
   4503            .          .           		// it put it in an unusual kernel state. Kill it 
   4504            .          .           		// rather than returning it to the thread pool. 
   4505            .          .            

runtime.save

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4521            .          .           // save must not have write barriers because invoking a write barrier 
   4522            .          .           // can clobber getg().sched. 
   4523            .          .           // 
   4524            .          .           //go:nosplit 
   4525            .          .           //go:nowritebarrierrec 
   4526         10ms       10ms           func save(pc, sp, bp uintptr) { 
   4527            .          .           	gp := getg() 
   4528            .          .            
   4529            .          .           	if gp == gp.m.g0 || gp == gp.m.gsignal { 
   4530            .          .           		// m.g0.sched is special and must describe the context 
   4531            .          .           		// for exiting the thread. mstart1 writes to it directly. 

runtime.reentersyscall

/usr/lib/go/src/runtime/proc.go

  Total:        60ms      180ms (flat, cum)  0.38%
   4570            .          .           // must always point to a valid stack frame. entersyscall below is the normal 
   4571            .          .           // entry point for syscalls, which obtains the SP and PC from the caller. 
   4572            .          .           // 
   4573            .          .           //go:nosplit 
   4574            .          .           func reentersyscall(pc, sp, bp uintptr) { 
   4575         20ms       20ms           	trace := traceAcquire() 
   4576            .          .           	gp := getg() 
   4577            .          .            
   4578            .          .           	// Disable preemption because during this function g is in Gsyscall status, 
   4579            .          .           	// but can have inconsistent g->sched, do not let GC observe it. 
   4580         30ms       30ms           	gp.m.locks++ 
   4581            .          .            
   4582            .          .           	// Entersyscall must not call any function that might split/grow the stack. 
   4583            .          .           	// (See details in comment above.) 
   4584            .          .           	// Catch calls that might, by replacing the stack guard with something that 
   4585            .          .           	// will trip any stack check and leaving a flag to tell newstack to die. 
   4586            .          .           	gp.stackguard0 = stackPreempt 
   4587            .          .           	gp.throwsplit = true 
   4588            .          .            
   4589            .          .           	// Leave SP around for GC and traceback. 
   4590            .       10ms           	save(pc, sp, bp) 
   4591            .          .           	gp.syscallsp = sp 
   4592         10ms       10ms           	gp.syscallpc = pc 
   4593            .          .           	gp.syscallbp = bp 
   4594            .      110ms           	casgstatus(gp, _Grunning, _Gsyscall) 
   4595            .          .           	if staticLockRanking { 
   4596            .          .           		// When doing static lock ranking casgstatus can call 
   4597            .          .           		// systemstack which clobbers g.sched. 
   4598            .          .           		save(pc, sp, bp) 
   4599            .          .           	} 

runtime.reentersyscall

/usr/lib/go/src/runtime/proc.go

  Total:       120ms      120ms (flat, cum)  0.25%
   4624            .          .           	if sched.sysmonwait.Load() { 
   4625            .          .           		systemstack(entersyscall_sysmon) 
   4626            .          .           		save(pc, sp, bp) 
   4627            .          .           	} 
   4628            .          .            
   4629         20ms       20ms           	if gp.m.p.ptr().runSafePointFn != 0 { 
   4630            .          .           		// runSafePointFn may stack split if run on this stack 
   4631            .          .           		systemstack(runSafePointFn) 
   4632            .          .           		save(pc, sp, bp) 
   4633            .          .           	} 
   4634            .          .            
   4635         10ms       10ms           	gp.m.syscalltick = gp.m.p.ptr().syscalltick 
   4636            .          .           	pp := gp.m.p.ptr() 
   4637            .          .           	pp.m = 0 
   4638            .          .           	gp.m.oldp.set(pp) 
   4639            .          .           	gp.m.p = 0 
   4640            .          .           	atomic.Store(&pp.status, _Psyscall) 
   4641         90ms       90ms           	if sched.gcwaiting.Load() {                                                       return b.u.Load() != 0                                               types.go:168
   4642            .          .           		systemstack(entersyscall_gcwait) 
   4643            .          .           		save(pc, sp, bp) 
   4644            .          .           	} 
   4645            .          .            
   4646            .          .           	gp.m.locks-- 

runtime.entersyscall

/usr/lib/go/src/runtime/proc.go

  Total:           0      330ms (flat, cum)  0.69%
   4663            .          .           func entersyscall() { 
   4664            .          .           	// N.B. getcallerfp cannot be written directly as argument in the call 
   4665            .          .           	// to reentersyscall because it forces spilling the other arguments to 
   4666            .          .           	// the stack. This results in exceeding the nosplit stack requirements 
   4667            .          .           	// on some platforms. 
   4668            .       30ms           	fp := getcallerfp() 
   4669            .      300ms           	reentersyscall(sys.GetCallerPC(), sys.GetCallerSP(), fp) 
   4670            .          .           } 
   4671            .          .            
   4672            .          .           func entersyscall_sysmon() { 
   4673            .          .           	lock(&sched.lock) 
   4674            .          .           	if sched.sysmonwait.Load() { 

runtime.exitsyscall

/usr/lib/go/src/runtime/proc.go

  Total:        50ms      170ms (flat, cum)  0.36%
   4799            .          .           //go:nowritebarrierrec 
   4800            .          .           //go:linkname exitsyscall 
   4801            .          .           func exitsyscall() { 
   4802            .          .           	gp := getg() 
   4803            .          .            
   4804         40ms       40ms           	gp.m.locks++ // see comment in entersyscall 
   4805            .          .           	if sys.GetCallerSP() > gp.syscallsp { 
   4806            .          .           		throw("exitsyscall: syscall frame is no longer valid") 
   4807            .          .           	} 
   4808            .          .            
   4809            .          .           	gp.waitsince = 0 
   4810         10ms       10ms           	oldp := gp.m.oldp.ptr()                                                       func (pp puintptr) ptr() *p { return (*p)(unsafe.Pointer(pp)) }      runtime2.go:266
   4811            .          .           	gp.m.oldp = 0 
   4812            .      120ms           	if exitsyscallfast(oldp) { 
   4813            .          .           		// When exitsyscallfast returns success, we have a P so can now use 
   4814            .          .           		// write barriers 
   4815            .          .           		if goroutineProfile.active { 
   4816            .          .           			// Make sure that gp has had its stack written out to the goroutine 
   4817            .          .           			// profile, exactly as it was when the goroutine profiler first 

runtime.exitsyscall

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      180ms (flat, cum)  0.38%
   4837            .          .           					trace.GoStart() 
   4838            .          .           				} 
   4839            .          .           			}) 
   4840            .          .           		} 
   4841            .          .           		// There's a cpu for us, so we can run. 
   4842         20ms       20ms           		gp.m.p.ptr().syscalltick++ 
   4843            .          .           		// We need to cas the status and scan before resuming... 
   4844            .      100ms           		casgstatus(gp, _Gsyscall, _Grunning) 
   4845            .          .           		if trace.ok() { 
   4846            .          .           			traceRelease(trace) 
   4847            .          .           		} 
   4848            .          .            
   4849            .          .           		// Garbage collector isn't running (since we are), 
   4850            .          .           		// so okay to clear syscallsp. 
   4851            .          .           		gp.syscallsp = 0 
   4852         10ms       10ms           		gp.m.locks-- 
   4853            .          .           		if gp.preempt { 
   4854            .          .           			// restore the preemption request in case we've cleared it in newstack 
   4855            .          .           			gp.stackguard0 = stackPreempt 
   4856            .          .           		} else { 
   4857            .          .           			// otherwise restore the real stackGuard, we've spoiled it in entersyscall/entersyscallblock 
   4858            .          .           			gp.stackguard0 = gp.stack.lo + stackGuard 
   4859            .          .           		} 
   4860            .          .           		gp.throwsplit = false 
   4861            .          .            
   4862         50ms       50ms           		if sched.disable.user && !schedEnabled(gp) { 
   4863            .          .           			// Scheduling of this goroutine is disabled. 
   4864            .          .           			Gosched() 
   4865            .          .           		} 
   4866            .          .            
   4867            .          .           		return 

runtime.exitsyscallfast

/usr/lib/go/src/runtime/proc.go

  Total:       100ms      120ms (flat, cum)  0.25%
   4889            .          .           	if sched.stopwait == freezeStopWait { 
   4890            .          .           		return false 
   4891            .          .           	} 
   4892            .          .            
   4893            .          .           	// Try to re-acquire the last P. 
   4894         20ms       20ms           	trace := traceAcquire() 
   4895         70ms       70ms           	if oldp != nil && oldp.status == _Psyscall && atomic.Cas(&oldp.status, _Psyscall, _Pidle) { 
   4896            .          .           		// There's a cpu for us, so we can run. 
   4897            .       10ms           		wirep(oldp) 
   4898            .       10ms           		exitsyscallfast_reacquired(trace) 
   4899            .          .           		if trace.ok() { 
   4900            .          .           			traceRelease(trace) 
   4901            .          .           		} 
   4902         10ms       10ms           		return true 
   4903            .          .           	} 
   4904            .          .           	if trace.ok() { 
   4905            .          .           		traceRelease(trace) 
   4906            .          .           	} 
   4907            .          .            
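
The fast path above rests on a single atomic.Cas: if the P released at syscall entry still sits in _Psyscall, the CAS claims it back before sysmon or anyone else can retake it. A sketch of that reacquire-if-untouched idiom, with hypothetical status values:

package main

import (
	"fmt"
	"sync/atomic"
)

// Hypothetical stand-ins for the runtime's _Pidle/_Psyscall.
const (
	pIdle uint32 = iota
	pSyscall
)

type P struct{ status atomic.Uint32 }

// tryReacquire succeeds only if p is still parked in the syscall state,
// i.e. nothing else took ownership while the syscall was blocked.
func tryReacquire(p *P) bool {
	return p != nil && p.status.CompareAndSwap(pSyscall, pIdle)
}

func main() {
	p := &P{}
	p.status.Store(pSyscall)
	fmt.Println(tryReacquire(p)) // true: fast path wins the P back
	fmt.Println(tryReacquire(p)) // false: it is no longer ours to take
}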

runtime.exitsyscallfast_reacquired

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   4937            .          .           				trace.ProcStart() 
   4938            .          .           			}) 
   4939            .          .           		} 
   4940            .          .           		gp.m.p.ptr().syscalltick++ 
   4941            .          .           	} 
   4942         10ms       10ms           } 
   4943            .          .            
   4944            .          .           func exitsyscallfast_pidle() bool { 
   4945            .          .           	lock(&sched.lock) 
   4946            .          .           	pp, _ := pidleget(0) 
   4947            .          .           	if pp != nil && sched.sysmonwait.Load() { 

runtime.newproc

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.25s (flat, cum)  2.62%
   5156            .          .           // Put it on the queue of g's waiting to run. 
   5157            .          .           // The compiler turns a go statement into a call to this. 
   5158            .          .           func newproc(fn *funcval) { 
   5159            .          .           	gp := getg() 
   5160            .          .           	pc := sys.GetCallerPC() 
   5161            .      1.25s           	systemstack(func() { 

runtime.newproc.func1

/usr/lib/go/src/runtime/proc.go

  Total:           0      1.24s (flat, cum)  2.60%
   5162            .      820ms           		newg := newproc1(fn, gp, pc, false, waitReasonZero) 
   5163            .          .            
   5164            .          .           		pp := getg().m.p.ptr() 
   5165            .       60ms           		runqput(pp, newg, true) 
   5166            .          .            
   5167            .          .           		if mainStarted { 
   5168            .      360ms           			wakep() 
   5169            .          .           		} 
   5170            .          .           	}) 
   5171            .          .           } 

runtime.newproc1

/usr/lib/go/src/runtime/proc.go

  Total:        90ms      540ms (flat, cum)  1.13%
   5173            .          .           // Create a new g in state _Grunnable (or _Gwaiting if parked is true), starting at fn. 
   5174            .          .           // callerpc is the address of the go statement that created this. The caller is responsible 
   5175            .          .           // for adding the new g to the scheduler. If parked is true, waitreason must be non-zero. 
   5176         10ms       10ms           func newproc1(fn *funcval, callergp *g, callerpc uintptr, parked bool, waitreason waitReason) *g { 
   5177         70ms       70ms           	if fn == nil { 
   5178            .          .           		fatal("go of nil func value") 
   5179            .          .           	} 
   5180            .          .            
   5181            .          .           	mp := acquirem() // disable preemption because we hold M and P in local vars. 
   5182            .          .           	pp := mp.p.ptr() 
   5183            .      450ms           	newg := gfget(pp) 
   5184            .          .           	if newg == nil { 
   5185            .          .           		newg = malg(stackMin) 
   5186            .          .           		casgstatus(newg, _Gidle, _Gdead) 
   5187            .          .           		allgadd(newg) // publishes with a g->status of Gdead so GC scanner doesn't look at uninitialized stack. 
   5188            .          .           	} 
   5189         10ms       10ms           	if newg.stack.hi == 0 { 
   5190            .          .           		throw("newproc1: newg missing stack") 
   5191            .          .           	} 
   5192            .          .            
   5193            .          .           	if readgstatus(newg) != _Gdead { 
   5194            .          .           		throw("newproc1: new g is not Gdead") 

runtime.newproc1

/usr/lib/go/src/runtime/proc.go

  Total:        80ms      280ms (flat, cum)  0.59%
   5210            .          .           	memclrNoHeapPointers(unsafe.Pointer(&newg.sched), unsafe.Sizeof(newg.sched)) 
   5211            .          .           	newg.sched.sp = sp 
   5212            .          .           	newg.stktopsp = sp 
   5213            .          .           	newg.sched.pc = abi.FuncPCABI0(goexit) + sys.PCQuantum // +PCQuantum so that previous instruction is in same function 
   5214            .          .           	newg.sched.g = guintptr(unsafe.Pointer(newg)) 
   5215            .       20ms           	gostartcallfn(&newg.sched, fn) 
   5216            .          .           	newg.parentGoid = callergp.goid 
   5217            .          .           	newg.gopc = callerpc 
   5218            .       10ms           	newg.ancestors = saveAncestors(callergp) 
   5219         10ms       10ms           	newg.startpc = fn.fn 
   5220            .          .           	newg.runningCleanups.Store(false) 
   5221            .      130ms           	if isSystemGoroutine(newg, false) { 
   5222            .          .           		sched.ngsys.Add(1) 
   5223            .          .           	} else { 
   5224            .          .           		// Only user goroutines inherit synctest groups and pprof labels. 
   5225            .          .           		newg.bubble = callergp.bubble 
   5226            .          .           		if mp.curg != nil { 
   5227            .          .           			newg.labels = mp.curg.labels 
   5228            .          .           		} 
   5229            .          .           		if goroutineProfile.active { 
   5230            .          .           			// A concurrent goroutine profile is running. It should include 
   5231            .          .           			// exactly the set of goroutines that were alive when the goroutine 
   5232            .          .           			// profiler first stopped the world. That does not include newg, so 
   5233            .          .           			// mark it as not needing a profile before transitioning it from 
   5234            .          .           			// _Gdead. 
   5235            .          .           			newg.goroutineProfiled.Store(goroutineProfileSatisfied) 
   5236            .          .           		} 
   5237            .          .           	} 
   5238            .          .           	// Track initial transition? 
   5239         10ms       10ms           	newg.trackingSeq = uint8(cheaprand())                                                       mp := getg().m                                                       rand.go:228
   5240            .          .           	if newg.trackingSeq%gTrackingPeriod == 0 { 
   5241            .          .           		newg.tracking = true 
   5242            .          .           	} 
   5243         30ms       30ms           	gcController.addScannableStack(pp, int64(newg.stack.hi-newg.stack.lo))                             c.maxStackScan.Add(pp.maxStackScanDelta)  mgcpacer.go:924    return Xadd64(&u.value, delta)  types.go:344
   5244            .          .            
   5245            .          .           	// Get a goid and switch to runnable. Make all this atomic to the tracer. 
   5246            .          .           	trace := traceAcquire() 
   5247            .          .           	var status uint32 = _Grunnable 
   5248            .          .           	if parked { 
   5249            .          .           		status = _Gwaiting 
   5250            .          .           		newg.waitreason = waitreason 
   5251            .          .           	} 
   5252            .          .           	if pp.goidcache == pp.goidcacheend { 
   5253            .          .           		// Sched.goidgen is the last allocated id, 
   5254            .          .           		// this batch must be [sched.goidgen+1, sched.goidgen+GoidCacheBatch]. 
   5255            .          .           		// At startup sched.goidgen=0, so main goroutine receives goid=1. 
   5256         10ms       10ms           		pp.goidcache = sched.goidgen.Add(_GoidCacheBatch) 
   5257            .          .           		pp.goidcache -= _GoidCacheBatch - 1 
   5258            .          .           		pp.goidcacheend = pp.goidcache + _GoidCacheBatch 
   5259            .          .           	} 
   5260         10ms       10ms           	newg.goid = pp.goidcache 
   5261            .       40ms           	casgstatus(newg, _Gdead, status) 
   5262            .          .           	pp.goidcache++ 
   5263            .          .           	newg.trace.reset() 
   5264            .          .           	if trace.ok() { 
   5265            .          .           		trace.GoCreate(newg, newg.startpc, parked) 
   5266            .          .           		traceRelease(trace) 
   5267            .          .           	} 
   5268            .          .            
   5269            .          .           	// Set up race context. 
   5270            .          .           	if raceenabled { 
   5271            .          .           		newg.racectx = racegostart(callerpc) 
   5272            .          .           		newg.raceignore = 0 
   5273            .          .           		if newg.labels != nil { 
   5274            .          .           			// See note in proflabel.go on labelSync's role in synchronizing 
   5275            .          .           			// with the reads in the signal handler. 
   5276            .          .           			racereleasemergeg(newg, unsafe.Pointer(&labelSync)) 
   5277            .          .           		} 
   5278            .          .           	} 
   5279         10ms       10ms           	releasem(mp) 
   5280            .          .            
   5281            .          .           	return newg 
   5282            .          .           } 
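
The goidcache lines above are why goid assignment stays off the shared counter: each P takes _GoidCacheBatch ids from sched.goidgen with one atomic add and hands them out locally. A sketch of that batched-ID pattern, assuming a batch size of 16:

package main

import (
	"fmt"
	"sync/atomic"
)

const batch = 16 // stand-in for _GoidCacheBatch

var idgen atomic.Uint64 // stand-in for sched.goidgen (last allocated id)

// idCache is the per-worker analogue of pp.goidcache/pp.goidcacheend.
type idCache struct{ next, end uint64 }

func (c *idCache) nextID() uint64 {
	if c.next == c.end {
		// One atomic op buys the whole batch (hi-batch, hi].
		hi := idgen.Add(batch)
		c.next = hi - batch + 1
		c.end = hi + 1
	}
	id := c.next
	c.next++
	return id
}

func main() {
	var c idCache
	fmt.Println(c.nextID(), c.nextID()) // 1 2, with one atomic add
}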

runtime.saveAncestors

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   5284            .          .           // saveAncestors copies previous ancestors of the given caller g and 
   5285            .          .           // includes info for the current caller into a new set of tracebacks for 
   5286            .          .           // a g being created. 
   5287         10ms       10ms           func saveAncestors(callergp *g) *[]ancestorInfo { 
   5288            .          .           	// Copy all prior info, except for the root goroutine (goid 0). 
   5289            .          .           	if debug.tracebackancestors <= 0 || callergp.goid == 0 { 
   5290            .          .           		return nil 
   5291            .          .           	} 
   5292            .          .           	var callerAncestors []ancestorInfo 

runtime.gfput

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      130ms (flat, cum)  0.27%
   5316            .          .           } 
   5317            .          .            
   5318            .          .           // Put on gfree list. 
   5319            .          .           // If local list is too long, transfer a batch to the global list. 
   5320            .          .           func gfput(pp *p, gp *g) { 
   5321         10ms       10ms           	if readgstatus(gp) != _Gdead { 
   5322            .          .           		throw("gfput: bad status (not Gdead)") 
   5323            .          .           	} 
   5324            .          .            
   5325            .          .           	stksize := gp.stack.hi - gp.stack.lo 
   5326            .          .            
   5327            .          .           	if stksize != uintptr(startingStackSize) { 
   5328            .          .           		// non-standard stack size - free it. 
   5329            .      100ms           		stackfree(gp.stack) 
   5330            .          .           		gp.stack.lo = 0 
   5331            .          .           		gp.stack.hi = 0 
   5332            .          .           		gp.stackguard0 = 0 
   5333            .          .           		if valgrindenabled { 
   5334            .          .           			valgrindDeregisterStack(gp.valgrindStackID) 
   5335            .          .           			gp.valgrindStackID = 0 
   5336            .          .           		} 
   5337            .          .           	} 
   5338            .          .            
   5339            .          .           	pp.gFree.push(gp) 
   5340            .          .           	if pp.gFree.size >= 64 { 
   5341            .          .           		var ( 
   5342            .          .           			stackQ   gQueue 
   5343            .          .           			noStackQ gQueue 
   5344            .          .           		) 
   5345            .          .           		for pp.gFree.size >= 32 { 
   5346         10ms       10ms           			gp := pp.gFree.pop()                                                                       l.head = gp.schedlink                                proc.go:7420
   5347         10ms       10ms           			if gp.stack.lo == 0 { 
   5348            .          .           				noStackQ.push(gp) 
   5349            .          .           			} else { 
   5350            .          .           				stackQ.push(gp) 
   5351            .          .           			} 
   5352            .          .           		} 

runtime.gfget

/usr/lib/go/src/runtime/proc.go

  Total:       300ms      450ms (flat, cum)  0.94%
   5359            .          .            
   5360            .          .           // Get from gfree list. 
   5361            .          .           // If local list is empty, grab a batch from global list. 
   5362            .          .           func gfget(pp *p) *g { 
   5363            .          .           retry: 
   5364         10ms       10ms           	if pp.gFree.empty() && (!sched.gFree.stack.empty() || !sched.gFree.noStack.empty()) { 
   5365            .          .           		lock(&sched.gFree.lock) 
   5366            .          .           		// Move a batch of free Gs to the P. 
   5367            .          .           		for pp.gFree.size < 32 { 
   5368            .          .           			// Prefer Gs with stacks. 
   5369         10ms       10ms           			gp := sched.gFree.stack.pop()                                                                       l.head = gp.schedlink                                proc.go:7420
   5370            .          .           			if gp == nil { 
   5371         60ms       60ms           				gp = sched.gFree.noStack.pop()                                                                               l.head = gp.schedlink                        proc.go:7420
   5372            .          .           				if gp == nil { 
   5373            .          .           					break 
   5374            .          .           				} 
   5375            .          .           			} 
   5376            .          .           			pp.gFree.push(gp) 
   5377            .          .           		} 
   5378            .       10ms           		unlock(&sched.gFree.lock)                                               unlockWithRank(l)  lock_spinbit.go:261    unlock2(l)  lockrank_off.go:35
   5379            .          .           		goto retry 
   5380            .          .           	} 
   5381        180ms      180ms           	gp := pp.gFree.pop()                                                       l.head = gp.schedlink                                                proc.go:7420
   5382            .          .           	if gp == nil { 
   5383            .          .           		return nil 
   5384            .          .           	} 
   5385         40ms       40ms           	if gp.stack.lo != 0 && gp.stack.hi-gp.stack.lo != uintptr(startingStackSize) { 
   5386            .          .           		// Deallocate old stack. We kept it in gfput because it was the 
   5387            .          .           		// right size when the goroutine was put on the free list, but 
   5388            .          .           		// the right size has changed since then. 
   5389            .          .           		systemstack(func() { 
   5390            .          .           			stackfree(gp.stack) 
   5391            .          .           			gp.stack.lo = 0 
   5392            .          .           			gp.stack.hi = 0 
   5393            .          .           			gp.stackguard0 = 0 
   5394            .          .           			if valgrindenabled { 
   5395            .          .           				valgrindDeregisterStack(gp.valgrindStackID) 
   5396            .          .           				gp.valgrindStackID = 0 
   5397            .          .           			} 
   5398            .          .           		}) 
   5399            .          .           	} 
   5400            .          .           	if gp.stack.lo == 0 { 
   5401            .          .           		// Stack was deallocated in gfput or just above. Allocate a new one. 
   5402            .      140ms           		systemstack(func() { 

runtime.gfget.func2

/usr/lib/go/src/runtime/proc.go

  Total:           0      140ms (flat, cum)  0.29%
   5403            .      140ms           			gp.stack = stackalloc(startingStackSize) 
   5404            .          .           			if valgrindenabled { 
   5405            .          .           				gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(gp.stack.lo), unsafe.Pointer(gp.stack.hi)) 
   5406            .          .           			} 
   5407            .          .           		}) 
   5408            .          .           		gp.stackguard0 = gp.stack.lo + stackGuard 
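
gfput and gfget form a two-level free list: dead Gs live on a per-P list, spill to the global list once the local list tops 64, and refill in batches of up to 32, so sched.gFree.lock is taken once per batch instead of once per G. A sketch of the same pattern for arbitrary items, with those thresholds assumed:

package main

import (
	"fmt"
	"sync"
)

type node struct{ next *node }

type stack struct {
	head *node
	size int
}

func (s *stack) push(n *node) { n.next = s.head; s.head = n; s.size++ }

func (s *stack) pop() *node {
	n := s.head
	if n != nil {
		s.head, n.next = n.next, nil
		s.size--
	}
	return n
}

var global struct { // like sched.gFree
	sync.Mutex
	stack
}

type local struct{ stack } // like pp.gFree

func (l *local) put(n *node) {
	l.push(n)
	if l.size >= 64 { // spill half under one critical section
		global.Lock()
		for l.size >= 32 {
			global.push(l.pop())
		}
		global.Unlock()
	}
}

func (l *local) get() *node {
	// Unsynchronized peek, as in gfget; the pops happen under the lock.
	if l.head == nil && global.head != nil {
		global.Lock()
		for l.size < 32 && global.head != nil { // refill a batch
			l.push(global.pop())
		}
		global.Unlock()
	}
	return l.pop()
}

func main() {
	var l local
	for i := 0; i < 70; i++ {
		l.put(new(node))
	}
	fmt.Println(l.size, global.size) // 37 33 after the single spill
}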

runtime.acquirep

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       90ms (flat, cum)  0.19%
   6010            .          .           // 
   6011            .          .           // This function is allowed to have write barriers even if the caller 
   6012            .          .           // isn't because it immediately acquires pp. 
   6013            .          .           // 
   6014            .          .           //go:yeswritebarrierrec 
   6015         10ms       10ms           func acquirep(pp *p) { 
   6016            .          .           	// Do the part that isn't allowed to have write barriers. 
   6017            .       10ms           	wirep(pp) 
   6018            .          .            
   6019            .          .           	// Have p; write barriers now allowed. 
   6020            .          .            
   6021            .          .           	// Perform deferred mcache flush before this P can allocate 
   6022            .          .           	// from a potentially stale mcache. 
   6023         10ms       70ms           	pp.mcache.prepareForSweep() 
   6024            .          .            
   6025            .          .           	trace := traceAcquire() 
   6026            .          .           	if trace.ok() { 
   6027            .          .           		trace.ProcStart() 
   6028            .          .           		traceRelease(trace) 

runtime.wirep

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   6043            .          .           		// on some platforms when built with -N -l. See #64113. 
   6044            .          .           		systemstack(func() { 
   6045            .          .           			throw("wirep: already in go") 
   6046            .          .           		}) 
   6047            .          .           	} 
   6048         10ms       10ms           	if pp.m != 0 || pp.status != _Pidle { 
   6049            .          .           		// Call on the systemstack to avoid a nosplit overflow build failure 
   6050            .          .           		// on some platforms when built with -N -l. See #64113. 
   6051            .          .           		systemstack(func() { 
   6052            .          .           			id := int64(0) 
   6053            .          .           			if pp.m != 0 { 
   6054            .          .           				id = pp.m.ptr().id 
   6055            .          .           			} 
   6056            .          .           			print("wirep: p->m=", pp.m, "(", id, ") p->status=", pp.status, "\n") 
   6057            .          .           			throw("wirep: invalid p state") 
   6058            .          .           		}) 
   6059            .          .           	} 
   6060            .          .           	gp.m.p.set(pp) 
   6061            .          .           	pp.m.set(gp.m) 
   6062            .          .           	pp.status = _Prunning 
   6063         10ms       10ms           } 
   6064            .          .            

runtime.releasep

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6066            .          .           func releasep() *p { 
   6067         10ms       10ms           	trace := traceAcquire()                                                       if !traceEnabled() {                                                 traceruntime.go:188
   6068            .          .           	if trace.ok() { 
   6069            .          .           		trace.ProcStop(getg().m.p.ptr()) 
   6070            .          .           		traceRelease(trace) 
   6071            .          .           	} 
   6072            .          .           	return releasepNoTrace() 

runtime.checkdead

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6100            .          .           } 
   6101            .          .            
   6102            .          .           // Check for deadlock situation. 
   6103            .          .           // The check is based on number of running M's, if 0 -> deadlock. 
   6104            .          .           // sched.lock must be held. 
   6105         10ms       10ms           func checkdead() { 
   6106            .          .           	assertLockHeld(&sched.lock) 
   6107            .          .            
   6108            .          .           	// For -buildmode=c-shared or -buildmode=c-archive it's OK if 
   6109            .          .           	// there are no running goroutines. The calling program is 
   6110            .          .           	// assumed to be running. 

runtime.checkdead

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   6129            .          .           	var run0 int32 
   6130            .          .           	if !iscgo && cgoHasExtraM && extraMLength.Load() > 0 { 
   6131            .          .           		run0 = 1 
   6132            .          .           	} 
   6133            .          .            
   6134         10ms       10ms           	run := mcount() - sched.nmidle - sched.nmidlelocked - sched.nmsys 
   6135            .          .           	if run > run0 { 
   6136            .          .           		return 
   6137            .          .           	} 
   6138            .          .           	if run < 0 { 
   6139            .          .           		print("runtime: checkdead: nmidle=", sched.nmidle, " nmidlelocked=", sched.nmidlelocked, " mcount=", mcount(), " nmsys=", sched.nmsys, "\n") 

runtime.mput

/usr/lib/go/src/runtime/proc.go

  Total:           0       20ms (flat, cum) 0.042%
   6826            .          .           	assertLockHeld(&sched.lock) 
   6827            .          .            
   6828            .          .           	mp.schedlink = sched.midle 
   6829            .          .           	sched.midle.set(mp) 
   6830            .          .           	sched.nmidle++ 
   6831            .       20ms           	checkdead() 
   6832            .          .           } 
   6833            .          .            
   6834            .          .           // Try to get an m from midle list. 
   6835            .          .           // sched.lock must be held. 
   6836            .          .           // May run during STW, so write barriers are not allowed. 

runtime.mget

/usr/lib/go/src/runtime/proc.go

  Total:        60ms       60ms (flat, cum)  0.13%
   6839            .          .           func mget() *m { 
   6840            .          .           	assertLockHeld(&sched.lock) 
   6841            .          .            
   6842            .          .           	mp := sched.midle.ptr() 
   6843            .          .           	if mp != nil { 
   6844         60ms       60ms           		sched.midle = mp.schedlink 
   6845            .          .           		sched.nmidle-- 
   6846            .          .           	} 
   6847            .          .           	return mp 
   6848            .          .           } 
   6849            .          .            

runtime.pMask.read

/usr/lib/go/src/runtime/proc.go

  Total:       140ms      140ms (flat, cum)  0.29%
   6920            .          .            
   6921            .          .           // read returns true if P id's bit is set. 
   6922            .          .           func (p pMask) read(id uint32) bool { 
   6923            .          .           	word := id / 32 
   6924            .          .           	mask := uint32(1) << (id % 32) 
   6925        140ms      140ms           	return (atomic.Load(&p[word]) & mask) != 0 
   6926            .          .           } 
   6927            .          .            

runtime.pMask.set

/usr/lib/go/src/runtime/proc.go

  Total:        30ms       30ms (flat, cum) 0.063%
   6929            .          .           func (p pMask) set(id int32) { 
   6930            .          .           	word := id / 32 
   6931         20ms       20ms           	mask := uint32(1) << (id % 32) 
   6932         10ms       10ms           	atomic.Or(&p[word], mask) 
   6933            .          .           } 
   6934            .          .            

runtime.pMask.clear

/usr/lib/go/src/runtime/proc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   6936            .          .           func (p pMask) clear(id int32) { 
   6937            .          .           	word := id / 32 
   6938         20ms       20ms           	mask := uint32(1) << (id % 32) 
   6939            .          .           	atomic.And(&p[word], ^mask) 
   6940            .          .           } 
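
pMask packs one bit per P id into a []uint32, so read is one atomic load and set/clear are single atomic or/and operations. A sketch of the same bitmask with sync/atomic, using CAS loops where the runtime calls its internal atomic.Or/And:

package main

import (
	"fmt"
	"sync/atomic"
)

// mask32 holds one flag bit per id, 32 ids per word, like runtime's pMask.
type mask32 []uint32

func (m mask32) read(id uint32) bool {
	word, bit := id/32, uint32(1)<<(id%32)
	return atomic.LoadUint32(&m[word])&bit != 0
}

func (m mask32) set(id uint32) {
	word, bit := id/32, uint32(1)<<(id%32)
	for { // CAS loop standing in for a hardware atomic OR
		old := atomic.LoadUint32(&m[word])
		if atomic.CompareAndSwapUint32(&m[word], old, old|bit) {
			return
		}
	}
}

func (m mask32) clear(id uint32) {
	word, bit := id/32, uint32(1)<<(id%32)
	for {
		old := atomic.LoadUint32(&m[word])
		if atomic.CompareAndSwapUint32(&m[word], old, old&^bit) {
			return
		}
	}
}

func main() {
	m := make(mask32, 2) // room for 64 ids
	m.set(40)
	fmt.Println(m.read(40), m.read(41)) // true false
	m.clear(40)
	fmt.Println(m.read(40)) // false
}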
   6941            .          .            
   6942            .          .           // pidleput puts p on the _Pidle list. now must be a relatively recent call 
   6943            .          .           // to nanotime or zero. Returns now or the current time if now was zero. 

runtime.pidleput

/usr/lib/go/src/runtime/proc.go

  Total:       110ms      110ms (flat, cum)  0.23%
   6948            .          .           // sched.lock must be held. 
   6949            .          .           // 
   6950            .          .           // May run during STW, so write barriers are not allowed. 
   6951            .          .           // 
   6952            .          .           //go:nowritebarrierrec 
   6953         10ms       10ms           func pidleput(pp *p, now int64) int64 { 
   6954            .          .           	assertLockHeld(&sched.lock) 
   6955            .          .            
   6956            .          .           	if !runqempty(pp) { 
   6957            .          .           		throw("pidleput: P has non-empty run queue") 
   6958            .          .           	} 
   6959            .          .           	if now == 0 { 
   6960         10ms       10ms           		now = nanotime()                                                               return nanotime1()                                           time_nofake.go:33
   6961            .          .           	} 
   6962            .          .           	if pp.timers.len.Load() == 0 { 
   6963         20ms       20ms           		timerpMask.clear(pp.id)                                                               mask := uint32(1) << (id % 32)                               proc.go:6938
   6964            .          .           	} 
   6965         50ms       50ms           	idlepMask.set(pp.id) 
   6966         20ms       20ms           	pp.link = sched.pidle 
   6967            .          .           	sched.pidle.set(pp) 
   6968            .          .           	sched.npidle.Add(1) 
   6969            .          .           	if !pp.limiterEvent.start(limiterEventIdle, now) { 
   6970            .          .           		throw("must be able to track idle limiter event") 
   6971            .          .           	} 

runtime.pidleget

/usr/lib/go/src/runtime/proc.go

  Total:       150ms      190ms (flat, cum)   0.4%
   6984            .          .            
   6985            .          .           	pp := sched.pidle.ptr() 
   6986            .          .           	if pp != nil { 
   6987            .          .           		// Timer may get added at any time now. 
   6988            .          .           		if now == 0 { 
   6989         40ms       40ms           			now = nanotime()                                                                       return nanotime1()                                   time_nofake.go:33
   6990            .          .           		} 
   6991         30ms       30ms           		timerpMask.set(pp.id)                                                               atomic.Or(&p[word], mask)  proc.go:6932    mask := uint32(1) << (id % 32)  proc.go:6931
   6992         60ms       60ms           		idlepMask.clear(pp.id) 
   6993            .          .           		sched.pidle = pp.link 
   6994            .          .           		sched.npidle.Add(-1) 
   6995            .       40ms           		pp.limiterEvent.stop(limiterEventIdle, now) 
   6996            .          .           	} 
   6997         20ms       20ms           	return pp, now 
   6998            .          .           } 
   6999            .          .            
   7000            .          .           // pidlegetSpinning tries to get a p from the _Pidle list, acquiring ownership. 
   7001            .          .           // This is called by spinning Ms (or callers that need a spinning M) that have 
   7002            .          .           // found work. If no P is available, this must synchronize with non-spinning 

runtime.pidlegetSpinning

/usr/lib/go/src/runtime/proc.go

  Total:           0      190ms (flat, cum)   0.4%
   7008            .          .           // 
   7009            .          .           //go:nowritebarrierrec 
   7010            .          .           func pidlegetSpinning(now int64) (*p, int64) { 
   7011            .          .           	assertLockHeld(&sched.lock) 
   7012            .          .            
   7013            .      190ms           	pp, now := pidleget(now) 
   7014            .          .           	if pp == nil { 
   7015            .          .           		// See "Delicate dance" comment in findrunnable. We found work 
   7016            .          .           		// that we cannot take, we must synchronize with non-spinning 
   7017            .          .           		// Ms that may be preparing to drop their P. 
   7018            .          .           		sched.needspinning.Store(1) 

runtime.runqempty

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7031            .          .           	// does not mean the queue is empty. 
   7032            .          .           	for { 
   7033            .          .           		head := atomic.Load(&pp.runqhead) 
   7034            .          .           		tail := atomic.Load(&pp.runqtail) 
   7035            .          .           		runnext := atomic.Loaduintptr((*uintptr)(unsafe.Pointer(&pp.runnext))) 
   7036         10ms       10ms           		if tail == atomic.Load(&pp.runqtail) { 
   7037            .          .           			return head == tail && runnext == 0 
   7038            .          .           		} 
   7039            .          .           	} 
   7040            .          .           } 
   7041            .          .            

runtime.runqput

/usr/lib/go/src/runtime/proc.go

  Total:        70ms       70ms (flat, cum)  0.15%
   7072            .          .           	} 
   7073            .          .            
   7074            .          .           	if next { 
   7075            .          .           	retryNext: 
   7076            .          .           		oldnext := pp.runnext 
   7077         70ms       70ms           		if !pp.runnext.cas(oldnext, guintptr(unsafe.Pointer(gp))) {                                                               return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new)) runtime2.go:246
   7078            .          .           			goto retryNext 
   7079            .          .           		} 
   7080            .          .           		if oldnext == 0 { 
   7081            .          .           			return 
   7082            .          .           		} 

runtime.runqget

/usr/lib/go/src/runtime/proc.go

  Total:       100ms      100ms (flat, cum)  0.21%
   7179            .          .           	// If there's a runnext, it's the next G to run. 
   7180            .          .           	next := pp.runnext 
   7181            .          .           	// If the runnext is non-0 and the CAS fails, it could only have been stolen by another P, 
   7182            .          .           	// because other Ps can race to set runnext to 0, but only the current P can set it to non-0. 
   7183            .          .           	// Hence, there's no need to retry this CAS if it fails. 
   7184         50ms       50ms           	if next != 0 && pp.runnext.cas(next, 0) {                                                       return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new)) runtime2.go:246
   7185            .          .           		return next.ptr(), true 
   7186            .          .           	} 
   7187            .          .            
   7188            .          .           	for { 
   7189            .          .           		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers 
   7190         10ms       10ms           		t := pp.runqtail 
   7191            .          .           		if t == h { 
   7192            .          .           			return nil, false 
   7193            .          .           		} 
   7194            .          .           		gp := pp.runq[h%uint32(len(pp.runq))].ptr() 
   7195         40ms       40ms           		if atomic.CasRel(&pp.runqhead, h, h+1) { // cas-release, commits consume 
   7196            .          .           			return gp, false 
   7197            .          .           		} 
   7198            .          .           	} 
   7199            .          .           } 
   7200            .          .            

runtime.runqgrab

/usr/lib/go/src/runtime/proc.go

  Total:       400ms      560ms (flat, cum)  1.18%
   7239            .          .           // Batch is a ring buffer starting at batchHead. 
   7240            .          .           // Returns number of grabbed goroutines. 
   7241            .          .           // Can be executed by any P. 
   7242            .          .           func runqgrab(pp *p, batch *[256]guintptr, batchHead uint32, stealRunNextG bool) uint32 { 
   7243            .          .           	for { 
   7244        160ms      160ms           		h := atomic.LoadAcq(&pp.runqhead) // load-acquire, synchronize with other consumers 
   7245         10ms       10ms           		t := atomic.LoadAcq(&pp.runqtail) // load-acquire, synchronize with the producer 
   7246            .          .           		n := t - h 
   7247            .          .           		n = n - n/2 
   7248            .          .           		if n == 0 { 
   7249         30ms       30ms           			if stealRunNextG { 
   7250            .          .           				// Try to steal from pp.runnext. 
   7251         80ms       80ms           				if next := pp.runnext; next != 0 { 
   7252            .          .           					if pp.status == _Prunning { 
   7253            .          .           						// Sleep to ensure that pp isn't about to run the g 
   7254            .          .           						// we are about to steal. 
   7255            .          .           						// The important use case here is when the g running 
   7256            .          .           						// on pp ready()s another g and then almost 
   7257            .          .           						// immediately blocks. Instead of stealing runnext 
   7258            .          .           						// in this window, back off to give pp a chance to 
   7259            .          .           						// schedule runnext. This will avoid thrashing gs 
   7260            .          .           						// between different Ps. 
   7261            .          .           						// A sync chan send/recv takes ~50ns as of time of 
   7262            .          .           						// writing, so 3us gives ~50x overshoot. 
   7263            .          .           						if !osHasLowResTimer { 
   7264            .      160ms           							usleep(3) 
   7265            .          .           						} else { 
   7266            .          .           							// On some platforms system timer granularity is 
   7267            .          .           							// 1-15ms, which is way too much for this 
   7268            .          .           							// optimization. So just yield. 
   7269            .          .           							osyield() 
   7270            .          .           						} 
   7271            .          .           					} 
   7272         10ms       10ms           					if !pp.runnext.cas(next, 0) {                                                                                       return atomic.Casuintptr((*uintptr)(unsafe.Pointer(gp)), uintptr(old), uintptr(new)) runtime2.go:246
   7273            .          .           						continue 
   7274            .          .           					} 
   7275            .          .           					batch[batchHead%uint32(len(batch))] = next 
   7276            .          .           					return 1 
   7277            .          .           				} 
   7278            .          .           			} 
   7279            .          .           			return 0 
   7280            .          .           		} 
   7281            .          .           		if n > uint32(len(pp.runq)/2) { // read inconsistent h and t 
   7282            .          .           			continue 
   7283            .          .           		} 
   7284            .          .           		for i := uint32(0); i < n; i++ { 
   7285            .          .           			g := pp.runq[(h+i)%uint32(len(pp.runq))] 
   7286         50ms       50ms           			batch[(batchHead+i)%uint32(len(batch))] = g 
   7287            .          .           		} 
   7288         60ms       60ms           		if atomic.CasRel(&pp.runqhead, h, h+n) { // cas-release, commits consume 
   7289            .          .           			return n 
   7290            .          .           		} 
   7291            .          .           	} 
   7292            .          .           } 

runtime.runqsteal

/usr/lib/go/src/runtime/proc.go

  Total:        30ms      590ms (flat, cum)  1.24%
   7293            .          .            
   7294            .          .           // Steal half of elements from local runnable queue of p2 
   7295            .          .           // and put onto local runnable queue of p. 
   7296            .          .           // Returns one of the stolen elements (or nil if failed). 
   7297         20ms       20ms           func runqsteal(pp, p2 *p, stealRunNextG bool) *g { 
   7298            .          .           	t := pp.runqtail 
   7299         10ms      570ms           	n := runqgrab(p2, &pp.runq, t, stealRunNextG) 
   7300            .          .           	if n == 0 { 
   7301            .          .           		return nil 
   7302            .          .           	} 
   7303            .          .           	n-- 
   7304            .          .           	gp := pp.runq[(t+n)%uint32(len(pp.runq))].ptr() 
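
runqput, runqget, runqgrab, and runqsteal implement a fixed-size single-producer ring: the owning P writes at the tail, thieves CAS the head, and a steal grabs half the queue (n = n - n/2 rounds up). A compact sketch of the same shape, leaving out the runnext slot and the usleep backoff, and using sync/atomic's sequentially consistent operations where the runtime gets by with LoadAcq/CasRel:

package main

import (
	"fmt"
	"sync/atomic"
)

type runq struct {
	head, tail atomic.Uint32
	buf        [8]int
}

// put appends at the tail; only the owner calls it. False when full.
func (q *runq) put(v int) bool {
	h, t := q.head.Load(), q.tail.Load()
	if t-h >= uint32(len(q.buf)) {
		return false
	}
	q.buf[t%uint32(len(q.buf))] = v
	q.tail.Store(t + 1) // publish only after the slot is written
	return true
}

// get pops at the head; the CAS arbitrates against concurrent thieves.
func (q *runq) get() (int, bool) {
	for {
		h, t := q.head.Load(), q.tail.Load()
		if h == t {
			return 0, false
		}
		v := q.buf[h%uint32(len(q.buf))]
		if q.head.CompareAndSwap(h, h+1) {
			return v, true
		}
	}
}

// stealHalf moves half of q into dst, like runqgrab into a batch buffer.
func (q *runq) stealHalf(dst *runq) uint32 {
	for {
		h, t := q.head.Load(), q.tail.Load()
		n := t - h
		n = n - n/2 // take the larger half
		if n == 0 {
			return 0
		}
		if n > uint32(len(q.buf)/2) { // h and t were read inconsistently
			continue
		}
		tmp := make([]int, n)
		for i := uint32(0); i < n; i++ {
			tmp[i] = q.buf[(h+i)%uint32(len(q.buf))]
		}
		if q.head.CompareAndSwap(h, h+n) { // commit the grab
			for _, v := range tmp {
				dst.put(v) // dst is the thief's own queue; assume room
			}
			return n
		}
	}
}

func main() {
	var a, b runq
	for i := 1; i <= 6; i++ {
		a.put(i)
	}
	fmt.Println(b.stealHalf(&a)) // 3: the older half of a
	v, _ := b.get()
	fmt.Println(v) // 1
}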

runtime.(*gList).pop

/usr/lib/go/src/runtime/proc.go

  Total:       260ms      260ms (flat, cum)  0.55%
   7415            .          .            
   7416            .          .           // pop removes and returns the head of l. If l is empty, it returns nil. 
   7417            .          .           func (l *gList) pop() *g { 
   7418            .          .           	gp := l.head.ptr() 
   7419            .          .           	if gp != nil { 
   7420        260ms      260ms           		l.head = gp.schedlink 
   7421            .          .           		l.size-- 
   7422            .          .           	} 
   7423            .          .           	return gp 
   7424            .          .           } 
   7425            .          .            
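
pop above is a single pointer move because gList is intrusive: every g carries its own schedlink field, so the list allocates nothing and pop never touches more than one node. A sketch:

package main

import "fmt"

// The element embeds its own link, like g.schedlink.
type g struct {
	id        int
	schedlink *g
}

type gList struct {
	head *g
	size int
}

func (l *gList) push(gp *g) { gp.schedlink = l.head; l.head = gp; l.size++ }

func (l *gList) pop() *g {
	gp := l.head
	if gp != nil {
		l.head = gp.schedlink
		l.size--
	}
	return gp
}

func main() {
	var l gList
	l.push(&g{id: 1})
	l.push(&g{id: 2})
	fmt.Println(l.pop().id, l.pop().id, l.pop() == nil) // 2 1 true
}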

sync.runtime_procPin

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7474            .          .           	gp.m.locks-- 
   7475            .          .           } 
   7476            .          .            
   7477            .          .           //go:linkname sync_runtime_procPin sync.runtime_procPin 
   7478            .          .           //go:nosplit 
   7479         10ms       10ms           func sync_runtime_procPin() int { 
   7480            .          .           	return procPin() 
   7481            .          .           } 
   7482            .          .            
   7483            .          .           //go:linkname sync_runtime_procUnpin sync.runtime_procUnpin 
   7484            .          .           //go:nosplit 

runtime.(*randomOrder).start

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7587            .          .            
   7588            .          .           func (ord *randomOrder) start(i uint32) randomEnum { 
   7589            .          .           	return randomEnum{ 
   7590            .          .           		count: ord.count, 
   7591            .          .           		pos:   i % ord.count, 
   7592         10ms       10ms           		inc:   ord.coprimes[i/ord.count%uint32(len(ord.coprimes))], 
   7593            .          .           	} 
   7594            .          .           } 

runtime.(*randomEnum).done

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7595            .          .            
   7596            .          .           func (enum *randomEnum) done() bool { 
   7597         10ms       10ms           	return enum.i == enum.count 
   7598            .          .           } 
   7599            .          .            

runtime.(*randomEnum).next

/usr/lib/go/src/runtime/proc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   7600            .          .           func (enum *randomEnum) next() { 
   7601            .          .           	enum.i++ 
   7602         10ms       10ms           	enum.pos = (enum.pos + enum.inc) % enum.count 
   7603            .          .           } 
   7604            .          .            
   7605            .          .           func (enum *randomEnum) position() uint32 { 
   7606            .          .           	return enum.pos 
   7607            .          .           } 
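
randomOrder visits every P id exactly once in a scattered order: start anywhere and keep adding an increment that is coprime with the count, modulo the count. Coprimality makes the walk a full cycle, so no id is skipped or repeated. A sketch that builds the coprime table inline (the runtime precomputes ord.coprimes once):

package main

import "fmt"

func gcd(a, b uint32) uint32 {
	for b != 0 {
		a, b = b, a%b
	}
	return a
}

// order returns 0..count-1 exactly once, with start and stride from seed.
func order(count, seed uint32) []uint32 {
	var coprimes []uint32
	for i := uint32(1); i <= count; i++ {
		if gcd(i, count) == 1 {
			coprimes = append(coprimes, i)
		}
	}
	pos := seed % count
	inc := coprimes[seed/count%uint32(len(coprimes))]
	out := make([]uint32, 0, count)
	for i := uint32(0); i < count; i++ {
		out = append(out, pos)
		pos = (pos + inc) % count
	}
	return out
}

func main() {
	fmt.Println(order(8, 11)) // [3 6 1 4 7 2 5 0]: each id once
}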

runtime.nextFreeFast

/usr/lib/go/src/runtime/malloc.go

  Total:       870ms      870ms (flat, cum)  1.83%
    927            .          .           var zerobase uintptr 
    928            .          .            
    929            .          .           // nextFreeFast returns the next free object if one is quickly available. 
    930            .          .           // Otherwise it returns 0. 
    931            .          .           func nextFreeFast(s *mspan) gclinkptr { 
    932        510ms      510ms           	theBit := sys.TrailingZeros64(s.allocCache) // Is there a free object in the allocCache? 
    933            .          .           	if theBit < 64 { 
    934         90ms       90ms           		result := s.freeindex + uint16(theBit) 
    935            .          .           		if result < s.nelems { 
    936         30ms       30ms           			freeidx := result + 1 
    937            .          .           			if freeidx%64 == 0 && freeidx != s.nelems { 
    938            .          .           				return 0 
    939            .          .           			} 
    940         90ms       90ms           			s.allocCache >>= uint(theBit + 1) 
    941         40ms       40ms           			s.freeindex = freeidx 
    942         60ms       60ms           			s.allocCount++ 
    943         50ms       50ms           			return gclinkptr(uintptr(result)*s.elemsize + s.base())                             return s.startAddr                                   mheap.go:523
    944            .          .           		} 
    945            .          .           	} 
    946            .          .           	return 0 
    947            .          .           } 
    948            .          .            
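
nextFreeFast burns most of its 870ms in the single TrailingZeros64: allocCache is a 64-bit window of the span's free bitmap, with bit i set meaning slot freeindex+i is free, so finding the next free object is one bit-scan plus a shift. A sketch of the trick, omitting the 64-slot refill boundary the real code also checks:

package main

import (
	"fmt"
	"math/bits"
)

// span mirrors the mspan fields the fast path touches.
type span struct {
	allocCache uint64 // bit i set = slot freeindex+i is free
	freeindex  uint16
	nelems     uint16
}

func (s *span) nextFree() (uint16, bool) {
	theBit := bits.TrailingZeros64(s.allocCache)
	if theBit == 64 {
		return 0, false // cache exhausted; real code refills from the heap bitmap
	}
	idx := s.freeindex + uint16(theBit)
	if idx >= s.nelems {
		return 0, false
	}
	s.allocCache >>= uint(theBit + 1) // drop consumed bits
	s.freeindex = idx + 1
	return idx, true
}

func main() {
	s := &span{allocCache: 0b10110, nelems: 16}
	for {
		idx, ok := s.nextFree()
		if !ok {
			break
		}
		fmt.Println(idx) // 1, 2, 4: one line per set bit
	}
}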

runtime.(*mcache).nextFree

/usr/lib/go/src/runtime/malloc.go

  Total:        30ms      2.53s (flat, cum)  5.31%
    953            .          .           // determine whether a new GC cycle needs to be started or if the GC is active 
    954            .          .           // whether this goroutine needs to assist the GC. 
    955            .          .           // 
    956            .          .           // Must run in a non-preemptible context since otherwise the owner of 
    957            .          .           // c could change. 
    958         20ms      190ms           func (c *mcache) nextFree(spc spanClass) (v gclinkptr, s *mspan, checkGCTrigger bool) { 
    959            .          .           	s = c.alloc[spc] 
    960            .          .           	checkGCTrigger = false 
    961            .      250ms           	freeIndex := s.nextFreeIndex() 
    962         10ms       10ms           	if freeIndex == s.nelems { 
    963            .          .           		// The span is full. 
    964            .          .           		if s.allocCount != s.nelems { 
    965            .          .           			println("runtime: s.allocCount=", s.allocCount, "s.nelems=", s.nelems) 
    966            .          .           			throw("s.allocCount != s.nelems && freeIndex == s.nelems") 
    967            .          .           		} 
    968            .      2.07s           		c.refill(spc) 
    969            .          .           		checkGCTrigger = true 
    970            .          .           		s = c.alloc[spc] 
    971            .          .            
    972            .       10ms           		freeIndex = s.nextFreeIndex() 
    973            .          .           	} 
    974            .          .            
    975            .          .           	if freeIndex >= s.nelems { 
    976            .          .           		throw("freeIndex is not valid") 
    977            .          .           	} 

runtime.mallocgc

/usr/lib/go/src/runtime/malloc.go

  Total:       460ms     11.19s (flat, cum) 23.50%
   1009            .          .           // 
   1010            .          .           // Do not remove or change the type signature. 
   1011            .          .           // See go.dev/issue/67401. 
   1012            .          .           // 
   1013            .          .           //go:linkname mallocgc 
   1014        130ms      2.06s           func mallocgc(size uintptr, typ *_type, needzero bool) unsafe.Pointer { 
   1015            .          .           	if doubleCheckMalloc { 
   1016            .          .           		if gcphase == _GCmarktermination { 
   1017            .          .           			throw("mallocgc called with gcphase == _GCmarktermination") 
   1018            .          .           		} 
   1019            .          .           	} 
   1020            .          .            
   1021            .          .           	// Short-circuit zero-sized allocation requests. 
   1022         50ms       50ms           	if size == 0 { 
   1023         10ms       10ms           		return unsafe.Pointer(&zerobase) 
   1024            .          .           	} 
   1025            .          .            
   1026            .          .           	// It's possible for any malloc to trigger sweeping, which may in 
   1027            .          .           	// turn queue finalizers. Record this dynamic lock edge. 
   1028            .          .           	// N.B. Compiled away if lockrank experiment is not enabled. 
   1029            .          .           	lockRankMayQueueFinalizer() 
   1030            .          .            
   1031            .          .           	// Pre-malloc debug hooks. 
   1032        110ms      110ms           	if debug.malloc { 
   1033            .          .           		if x := preMallocgcDebug(size, typ); x != nil { 
   1034            .          .           			return x 
   1035            .          .           		} 
   1036            .          .           	} 
   1037            .          .            
   1038            .          .           	// For ASAN, we allocate extra memory around each allocation called the "redzone." 
   1039            .          .           	// These "redzones" are marked as unaddressable. 
   1040            .          .           	var asanRZ uintptr 
   1041            .          .           	if asanenabled { 
   1042            .          .           		asanRZ = redZoneSize(size) 
   1043            .          .           		size += asanRZ 
   1044            .          .           	} 
   1045            .          .            
   1046            .          .           	// Assist the GC if needed. 
   1047         60ms       60ms           	if gcBlackenEnabled != 0 { 
   1048            .          .           		deductAssistCredit(size) 
   1049            .          .           	} 
   1050            .          .            
   1051            .          .           	// Actually do the allocation. 
   1052            .          .           	var x unsafe.Pointer 
   1053            .          .           	var elemsize uintptr 
   1054         20ms       20ms           	if size <= maxSmallSize-gc.MallocHeaderSize { 
   1055         30ms       30ms           		if typ == nil || !typ.Pointers() { 
   1056            .          .           			if size < maxTinySize { 
   1057         10ms      210ms           				x, elemsize = mallocgcTiny(size, typ) 
   1058            .          .           			} else { 
   1059         10ms      2.46s           				x, elemsize = mallocgcSmallNoscan(size, typ, needzero) 
   1060            .          .           			} 
   1061            .          .           		} else { 
   1062            .          .           			if !needzero { 
   1063            .          .           				throw("objects with pointers must be zeroed") 
   1064            .          .           			} 
   1065         10ms       10ms           			if heapBitsInSpan(size) { 
   1066         20ms      5.57s           				x, elemsize = mallocgcSmallScanNoHeader(size, typ) 
   1067            .          .           			} else { 
   1068            .      600ms           				x, elemsize = mallocgcSmallScanHeader(size, typ) 
   1069            .          .           			} 
   1070            .          .           		} 
   1071            .          .           	} else { 
   1072            .          .           		x, elemsize = mallocgcLarge(size, typ, needzero) 
   1073            .          .           	} 
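
The dispatch above splits every allocation by size and by whether the type contains pointers: tiny noscan, small noscan, small scan with heap bits in the span or with a malloc header, and large. A hedged sketch of that decision tree; the threshold values are what I believe current gc uses on 64-bit platforms and are assumptions, not taken from the listing:

package main

import "fmt"

// Thresholds mirroring the dispatch in mallocgc above, written out as
// plain constants for illustration.
const (
	maxTinySize            = 16
	mallocHeaderSize       = 8
	maxSmallSize           = 32 << 10
	minSizeForMallocHeader = 512 // heapBitsInSpan: userSize <= gc.MinSizeForMallocHeader
)

func allocPath(size uintptr, hasPointers bool) string {
	if size > maxSmallSize-mallocHeaderSize {
		return "mallocgcLarge"
	}
	if !hasPointers {
		if size < maxTinySize {
			return "mallocgcTiny"
		}
		return "mallocgcSmallNoscan"
	}
	if size <= minSizeForMallocHeader {
		return "mallocgcSmallScanNoHeader"
	}
	return "mallocgcSmallScanHeader"
}

func main() {
	fmt.Println(allocPath(8, false))   // mallocgcTiny
	fmt.Println(allocPath(64, false))  // mallocgcSmallNoscan
	fmt.Println(allocPath(64, true))   // mallocgcSmallScanNoHeader
	fmt.Println(allocPath(1024, true)) // mallocgcSmallScanHeader
}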

runtime.mallocgc

/usr/lib/go/src/runtime/malloc.go

  Total:       120ms      120ms (flat, cum)  0.25%
   1092            .          .           	if valgrindenabled { 
   1093            .          .           		valgrindMalloc(x, size-asanRZ) 
   1094            .          .           	} 
   1095            .          .            
   1096            .          .           	// Adjust our GC assist debt to account for internal fragmentation. 
   1097         50ms       50ms           	if gcBlackenEnabled != 0 && elemsize != 0 { 
   1098            .          .           		if assistG := getg().m.curg; assistG != nil { 
   1099            .          .           			assistG.gcAssistBytes -= int64(elemsize - size) 
   1100            .          .           		} 
   1101            .          .           	} 
   1102            .          .            
   1103            .          .           	// Post-malloc debug hooks. 
   1104         40ms       40ms           	if debug.malloc { 
   1105            .          .           		postMallocgcDebug(x, elemsize, typ) 
   1106            .          .           	} 
   1107         30ms       30ms           	return x 
   1108            .          .           } 
   1109            .          .            
   1110            .          .           func mallocgcTiny(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1111            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1112            .          .           	mp := acquirem() 
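
The adjustment at line 1099 charges the allocating goroutine's GC assist debt for internal fragmentation: assists are accounted against the full slot (elemsize), not just the requested size. A worked example, using what I believe are the first entries of the runtime's size-class table (treat the values as assumptions):

package main

import "fmt"

// Assumed prefix of the runtime's size-class table, for illustration.
var sizeClassToSize = []uintptr{0, 8, 16, 24, 32, 48, 64, 80, 96, 112}

// assistDebt is the extra GC assist debt charged for internal fragmentation,
// per the listing: assistG.gcAssistBytes -= int64(elemsize - size).
func assistDebt(size, elemsize uintptr) int64 { return int64(elemsize - size) }

func main() {
	size := uintptr(100)
	elemsize := sizeClassToSize[9]          // a 100-byte request rounds up to the 112-byte class
	fmt.Println(assistDebt(size, elemsize)) // 12 bytes of fragmentation charged
}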

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        50ms       50ms (flat, cum)   0.1%
   1150            .          .           	// 
   1151            .          .           	// The main targets of tiny allocator are small strings and 
   1152            .          .           	// standalone escaping variables. On a json benchmark 
   1153            .          .           	// the allocator reduces number of allocations by ~12% and 
   1154            .          .           	// reduces heap size by ~20%. 
   1155         30ms       30ms           	c := getMCache(mp) 
   1156            .          .           	off := c.tinyoffset 
   1157            .          .           	// Align tiny pointer for required (conservative) alignment. 
   1158         20ms       20ms           	if size&7 == 0 { 
   1159            .          .           		off = alignUp(off, 8) 
   1160            .          .           	} else if goarch.PtrSize == 4 && size == 12 { 
   1161            .          .           		// Conservatively align 12-byte objects to 8 bytes on 32-bit 
   1162            .          .           		// systems so that objects whose first field is a 64-bit 
   1163            .          .           		// value is aligned to 8 bytes and does not cause a fault on 

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        30ms      130ms (flat, cum)  0.27%
   1174            .          .           		// The object fits into existing tiny block. 
   1175            .          .           		x := unsafe.Pointer(c.tiny + off) 
   1176            .          .           		c.tinyoffset = off + size 
   1177            .          .           		c.tinyAllocs++ 
   1178            .          .           		mp.mallocing = 0 
   1179         10ms       10ms           		releasem(mp) 
   1180            .          .           		return x, 0 
   1181            .          .           	} 
   1182            .          .           	// Allocate a new maxTinySize block. 
   1183            .          .           	checkGCTrigger := false 
   1184            .          .           	span := c.alloc[tinySpanClass] 
   1185         20ms       20ms           	v := nextFreeFast(span) 
   1186            .          .           	if v == 0 { 
   1187            .      100ms           		v, span, checkGCTrigger = c.nextFree(tinySpanClass) 
   1188            .          .           	} 
   1189            .          .           	x := unsafe.Pointer(v) 
   1190            .          .           	(*[2]uint64)(x)[0] = 0 // Always zero 
   1191            .          .           	(*[2]uint64)(x)[1] = 0 
   1192            .          .           	// See if we need to replace the existing tiny block with the new one 
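
The fit check above and the fallback to a fresh block are the heart of the tiny allocator: sub-16-byte pointer-free objects share one block via a bump pointer, using the alignment rules shown at lines 1158 onward. A self-contained sketch of that packing; the 4- and 2-byte alignment arms are what I believe the elided lines contain, so treat them as assumptions:

package main

import "fmt"

const maxTinySize = 16

type tinyCache struct {
	block  [maxTinySize]byte
	offset uintptr
	allocs int // which block we are on; stands in for c.tiny changing
}

func alignUp(n, a uintptr) uintptr { return (n + a - 1) &^ (a - 1) }

// alloc bump-allocates size bytes in the current tiny block, starting a new
// block when the object does not fit. Sketch only, not the runtime code.
func (c *tinyCache) alloc(size uintptr) (blockIndex int, off uintptr) {
	o := c.offset
	if size&7 == 0 {
		o = alignUp(o, 8)
	} else if size&3 == 0 {
		o = alignUp(o, 4)
	} else if size&1 == 0 {
		o = alignUp(o, 2)
	}
	if o+size > maxTinySize { // doesn't fit: allocate a new maxTinySize block
		c.allocs++
		c.offset = size
		return c.allocs, 0
	}
	c.offset = o + size
	return c.allocs, o
}

func main() {
	var c tinyCache
	for _, sz := range []uintptr{1, 2, 8, 7} {
		b, o := c.alloc(sz)
		fmt.Printf("size %d -> block %d offset %d\n", sz, b, o)
	}
}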

runtime.mallocgcTiny

/usr/lib/go/src/runtime/malloc.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1230            .          .           	// 
   1231            .          .           	// TODO(mknyszek): We should really count the header as part 
   1232            .          .           	// of gc_sys or something. The code below just pretends it is 
   1233            .          .           	// internal fragmentation and matches the GC's accounting by 
   1234            .          .           	// using the whole allocation slot. 
   1235         10ms       10ms           	c.nextSample -= int64(span.elemsize) 
   1236            .          .           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1237            .          .           		profilealloc(mp, x, span.elemsize) 
   1238            .          .           	} 
   1239            .          .           	mp.mallocing = 0 
   1240            .          .           	releasem(mp) 
   1241            .          .            
   1242         10ms       10ms           	if checkGCTrigger { 
   1243            .          .           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1244            .          .           			gcStart(t) 
   1245            .          .           		} 
   1246            .          .           	} 
   1247            .          .            

runtime.mallocgcSmallNoscan

/usr/lib/go/src/runtime/malloc.go

  Total:       310ms      2.45s (flat, cum)  5.14%
   1261            .          .           		x = add(x, span.elemsize-size) 
   1262            .          .           	} 
   1263            .          .           	return x, span.elemsize 
   1264            .          .           } 
   1265            .          .            
   1266         10ms      1.81s           func mallocgcSmallNoscan(size uintptr, typ *_type, needzero bool) (unsafe.Pointer, uintptr) { 
   1267            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1268         30ms       30ms           	mp := acquirem() 
   1269            .          .           	if doubleCheckMalloc { 
   1270            .          .           		if mp.mallocing != 0 { 
   1271            .          .           			throw("malloc deadlock") 
   1272            .          .           		} 
   1273            .          .           		if mp.gsignal == getg() { 
   1274            .          .           			throw("malloc during signal") 
   1275            .          .           		} 
   1276            .          .           		if typ != nil && typ.Pointers() { 
   1277            .          .           			throw("expected noscan type for noscan alloc") 
   1278            .          .           		} 
   1279            .          .           	} 
   1280            .          .           	mp.mallocing = 1 
   1281            .          .            
   1282            .          .           	checkGCTrigger := false 
   1283            .          .           	c := getMCache(mp) 
   1284            .          .           	var sizeclass uint8 
   1285         10ms       10ms           	if size <= gc.SmallSizeMax-8 { 
   1286            .          .           		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1287            .          .           	} else { 
   1288            .          .           		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 
   1289            .          .           	} 
   1290         10ms       10ms           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1291         10ms       10ms           	spc := makeSpanClass(sizeclass, true) 
   1292         10ms       10ms           	span := c.alloc[spc] 
   1293        160ms      160ms           	v := nextFreeFast(span) 
   1294            .          .           	if v == 0 { 
   1295            .      240ms           		v, span, checkGCTrigger = c.nextFree(spc) 
   1296            .          .           	} 
   1297            .          .           	x := unsafe.Pointer(v) 
   1298            .          .           	if needzero && span.needzero != 0 { 
   1299            .       80ms           		memclrNoHeapPointers(x, size) 
   1300            .          .           	} 
   1301            .          .            
   1302            .          .           	// Ensure that the stores above that initialize x to 
   1303            .          .           	// type-safe memory and set the heap bits occur before 
   1304            .          .           	// the caller can make x observable to the garbage 
   1305            .          .           	// collector. Otherwise, on weakly ordered machines, 
   1306            .          .           	// the garbage collector could follow a pointer to x, 
   1307            .          .           	// but see uninitialized memory or stale heap bits. 
   1308            .          .           	publicationBarrier() 
   1309            .          .            
   1310         10ms       10ms           	if writeBarrier.enabled { 
   1311            .          .           		// Allocate black during GC. 
   1312            .          .           		// All slots hold nil so no scanning is needed. 
   1313            .          .           		// This may be racing with GC so do it atomically if there can be 
   1314            .          .           		// a race marking the bit. 
   1315            .          .           		gcmarknewobject(span, uintptr(x)) 
   1316            .          .           	} else { 
   1317            .          .           		// Track the last free index before the mark phase. This field 
   1318            .          .           		// is only used by the garbage collector. During the mark phase 
   1319            .          .           		// this is used by the conservative scanner to filter out objects 
   1320            .          .           		// that are both free and recently-allocated. It's safe to do that 
   1321            .          .           		// because we allocate-black if the GC is enabled. The conservative 
   1322            .          .           		// scanner produces pointers out of thin air, so without additional 
   1323            .          .           		// synchronization it might otherwise observe a partially-initialized 
   1324            .          .           		// object, which could crash the program. 
   1325         10ms       10ms           		span.freeIndexForScan = span.freeindex 
   1326            .          .           	} 
   1327            .          .            
   1328            .          .           	// Note cache c only valid while m acquired; see #47302 
   1329            .          .           	// 
   1330            .          .           	// N.B. Use the full size because that matches how the GC 
   1331            .          .           	// will update the mem profile on the "free" side. 
   1332            .          .           	// 
   1333            .          .           	// TODO(mknyszek): We should really count the header as part 
   1334            .          .           	// of gc_sys or something. The code below just pretends it is 
   1335            .          .           	// internal fragmentation and matches the GC's accounting by 
   1336            .          .           	// using the whole allocation slot. 
   1337            .          .           	c.nextSample -= int64(size) 
   1338         20ms       20ms           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1339            .       10ms           		profilealloc(mp, x, size) 
   1340            .          .           	} 
   1341         10ms       10ms           	mp.mallocing = 0 
   1342         20ms       20ms           	releasem(mp) 
   1343            .          .            
   1344            .          .           	if checkGCTrigger { 
   1345            .       10ms           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1346            .          .           			gcStart(t) 
   1347            .          .           		} 
   1348            .          .           	} 
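
Both small paths map the request to a size class with the table lookups shown at lines 1286/1288 above (and 1373 below); divRoundUp is the only arithmetic involved. A sketch, assuming SmallSizeDiv is 8 as in current gc:

package main

import "fmt"

// divRoundUp mirrors the helper used in the size-class lookups above.
func divRoundUp(n, a uintptr) uintptr { return (n + a - 1) / a }

func main() {
	// Sizes up to SmallSizeMax-8 index SizeToSizeClass8 by
	// divRoundUp(size, SmallSizeDiv). SmallSizeDiv = 8 is an assumption
	// here, not shown in the listing.
	const smallSizeDiv = 8
	for _, size := range []uintptr{1, 8, 9, 33, 1024} {
		fmt.Printf("size %4d -> table index %d\n", size, divRoundUp(size, smallSizeDiv))
	}
}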

runtime.mallocgcSmallScanNoHeader

/usr/lib/go/src/runtime/malloc.go

  Total:       2.17s      5.55s (flat, cum) 11.65%
   1349            .          .           	return x, size 
   1350            .          .           } 
   1351            .          .            
   1352         60ms      180ms           func mallocgcSmallScanNoHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1353            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1354        100ms      100ms           	mp := acquirem() 
   1355            .          .           	if doubleCheckMalloc { 
   1356            .          .           		if mp.mallocing != 0 { 
   1357            .          .           			throw("malloc deadlock") 
   1358            .          .           		} 
   1359            .          .           		if mp.gsignal == getg() { 
   1360            .          .           			throw("malloc during signal") 
   1361            .          .           		} 
   1362            .          .           		if typ == nil || !typ.Pointers() { 
   1363            .          .           			throw("noscan allocated in scan-only path") 
   1364            .          .           		} 
   1365            .          .           		if !heapBitsInSpan(size) { 
   1366            .          .           			throw("heap bits in not in span for non-header-only path") 
   1367            .          .           		} 
   1368            .          .           	} 
   1369            .          .           	mp.mallocing = 1 
   1370            .          .            
   1371            .          .           	checkGCTrigger := false 
   1372        200ms      200ms           	c := getMCache(mp) 
   1373        150ms      150ms           	sizeclass := gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1374        160ms      160ms           	spc := makeSpanClass(sizeclass, false) 
   1375        120ms      120ms           	span := c.alloc[spc] 
   1376        670ms      670ms           	v := nextFreeFast(span) 
   1377            .          .           	if v == 0 { 
   1378            .      1.81s           		v, span, checkGCTrigger = c.nextFree(spc) 
   1379            .          .           	} 
   1380            .          .           	x := unsafe.Pointer(v) 
   1381         20ms       20ms           	if span.needzero != 0 { 
   1382         20ms      370ms           		memclrNoHeapPointers(x, size) 
   1383            .          .           	} 
   1384         30ms       30ms           	if goarch.PtrSize == 8 && sizeclass == 1 { 
   1385            .          .           		// initHeapBits already set the pointer bits for the 8-byte sizeclass 
   1386            .          .           		// on 64-bit platforms. 
   1387            .          .           		c.scanAlloc += 8 
   1388            .          .           	} else { 
   1389        110ms      1.01s           		c.scanAlloc += heapSetTypeNoHeader(uintptr(x), size, typ, span) 
   1390            .          .           	} 
   1391         30ms       30ms           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1392            .          .            
   1393            .          .           	// Ensure that the stores above that initialize x to 
   1394            .          .           	// type-safe memory and set the heap bits occur before 
   1395            .          .           	// the caller can make x observable to the garbage 
   1396            .          .           	// collector. Otherwise, on weakly ordered machines, 
   1397            .          .           	// the garbage collector could follow a pointer to x, 
   1398            .          .           	// but see uninitialized memory or stale heap bits. 
   1399         20ms       20ms           	publicationBarrier() 
   1400            .          .            
   1401         10ms       10ms           	if writeBarrier.enabled { 
   1402            .          .           		// Allocate black during GC. 
   1403            .          .           		// All slots hold nil so no scanning is needed. 
   1404            .          .           		// This may be racing with GC so do it atomically if there can be 
   1405            .          .           		// a race marking the bit. 
   1406            .          .           		gcmarknewobject(span, uintptr(x)) 
   1407            .          .           	} else { 
   1408            .          .           		// Track the last free index before the mark phase. This field 
   1409            .          .           		// is only used by the garbage collector. During the mark phase 
   1410            .          .           		// this is used by the conservative scanner to filter out objects 
   1411            .          .           		// that are both free and recently-allocated. It's safe to do that 
   1412            .          .           		// because we allocate-black if the GC is enabled. The conservative 
   1413            .          .           		// scanner produces pointers out of thin air, so without additional 
   1414            .          .           		// synchronization it might otherwise observe a partially-initialized 
   1415            .          .           		// object, which could crash the program. 
   1416         40ms       40ms           		span.freeIndexForScan = span.freeindex 
   1417            .          .           	} 
   1418            .          .            
   1419            .          .           	// Note cache c only valid while m acquired; see #47302 
   1420            .          .           	// 
   1421            .          .           	// N.B. Use the full size because that matches how the GC 
   1422            .          .           	// will update the mem profile on the "free" side. 
   1423            .          .           	// 
   1424            .          .           	// TODO(mknyszek): We should really count the header as part 
   1425            .          .           	// of gc_sys or something. The code below just pretends it is 
   1426            .          .           	// internal fragmentation and matches the GC's accounting by 
   1427            .          .           	// using the whole allocation slot. 
   1428         10ms       10ms           	c.nextSample -= int64(size) 
   1429         30ms       30ms           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1430        120ms      270ms           		profilealloc(mp, x, size) 
   1431            .          .           	} 
   1432         20ms       20ms           	mp.mallocing = 0 
   1433         40ms       40ms           	releasem(mp) 
   1434            .          .            
   1435        160ms      160ms           	if checkGCTrigger { 
   1436            .       50ms           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1437            .          .           			gcStart(t) 
   1438            .          .           		} 
   1439            .          .           	} 
   1440         50ms       50ms           	return x, size 
   1441            .          .           } 
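
The publicationBarrier at line 1399 orders the zeroing and heap-bit stores before any pointer to x can become visible to the garbage collector. Portable Go code expresses the same publish-then-consume discipline with an atomic pointer store; this is a sketch of the idea, not the runtime's mechanism (which is a compiler/architecture-level barrier):

package main

import (
	"fmt"
	"sync/atomic"
)

type node struct{ v int }

var shared atomic.Pointer[node]

// publish initializes the object fully, then makes it visible with an
// atomic (release) store, so no reader observes a half-built node.
func publish(v int) {
	n := &node{v: v} // initialize first...
	shared.Store(n)  // ...then publish
}

func main() {
	publish(42)
	if n := shared.Load(); n != nil {
		fmt.Println(n.v)
	}
}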

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1442            .          .            
   1443         10ms       10ms           func mallocgcSmallScanHeader(size uintptr, typ *_type) (unsafe.Pointer, uintptr) { 
   1444            .          .           	// Set mp.mallocing to keep from being preempted by GC. 
   1445            .          .           	mp := acquirem() 
   1446            .          .           	if doubleCheckMalloc { 
   1447            .          .           		if mp.mallocing != 0 { 
   1448            .          .           			throw("malloc deadlock") 

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        60ms      560ms (flat, cum)  1.18%
   1466            .          .           	if size <= gc.SmallSizeMax-8 { 
   1467            .          .           		sizeclass = gc.SizeToSizeClass8[divRoundUp(size, gc.SmallSizeDiv)] 
   1468            .          .           	} else { 
   1469            .          .           		sizeclass = gc.SizeToSizeClass128[divRoundUp(size-gc.SmallSizeMax, gc.LargeSizeDiv)] 
   1470            .          .           	} 
   1471         10ms       10ms           	size = uintptr(gc.SizeClassToSize[sizeclass]) 
   1472            .          .           	spc := makeSpanClass(sizeclass, false) 
   1473            .          .           	span := c.alloc[spc] 
   1474         20ms       20ms           	v := nextFreeFast(span) 
   1475            .          .           	if v == 0 { 
   1476            .      380ms           		v, span, checkGCTrigger = c.nextFree(spc) 
   1477            .          .           	} 
   1478            .          .           	x := unsafe.Pointer(v) 
   1479            .          .           	if span.needzero != 0 { 
   1480            .      120ms           		memclrNoHeapPointers(x, size) 
   1481            .          .           	} 
   1482            .          .           	header := (**_type)(x) 
   1483            .          .           	x = add(x, gc.MallocHeaderSize) 
   1484         30ms       30ms           	c.scanAlloc += heapSetTypeSmallHeader(uintptr(x), size-gc.MallocHeaderSize, typ, header, span) 
   1485            .          .            
   1486            .          .           	// Ensure that the stores above that initialize x to 
   1487            .          .           	// type-safe memory and set the heap bits occur before 
   1488            .          .           	// the caller can make x observable to the garbage 
   1489            .          .           	// collector. Otherwise, on weakly ordered machines, 

runtime.mallocgcSmallScanHeader

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms       30ms (flat, cum) 0.063%
   1518            .          .           	// of gc_sys or something. The code below just pretends it is 
   1519            .          .           	// internal fragmentation and matches the GC's accounting by 
   1520            .          .           	// using the whole allocation slot. 
   1521            .          .           	c.nextSample -= int64(size) 
   1522            .          .           	if c.nextSample < 0 || MemProfileRate != c.memProfRate { 
   1523            .       20ms           		profilealloc(mp, x, size) 
   1524            .          .           	} 
   1525            .          .           	mp.mallocing = 0 
   1526         10ms       10ms           	releasem(mp) 
   1527            .          .            
   1528            .          .           	if checkGCTrigger { 
   1529            .          .           		if t := (gcTrigger{kind: gcTriggerHeap}); t.test() { 
   1530            .          .           			gcStart(t) 
   1531            .          .           		} 
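
mallocgcSmallScanHeader (lines 1482-1484 above) stores a type pointer in the first word of the slot and hands the caller a pointer just past it. A sketch of that layout using ordinary structs; typeInfo and the field names are hypothetical stand-ins for the runtime's *_type:

package main

import (
	"fmt"
	"unsafe"
)

type typeInfo struct{ name string }

// slot models a heap slot with a malloc header: a type-pointer header in the
// first word, user data after it.
type slot struct {
	header  *typeInfo // what `header := (**_type)(x); *header = typ` writes
	payload [48]byte  // the caller's pointer is x + gc.MallocHeaderSize
}

func main() {
	s := &slot{header: &typeInfo{name: "example.T"}}
	fmt.Println("header size:", unsafe.Offsetof(s.payload)) // 8 on 64-bit
	fmt.Println("type:", s.header.name)
}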

runtime.newobject

/usr/lib/go/src/runtime/malloc.go

  Total:       440ms      5.29s (flat, cum) 11.11%
   1741            .          .           } 
   1742            .          .            
   1743            .          .           // implementation of new builtin 
   1744            .          .           // compiler (both frontend and SSA backend) knows the signature 
   1745            .          .           // of this function. 
   1746         70ms      100ms           func newobject(typ *_type) unsafe.Pointer { 
   1747        370ms      5.19s           	return mallocgc(typ.Size_, typ, true) 
   1748            .          .           } 
   1749            .          .            
   1750            .          .           //go:linkname maps_newobject internal/runtime/maps.newobject 
   1751            .          .           func maps_newobject(typ *_type) unsafe.Pointer { 
   1752            .          .           	return newobject(typ) 
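
newobject is the runtime entry for the new builtin: every new(T) (and escaping composite literal) the compiler cannot keep on the stack lands in the mallocgc call at line 1747. A minimal program whose allocation takes this path; sink exists only to force the escape:

package main

import "fmt"

type T struct{ a, b int64 }

// sink forces p to escape, so new(T) lowers to a runtime.newobject call
// rather than a stack allocation.
var sink *T

func main() {
	p := new(T) // the compiler emits a call to runtime.newobject here
	sink = p
	fmt.Println(sink.a)
}

Building with go build -gcflags=-m reports "new(T) escapes to heap", confirming the allocation is the heap path profiled above.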

runtime.newarray

/usr/lib/go/src/runtime/malloc.go

  Total:        60ms      970ms (flat, cum)  2.04%
   1784            .          .           // 
   1785            .          .           // Do not remove or change the type signature. 
   1786            .          .           // See go.dev/issue/67401. 
   1787            .          .           // 
   1788            .          .           //go:linkname newarray 
   1789         10ms       10ms           func newarray(typ *_type, n int) unsafe.Pointer { 
   1790            .          .           	if n == 1 { 
   1791         40ms      760ms           		return mallocgc(typ.Size_, typ, true) 
   1792            .          .           	} 
   1793         10ms       10ms           	mem, overflow := math.MulUintptr(typ.Size_, uintptr(n)) 
   1794            .          .           	if overflow || mem > maxAlloc || n < 0 { 
   1795            .          .           		panic(plainError("runtime: allocation size out of range")) 
   1796            .          .           	} 
   1797            .      190ms           	return mallocgc(mem, typ, true) 
   1798            .          .           } 
   1799            .          .            
   1800            .          .           // reflect_unsafe_NewArray is meant for package reflect, 
   1801            .          .           // but widely used packages access it using linkname. 
   1802            .          .           // Notable members of the hall of shame include: 
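
The check at line 1794 is what keeps array-allocation arithmetic safe: the element size times the count must neither overflow a uintptr nor exceed maxAlloc. A sketch of the checked multiply, assuming math.MulUintptr is equivalent to the full-width multiply below (the real implementation differs in form, not behavior):

package main

import (
	"fmt"
	"math/bits"
)

// mulUintptr mirrors the overflow-checked multiply newarray performs: the
// product overflows exactly when the high word of the full multiply is
// nonzero.
func mulUintptr(a, b uintptr) (uintptr, bool) {
	hi, lo := bits.Mul(uint(a), uint(b))
	return uintptr(lo), hi != 0
}

func main() {
	mem, overflow := mulUintptr(24, 1000) // 24-byte elements, n = 1000
	fmt.Println(mem, overflow)            // 24000 false
	_, overflow = mulUintptr(1<<40, 1<<40)
	fmt.Println(overflow) // true on 64-bit
}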

internal/runtime/maps.newarray

/usr/lib/go/src/runtime/malloc.go

  Total:           0      970ms (flat, cum)  2.04%
   1816            .          .           	return newarray(typ, n) 
   1817            .          .           } 
   1818            .          .            
   1819            .          .           //go:linkname maps_newarray internal/runtime/maps.newarray 
   1820            .          .           func maps_newarray(typ *_type, n int) unsafe.Pointer { 
   1821            .      970ms           	return newarray(typ, n) 
   1822            .          .           } 
   1823            .          .            
   1824            .          .           // profilealloc resets the current mcache's nextSample counter and 

runtime.profilealloc

/usr/lib/go/src/runtime/malloc.go

  Total:        10ms      180ms (flat, cum)  0.38%
   1826            .          .           // 
   1827            .          .           // The caller must be non-preemptible and have a P. 
   1828            .          .           func profilealloc(mp *m, x unsafe.Pointer, size uintptr) { 
   1829         10ms       10ms           	c := getMCache(mp) 
   1830            .          .           	if c == nil { 
   1831            .          .           		throw("profilealloc called without a P or outside bootstrapping") 
   1832            .          .           	} 
   1833            .          .           	c.memProfRate = MemProfileRate 
   1834            .          .           	c.nextSample = nextSample() 
   1835            .      170ms           	mProf_Malloc(mp, x, size) 
   1836            .          .           } 
   1837            .          .            
   1838            .          .           // nextSample returns the next sampling point for heap profiling. The goal is 
   1839            .          .           // to sample allocations on average every MemProfileRate bytes, but with a 
   1840            .          .           // completely random distribution over the allocation timeline; this 
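
The doc comment above (truncated by the listing) describes exponential sampling: nextSample draws a byte gap so that on average one allocation per MemProfileRate bytes is recorded, with no periodic bias. A sketch of such a draw; the runtime uses a fixed-point variant, and this float version is only illustrative:

package main

import (
	"fmt"
	"math"
	"math/rand"
)

// nextSampleSketch draws the byte gap until the next sampled allocation.
// Sampling each byte independently with probability 1/rate makes the gap
// exponentially distributed with mean rate.
func nextSampleSketch(rate int) int64 {
	return int64(-math.Log(1-rand.Float64()) * float64(rate))
}

func main() {
	const rate = 512 * 1024 // the default runtime.MemProfileRate
	var sum int64
	const n = 100000
	for i := 0; i < n; i++ {
		sum += nextSampleSketch(rate)
	}
	fmt.Printf("mean gap %d bytes (target %d)\n", sum/n, rate)
}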

runtime.mutexPreferLowLatency

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        10ms       10ms (flat, cum) 0.021%
    133            .          .           //go:nosplit 
    134            .          .           func mutexPreferLowLatency(l *mutex) bool { 
    135            .          .           	switch l { 
    136            .          .           	default: 
    137            .          .           		return false 
    138         10ms       10ms           	case &sched.lock: 
    139            .          .           		// We often expect sched.lock to pass quickly between Ms in a way that 
    140            .          .           		// each M has unique work to do: for instance when we stop-the-world 
    141            .          .           		// (bringing each P to idle) or add new netpoller-triggered work to the 
    142            .          .           		// global run queue. 
    143            .          .           		return true 

runtime.lock

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:           0      1.08s (flat, cum)  2.27%
    147            .          .           func mutexContended(l *mutex) bool { 
    148            .          .           	return atomic.Loaduintptr(&l.key)&^mutexMMask != 0 
    149            .          .           } 
    150            .          .            
    151            .          .           func lock(l *mutex) { 
    152            .      1.08s           	lockWithRank(l, getLockRank(l)) 
    153            .          .           } 

runtime.lock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:       830ms      1.08s (flat, cum)  2.27%
    154            .          .            
    155         10ms       10ms           func lock2(l *mutex) { 
    156            .          .           	gp := getg() 
    157         20ms       20ms           	if gp.m.locks < 0 { 
    158            .          .           		throw("runtime·lock: lock count") 
    159            .          .           	} 
    160         10ms       10ms           	gp.m.locks++ 
    161            .          .            
    162            .          .           	k8 := key8(&l.key) 
    163            .          .            
    164            .          .           	// Speculative grab for lock. 
    165            .          .           	v8 := atomic.Xchg8(k8, mutexLocked) 
    166        530ms      530ms           	if v8&mutexLocked == 0 { 
    167         50ms       50ms           		if v8&mutexSleeping != 0 { 
    168            .          .           			atomic.Or8(k8, mutexSleeping) 
    169            .          .           		} 
    170         40ms       40ms           		return 
    171            .          .           	} 
    172            .          .           	semacreate(gp.m) 
    173            .          .            
    174            .          .           	var startTime int64 
    175            .          .           	// On uniprocessors, no point spinning. 
    176            .          .           	// On multiprocessors, spin for mutexActiveSpinCount attempts. 
    177            .          .           	spin := 0 
    178            .          .           	if numCPUStartup > 1 { 
    179            .          .           		spin = mutexActiveSpinCount 
    180            .          .           	} 
    181            .          .            
    182            .          .           	var weSpin, atTail, haveTimers bool 
    183            .          .           	v := atomic.Loaduintptr(&l.key) 
    184            .          .           tryAcquire: 
    185            .          .           	for i := 0; ; i++ { 
    186         10ms       10ms           		if v&mutexLocked == 0 { 
    187            .          .           			if weSpin { 
    188            .          .           				next := (v &^ mutexSpinning) | mutexSleeping | mutexLocked 
    189            .          .           				if next&^mutexMMask == 0 { 
    190            .          .           					// The fast-path Xchg8 may have cleared mutexSleeping. Fix 
    191            .          .           					// the hint so unlock2 knows when to use its slow path. 
    192            .          .           					next = next &^ mutexSleeping 
    193            .          .           				} 
    194            .          .           				if atomic.Casuintptr(&l.key, v, next) { 
    195         10ms       10ms           					gp.m.mLockProfile.end(startTime) 
    196            .          .           					return 
    197            .          .           				} 
    198            .          .           			} else { 
    199            .          .           				prev8 := atomic.Xchg8(k8, mutexLocked|mutexSleeping) 
    200            .          .           				if prev8&mutexLocked == 0 { 
    201            .          .           					gp.m.mLockProfile.end(startTime) 
    202            .          .           					return 
    203            .          .           				} 
    204            .          .           			} 
    205            .          .           			v = atomic.Loaduintptr(&l.key) 
    206            .          .           			continue tryAcquire 
    207            .          .           		} 
    208            .          .            
    209         10ms       10ms           		if !weSpin && v&mutexSpinning == 0 && atomic.Casuintptr(&l.key, v, v|mutexSpinning) { 
    210            .          .           			v |= mutexSpinning 
    211            .          .           			weSpin = true 
    212            .          .           		} 
    213            .          .            
    214         10ms       10ms           		if weSpin || atTail || mutexPreferLowLatency(l) { 
    215         70ms       70ms           			if i < spin { 
    216         10ms       20ms           				procyield(mutexActiveSpinSize) 
    217            .          .           				v = atomic.Loaduintptr(&l.key) 
    218            .          .           				continue tryAcquire 
    219            .          .           			} else if i < spin+mutexPassiveSpinCount { 
    220            .      200ms           				osyield() // TODO: Consider removing this step. See https://go.dev/issue/69268. 
    221            .          .           				v = atomic.Loaduintptr(&l.key) 
    222            .          .           				continue tryAcquire 
    223            .          .           			} 
    224            .          .           		} 
    225            .          .            
    226            .          .           		// Go to sleep 
    227            .          .           		if v&mutexLocked == 0 { 
    228            .          .           			throw("runtime·lock: sleeping while lock is available") 
    229            .          .           		} 
    230            .          .            
    231            .          .           		// Collect times for mutex profile (seen in unlock2 only via mWaitList), 
    232            .          .           		// and for "/sync/mutex/wait/total:seconds" metric (to match). 
    233            .          .           		if !haveTimers { 
    234         10ms       10ms           			gp.m.mWaitList.startTicks = cputicks() 
    235            .          .           			startTime = gp.m.mLockProfile.start() 
    236            .          .           			haveTimers = true 
    237            .          .           		} 
    238            .          .           		// Store the current head of the list of sleeping Ms in our gp.m.mWaitList.next field 
    239            .          .           		gp.m.mWaitList.next = mutexWaitListHead(v) 
    240            .          .            
    241            .          .           		// Pack a (partial) pointer to this M with the current lock state bits 
    242            .          .           		next := (uintptr(unsafe.Pointer(gp.m)) &^ mutexMMask) | v&mutexMMask | mutexSleeping 
    243            .          .           		if weSpin { // If we were spinning, prepare to retire 
    244            .          .           			next = next &^ mutexSpinning 
    245            .          .           		} 
    246            .          .            
    247         10ms       10ms           		if atomic.Casuintptr(&l.key, v, next) { 
    248            .          .           			weSpin = false 
    249            .          .           			// We've pushed ourselves onto the stack of waiters. Wait. 
    250            .       40ms           			semasleep(-1) 
    251            .          .           			atTail = gp.m.mWaitList.next == 0 // we were at risk of starving 
    252            .          .           			i = 0 
    253            .          .           		} 
    254            .          .            
    255         10ms       10ms           		gp.m.mWaitList.next = 0 
    256         20ms       20ms           		v = atomic.Loaduintptr(&l.key) 
    257            .          .           	} 
    258            .          .           } 
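
The fast path at lines 165-170 is a single exchange on the low byte of l.key: if the previous value had the locked bit clear, the lock is acquired without any CAS loop, and only the mutexSleeping hint needs repair. The runtime does this with Xchg8 on one byte; sync/atomic has no 8-bit exchange, so this sketch stands in a 32-bit word, and the bit layout is illustrative:

package main

import (
	"fmt"
	"sync/atomic"
)

const (
	mutexLocked   uint32 = 1 << 0
	mutexSleeping uint32 = 1 << 1 // hint that sleepers exist; preserved on the slow path
)

type mutexSketch struct{ key uint32 }

// tryFastLock is the speculative grab: one atomic RMW, success iff the lock
// was previously free.
func (l *mutexSketch) tryFastLock() bool {
	v := atomic.SwapUint32(&l.key, mutexLocked)
	return v&mutexLocked == 0
}

func main() {
	var l mutexSketch
	fmt.Println(l.tryFastLock()) // true: acquired on the fast path
	fmt.Println(l.tryFastLock()) // false: contended; lock2 would spin, then sleep
}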

runtime.unlock

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:           0      720ms (flat, cum)  1.51%
    259            .          .            
    260            .          .           func unlock(l *mutex) { 
    261            .      720ms           	unlockWithRank(l) 
    262            .          .           } 
    263            .          .            

runtime.unlock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        70ms       70ms (flat, cum)  0.15%
    265            .          .           // 
    266            .          .           //go:nowritebarrier 
    267         10ms       10ms           func unlock2(l *mutex) { 
    268            .          .           	gp := getg() 
    269            .          .            
    270            .          .           	var prev8 uint8 
    271            .          .           	var haveStackLock bool 
    272            .          .           	var endTicks int64 
    273         60ms       60ms           	if !mutexSampleContention() { 
    274            .          .           		// Not collecting a sample for the contention profile, do the quick release 
    275            .          .           		prev8 = atomic.Xchg8(key8(&l.key), 0) 
    276            .          .           	} else { 
    277            .          .           		// If there's contention, we'll sample it. Don't allow another 
    278            .          .           		// lock2/unlock2 pair to finish before us and take our blame. Prevent 

runtime.unlock2

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:       540ms      650ms (flat, cum)  1.36%
    308            .          .           	} 
    309            .          .           	if prev8&mutexLocked == 0 { 
    310            .          .           		throw("unlock of unlocked lock") 
    311            .          .           	} 
    312            .          .            
    313         10ms       10ms           	if prev8&mutexSleeping != 0 { 
    314        340ms      450ms           		unlock2Wake(l, haveStackLock, endTicks) 
    315            .          .           	} 
    316            .          .            
    317         70ms       70ms           	gp.m.mLockProfile.store() 
    318         10ms       10ms           	gp.m.locks-- 
    319         60ms       60ms           	if gp.m.locks < 0 { 
    320            .          .           		throw("runtime·unlock: lock count") 
    321            .          .           	} 
    322         40ms       40ms           	if gp.m.locks == 0 && gp.preempt { // restore the preemption request in case we've cleared it in newstack 
    323            .          .           		gp.stackguard0 = stackPreempt 
    324            .          .           	} 
    325         10ms       10ms           } 
    326            .          .            
    327            .          .           // mutexSampleContention returns whether the current mutex operation should 

runtime.mutexSampleContention

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        40ms       40ms (flat, cum) 0.084%
    328            .          .           // report any contention it discovers. 
    329            .          .           func mutexSampleContention() bool { 
    330         40ms       40ms           	if rate := int64(atomic.Load64(&mutexprofilerate)); rate <= 0 { 
    331            .          .           		return false 
    332            .          .           	} else { 
    333            .          .           		// TODO: have SetMutexProfileFraction do the clamping 
    334            .          .           		rate32 := uint32(rate) 
    335            .          .           		if int64(rate32) != rate { 
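
mutexprofilerate, read at line 330, is the knob behind runtime.SetMutexProfileFraction: on average 1 in rate contention events is recorded in the mutex profile, and 0 disables sampling. Usage:

package main

import (
	"fmt"
	"runtime"
)

func main() {
	// Sample roughly one in five contention events; the call returns the
	// previous setting so it can be restored later.
	prev := runtime.SetMutexProfileFraction(5)
	fmt.Println("previous rate:", prev)
}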

runtime.unlock2Wake

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:        40ms       40ms (flat, cum) 0.084%
    345            .          .           func unlock2Wake(l *mutex, haveStackLock bool, endTicks int64) { 
    346            .          .           	v := atomic.Loaduintptr(&l.key) 
    347            .          .            
    348            .          .           	// On occasion, seek out and wake the M at the bottom of the stack so it 
    349            .          .           	// doesn't starve. 
    350         10ms       10ms           	antiStarve := cheaprandn(mutexTailWakePeriod) == 0 
    351            .          .            
    352         10ms       10ms           	if haveStackLock { 
    353            .          .           		goto useStackLock 
    354            .          .           	} 
    355            .          .            
    356            .          .           	if !(antiStarve || // avoiding starvation may require a wake 
    357            .          .           		v&mutexSpinning == 0 || // no spinners means we must wake 
    358            .          .           		mutexPreferLowLatency(l)) { // prefer waiters be awake as much as possible 
    359            .          .           		return 
    360            .          .           	} 
    361            .          .            
    362            .          .           	for { 
    363         10ms       10ms           		if v&^mutexMMask == 0 || v&mutexStackLocked != 0 { 
    364            .          .           			// No waiting Ms means nothing to do. 
    365            .          .           			// 
    366            .          .           			// If the stack lock is unavailable, its owner would make the same 
    367            .          .           			// wake decisions that we would, so there's nothing for us to do. 
    368            .          .           			// 
    369            .          .           			// Although: This thread may have a different call stack, which 
    370            .          .           			// would result in a different entry in the mutex contention profile 
    371            .          .           			// (upon completion of go.dev/issue/66999). That could lead to weird 
    372            .          .           			// results if a slow critical section ends but another thread 
    373            .          .           			// quickly takes the lock, finishes its own critical section, 
    374            .          .           			// releases the lock, and then grabs the stack lock. That quick 
    375            .          .           			// thread would then take credit (blame) for the delay that this 
    376            .          .           			// slow thread caused. The alternative is to have more expensive 
    377            .          .           			// atomic operations (a CAS) on the critical path of unlock2. 
    378            .          .           			return 
    379            .          .           		} 
    380            .          .           		// Other M's are waiting for the lock. 
    381            .          .           		// Obtain the stack lock, and pop off an M. 
    382            .          .           		next := v | mutexStackLocked 
    383         10ms       10ms           		if atomic.Casuintptr(&l.key, v, next) { 
    384            .          .           			break 
    385            .          .           		} 
    386            .          .           		v = atomic.Loaduintptr(&l.key) 
    387            .          .           	} 
    388            .          .            

runtime.unlock2Wake

/usr/lib/go/src/runtime/lock_spinbit.go

  Total:           0       70ms (flat, cum)  0.15%
    456            .          .            
    457            .          .           		next := headM | flags 
    458            .          .           		if atomic.Casuintptr(&l.key, v, next) { 
    459            .          .           			if wakem != nil { 
    460            .          .           				// Claimed an M. Wake it. 
    461            .       70ms           				semawakeup(wakem) 
    462            .          .           			} 
    463            .          .           			return 
    464            .          .           		} 
    465            .          .            
    466            .          .           		v = atomic.Loaduintptr(&l.key) 

runtime.stackpoolalloc

/usr/lib/go/src/runtime/stack.go

  Total:        90ms      100ms (flat, cum)  0.21%
    222            .          .           			s.manualFreeList = x 
    223            .          .           		} 
    224            .          .           		list.insert(s) 
    225            .          .           	} 
    226            .          .           	x := s.manualFreeList 
    227         20ms       20ms           	if x.ptr() == nil { 
    228            .          .           		throw("span has no free stacks") 
    229            .          .           	} 
    230         70ms       70ms           	s.manualFreeList = x.ptr().next 
    231            .          .           	s.allocCount++ 
    232            .          .           	if s.manualFreeList.ptr() == nil { 
    233            .          .           		// all stacks in s are allocated. 
    234            .       10ms           		list.remove(s) 
    235            .          .           	} 
    236            .          .           	return x 
    237            .          .           } 
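
stackpoolalloc's fast path is an intrusive free-list pop: the list is threaded through the free stacks themselves, so taking one costs a couple of loads and stores. A hedged sketch with ordinary pointers standing in for gclinkptr (node and span here are stand-ins, not the runtime's types):

    package main

    import "fmt"

    // node stands in for a free stack; the runtime threads the list
    // through the stack memory itself via gclinkptr.
    type node struct{ next *node }

    type span struct {
        manualFreeList *node
        allocCount     int
    }

    // alloc mirrors stackpoolalloc's pop: take the head, advance the
    // list, and count the allocation.
    func (s *span) alloc() *node {
        x := s.manualFreeList
        if x == nil {
            panic("span has no free stacks")
        }
        s.manualFreeList = x.next
        s.allocCount++
        return x
    }

    func main() {
        a, b := &node{}, &node{}
        a.next = b
        s := &span{manualFreeList: a}
        fmt.Println(s.alloc() == a, s.manualFreeList == b) // true true
    }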

runtime.stackpoolfree

/usr/lib/go/src/runtime/stack.go

  Total:        80ms       80ms (flat, cum)  0.17%
    238            .          .            
    239            .          .           // Adds stack x to the free pool. Must be called with stackpool[order].item.mu held. 
    240            .          .           func stackpoolfree(x gclinkptr, order uint8) { 
    241         50ms       50ms           	s := spanOfUnchecked(uintptr(x)) // inlined: mheap.go:736, mheap.go:620, mheap.go:737
    242         30ms       30ms           	if s.state.get() != mSpanManual { // inlined: mheap.go:417, types.go:124
    243            .          .           		throw("freeing stack not in a stack span") 
    244            .          .           	} 
    245            .          .           	if s.manualFreeList.ptr() == nil { 
    246            .          .           		// s will now have a free stack 
    247            .          .           		stackpool[order].item.span.insert(s) 

runtime.stackpoolfree

/usr/lib/go/src/runtime/stack.go

  Total:           0       20ms (flat, cum) 0.042%
    266            .          .           		// 
    267            .          .           		// By not freeing, we prevent step #4 until GC is done. 
    268            .          .           		stackpool[order].item.span.remove(s) 
    269            .          .           		s.manualFreeList = 0 
    270            .          .           		osStackFree(s) 
    271            .       20ms           		mheap_.freeManual(s, spanAllocStack) 
    272            .          .           	} 
    273            .          .           } 
    274            .          .            
    275            .          .           // stackcacherefill/stackcacherelease implement a global pool of stack segments. 
    276            .          .           // The pool is required to prevent unlimited growth of per-thread caches. 

runtime.stackcacherefill

/usr/lib/go/src/runtime/stack.go

  Total:           0      100ms (flat, cum)  0.21%
    285            .          .           	// Grab half of the allowed capacity (to prevent thrashing). 
    286            .          .           	var list gclinkptr 
    287            .          .           	var size uintptr 
    288            .          .           	lock(&stackpool[order].item.mu) 
    289            .          .           	for size < _StackCacheSize/2 { 
    290            .      100ms           		x := stackpoolalloc(order) 
    291            .          .           		x.ptr().next = list 
    292            .          .           		list = x 
    293            .          .           		size += fixedStack << order 
    294            .          .           	} 
    295            .          .           	unlock(&stackpool[order].item.mu) 

runtime.stackcacherelease

/usr/lib/go/src/runtime/stack.go

  Total:           0      110ms (flat, cum)  0.23%
    305            .          .           	x := c.stackcache[order].list 
    306            .          .           	size := c.stackcache[order].size 
    307            .          .           	lock(&stackpool[order].item.mu) 
    308            .          .           	for size > _StackCacheSize/2 { 
    309            .          .           		y := x.ptr().next 
    310            .      100ms           		stackpoolfree(x, order) 
    311            .          .           		x = y 
    312            .          .           		size -= fixedStack << order 
    313            .          .           	} 
    314            .       10ms           	unlock(&stackpool[order].item.mu) // inlined: lock_spinbit.go:261, lockrank_off.go:35
    315            .          .           	c.stackcache[order].list = x 
    316            .          .           	c.stackcache[order].size = size 
    317            .          .           } 
    318            .          .            
    319            .          .           //go:systemstack 
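
Both loops implement the same hysteresis: refill stops once the local cache holds half of _StackCacheSize, and release drains it back down to that same watermark, so a thread oscillating around the boundary doesn't bounce every operation through the global pool. A simplified sketch of the two watermark loops (cacheCap and the counting-only pool/cache types are stand-ins, not the runtime's):

    package main

    import "fmt"

    const cacheCap = 32 // stand-in for _StackCacheSize, counted in "stacks"

    type pool struct{ free int }  // global pool; the runtime guards it with a lock
    type cache struct{ size int } // per-thread cache

    // refill grabs up to half the allowed capacity, as stackcacherefill does.
    func (c *cache) refill(p *pool) {
        for c.size < cacheCap/2 {
            p.free--
            c.size++
        }
    }

    // release returns stacks until the cache is back at the watermark,
    // as stackcacherelease does.
    func (c *cache) release(p *pool) {
        for c.size > cacheCap/2 {
            p.free++
            c.size--
        }
    }

    func main() {
        p := &pool{free: 100}
        c := &cache{}
        c.refill(p)
        c.size = cacheCap // pretend a burst of frees filled the cache
        c.release(p)
        fmt.Println(c.size, p.free) // 16 100: both loops stop at cacheCap/2
    }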

runtime.stackalloc

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
    339            .          .           // 
    340            .          .           // stackalloc must run on the system stack because it uses per-P 
    341            .          .           // resources and must not split the stack. 
    342            .          .           // 
    343            .          .           //go:systemstack 
    344         10ms       10ms           func stackalloc(n uint32) stack { 
    345            .          .           	// Stackalloc must be called on scheduler stack, so that we 
    346            .          .           	// never try to grow the stack during the code that stackalloc runs. 
    347            .          .           	// Doing so would cause a deadlock (issue 1547). 
    348            .          .           	thisg := getg() 
    349            .          .           	if thisg != thisg.m.g0 { 

runtime.stackalloc

/usr/lib/go/src/runtime/stack.go

  Total:       110ms      210ms (flat, cum)  0.44%
    375            .          .           		for n2 > fixedStack { 
    376            .          .           			order++ 
    377            .          .           			n2 >>= 1 
    378            .          .           		} 
    379            .          .           		var x gclinkptr 
    380         10ms       10ms           		if stackNoCache != 0 || thisg.m.p == 0 || thisg.m.preemptoff != "" { 
    381            .          .           			// thisg.m.p == 0 can happen in the guts of exitsyscall 
    382            .          .           			// or procresize. Just get a stack from the global pool. 
    383            .          .           			// Also don't touch stackcache during gc 
    384            .          .           			// as it's flushed concurrently. 
    385            .          .           			lock(&stackpool[order].item.mu) 
    386            .          .           			x = stackpoolalloc(order) 
    387            .          .           			unlock(&stackpool[order].item.mu) 
    388            .          .           		} else { 
    389            .          .           			c := thisg.m.p.ptr().mcache 
    390         10ms       10ms           			x = c.stackcache[order].list 
    391         40ms       40ms           			if x.ptr() == nil { // inlined: return (*gclink)(unsafe.Pointer(p))  mcache.go:76
    392            .      100ms           				stackcacherefill(c, order) 
    393            .          .           				x = c.stackcache[order].list 
    394            .          .           			} 
    395         50ms       50ms           			c.stackcache[order].list = x.ptr().next 
    396            .          .           			c.stackcache[order].size -= uintptr(n) 
    397            .          .           		} 
    398            .          .           		if valgrindenabled { 
    399            .          .           			// We're about to allocate the stack region starting at x.ptr(). 
    400            .          .           			// To prevent valgrind from complaining about overlapping allocations, 
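
The order loop near the top of this fragment maps a power-of-two stack size to a pool order by counting halvings down to fixedStack. A small self-contained version (the 2048 value for fixedStack is an assumption; the runtime's constant is platform-dependent):

    package main

    import "fmt"

    const fixedStack = 2048 // illustrative; the real value varies by platform

    // orderFor returns the stack-pool order for a power-of-two size n,
    // mirroring the order loop in stackalloc.
    func orderFor(n uintptr) uint8 {
        order := uint8(0)
        for n2 := n; n2 > fixedStack; n2 >>= 1 {
            order++
        }
        return order
    }

    func main() {
        for _, n := range []uintptr{2048, 4096, 8192, 16384} {
            fmt.Printf("size %5d -> order %d\n", n, orderFor(n))
        }
        // size  2048 -> order 0 ... size 16384 -> order 3
    }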

runtime.stackfree

/usr/lib/go/src/runtime/stack.go

  Total:        30ms       30ms (flat, cum) 0.063%
    461            .          .           // 
    462            .          .           //go:systemstack 
    463            .          .           func stackfree(stk stack) { 
    464            .          .           	gp := getg() 
    465            .          .           	v := unsafe.Pointer(stk.lo) 
    466         10ms       10ms           	n := stk.hi - stk.lo 
    467            .          .           	if n&(n-1) != 0 { 
    468            .          .           		throw("stack not a power of 2") 
    469            .          .           	} 
    470            .          .           	if stk.lo+n < stk.hi { 
    471            .          .           		throw("bad stack size") 
    472            .          .           	} 
    473            .          .           	if stackDebug >= 1 { 
    474            .          .           		println("stackfree", v, n) 
    475            .          .           		memclrNoHeapPointers(v, n) // for testing, clobber stack data 
    476            .          .           	} 
    477         20ms       20ms           	if debug.efence != 0 || stackFromSystem != 0 { 
    478            .          .           		if debug.efence != 0 || stackFaultOnFree != 0 { 
    479            .          .           			sysFault(v, n) 
    480            .          .           		} else { 
    481            .          .           			sysFree(v, n, &memstats.stacks_sys) 
    482            .          .           		} 

runtime.stackfree

/usr/lib/go/src/runtime/stack.go

  Total:        80ms      190ms (flat, cum)   0.4%
    496            .          .           		asanpoison(v, n) 
    497            .          .           	} 
    498            .          .           	if valgrindenabled { 
    499            .          .           		valgrindFree(v) 
    500            .          .           	} 
    501         10ms       10ms           	if n < fixedStack<<_NumStackOrders && n < _StackCacheSize { 
    502            .          .           		order := uint8(0) 
    503            .          .           		n2 := n 
    504            .          .           		for n2 > fixedStack { 
    505            .          .           			order++ 
    506            .          .           			n2 >>= 1 
    507            .          .           		} 
    508            .          .           		x := gclinkptr(v) 
    509         10ms       10ms           		if stackNoCache != 0 || gp.m.p == 0 || gp.m.preemptoff != "" { 
    510            .          .           			lock(&stackpool[order].item.mu) 
    511            .          .           			if valgrindenabled { 
    512            .          .           				// x.ptr() is the head of the list of free stacks, and will be used 
    513            .          .           				// when allocating a new stack, so it has to be marked allocated. 
    514            .          .           				valgrindMalloc(unsafe.Pointer(x.ptr()), unsafe.Sizeof(x.ptr())) 
    515            .          .           			} 
    516            .          .           			stackpoolfree(x, order) 
    517            .          .           			unlock(&stackpool[order].item.mu) 
    518            .          .           		} else { 
    519            .          .           			c := gp.m.p.ptr().mcache 
    520         60ms       60ms           			if c.stackcache[order].size >= _StackCacheSize { 
    521            .      110ms           				stackcacherelease(c, order) 
    522            .          .           			} 
    523            .          .           			if valgrindenabled { 
    524            .          .           				// x.ptr() is the head of the list of free stacks, and will 
    525            .          .           				// be used when allocating a new stack, so it has to be 
    526            .          .           				// marked allocated. 

runtime.adjustpointer

/usr/lib/go/src/runtime/stack.go

  Total:        90ms       90ms (flat, cum)  0.19%
    607            .          .            
    608            .          .           // adjustpointer checks whether *vpp is in the old stack described by adjinfo. 
    609            .          .           // If so, it rewrites *vpp to point into the new stack. 
    610            .          .           func adjustpointer(adjinfo *adjustinfo, vpp unsafe.Pointer) { 
    611            .          .           	pp := (*uintptr)(vpp) 
    612         40ms       40ms           	p := *pp 
    613            .          .           	if stackDebug >= 4 { 
    614            .          .           		print("        ", pp, ":", hex(p), "\n") 
    615            .          .           	} 
    616            .          .           	if valgrindenabled { 
    617            .          .           		// p is a pointer on a stack, it is inherently initialized, as 
    618            .          .           		// everything on the stack is, but valgrind for _some unknown reason_ 
    619            .          .           		// sometimes thinks it's uninitialized, and flags operations on p below 
    620            .          .           		// as uninitialized. We just initialize it if valgrind thinks its 
    621            .          .           		// uninitialized. 
    622            .          .           		// 
    623            .          .           		// See go.dev/issues/73801. 
    624            .          .           		valgrindMakeMemDefined(unsafe.Pointer(&p), unsafe.Sizeof(&p)) 
    625            .          .           	} 
    626         50ms       50ms           	if adjinfo.old.lo <= p && p < adjinfo.old.hi { 
    627            .          .           		*pp = p + adjinfo.delta 
    628            .          .           		if stackDebug >= 3 { 
    629            .          .           			print("        adjust ptr ", pp, ":", hex(p), " -> ", hex(*pp), "\n") 
    630            .          .           		} 
    631            .          .           	} 
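
The check-and-rewrite in adjustpointer is plain interval arithmetic: if a word holds an address inside the old stack, displace it by delta; otherwise leave it alone. A minimal analogue (span, adjust, and the sample addresses are illustrative):

    package main

    import "fmt"

    type span struct{ lo, hi uintptr }

    // adjust rewrites *p by delta if it points into old, the same
    // predicate adjustpointer applies to each candidate stack word.
    func adjust(p *uintptr, old span, delta uintptr) {
        if v := *p; old.lo <= v && v < old.hi {
            *p = v + delta
        }
    }

    func main() {
        old := span{lo: 0x1000, hi: 0x2000}
        delta := uintptr(0x7000) // new.hi - old.hi, in copystack's terms

        inside, outside := uintptr(0x1800), uintptr(0x3000)
        adjust(&inside, old, delta)
        adjust(&outside, old, delta)
        fmt.Printf("%#x %#x\n", inside, outside) // 0x8800 0x3000: only the stack pointer moved
    }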

runtime.adjustpointers

/usr/lib/go/src/runtime/stack.go

  Total:       390ms      390ms (flat, cum)  0.82%
    647            .          .           	return (b >> (i % 8)) & 1 
    648            .          .           } 
    649            .          .            
    650            .          .           // bv describes the memory starting at address scanp. 
    651            .          .           // Adjust any pointers contained therein. 
    652         20ms       20ms           func adjustpointers(scanp unsafe.Pointer, bv *bitvector, adjinfo *adjustinfo, f funcInfo) { 
    653            .          .           	minp := adjinfo.old.lo 
    654            .          .           	maxp := adjinfo.old.hi 
    655            .          .           	delta := adjinfo.delta 
    656         50ms       50ms           	num := uintptr(bv.n) 
    657            .          .           	// If this frame might contain channel receive slots, use CAS 
    658            .          .           	// to adjust pointers. If the slot hasn't been received into 
    659            .          .           	// yet, it may contain stack pointers and a concurrent send 
    660            .          .           	// could race with adjusting those pointers. (The sent value 
    661            .          .           	// itself can never contain stack pointers.) 
    662            .          .           	useCAS := uintptr(scanp) < adjinfo.sghi 
    663         20ms       20ms           	for i := uintptr(0); i < num; i += 8 { 
    664            .          .           		if stackDebug >= 4 { 
    665            .          .           			for j := uintptr(0); j < 8; j++ { 
    666            .          .           				print("        ", add(scanp, (i+j)*goarch.PtrSize), ":", ptrnames[bv.ptrbit(i+j)], ":", hex(*(*uintptr)(add(scanp, (i+j)*goarch.PtrSize))), " # ", i, " ", *addb(bv.bytedata, i/8), "\n") 
    667            .          .           			} 
    668            .          .           		} 
    669         10ms       10ms           		b := *(addb(bv.bytedata, i/8)) 
    670         80ms       80ms           		for b != 0 { 
    671         40ms       40ms           			j := uintptr(sys.TrailingZeros8(b)) 
    672         10ms       10ms           			b &= b - 1 
    673            .          .           			pp := (*uintptr)(add(scanp, (i+j)*goarch.PtrSize)) 
    674            .          .           		retry: 
    675         20ms       20ms           			p := *pp 
    676         60ms       60ms           			if f.valid() && 0 < p && p < minLegalPointer && debug.invalidptr != 0 { 
    677            .          .           				// Looks like a junk value in a pointer slot. 
    678            .          .           				// Live analysis wrong? 
    679            .          .           				getg().m.traceback = 2 
    680            .          .           				print("runtime: bad pointer in frame ", funcname(f), " at ", pp, ": ", hex(p), "\n") 
    681            .          .           				throw("invalid pointer found on stack") 
    682            .          .           			} 
    683         60ms       60ms           			if minp <= p && p < maxp { 
    684            .          .           				if stackDebug >= 3 { 
    685            .          .           					print("adjust ptr ", hex(p), " ", funcname(f), "\n") 
    686            .          .           				} 
    687         10ms       10ms           				if useCAS { 
    688            .          .           					ppu := (*unsafe.Pointer)(unsafe.Pointer(pp)) 
    689            .          .           					if !atomic.Casp1(ppu, unsafe.Pointer(p), unsafe.Pointer(p+delta)) { 
    690            .          .           						goto retry 
    691            .          .           					} 
    692            .          .           				} else { 
    693            .          .           					*pp = p + delta 
    694            .          .           				} 
    695            .          .           			} 
    696            .          .           		} 
    697            .          .           	} 
    698         10ms       10ms           } 
    699            .          .            
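
The inner loop above is the standard trick for walking set bits: TrailingZeros8 finds the lowest one bit and b &= b-1 clears it, so the body runs once per pointer slot rather than once per bit position. sys.TrailingZeros8 is the runtime-internal twin of math/bits.TrailingZeros8, which this isolated sketch uses:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // setBits reports the indices of the one bits in b, lowest first,
    // using the same TrailingZeros + clear-lowest-bit loop as adjustpointers.
    func setBits(b uint8) []int {
        var idx []int
        for b != 0 {
            j := bits.TrailingZeros8(b)
            b &= b - 1 // clear the bit we just handled
            idx = append(idx, j)
        }
        return idx
    }

    func main() {
        fmt.Println(setBits(0b10110010)) // [1 4 5 7]
    }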

runtime.adjustframe

/usr/lib/go/src/runtime/stack.go

  Total:       390ms      2.71s (flat, cum)  5.69%
    700            .          .           // Note: the argument/return area is adjusted by the callee. 
    701         10ms       10ms           func adjustframe(frame *stkframe, adjinfo *adjustinfo) { 
    702         20ms       20ms           	if frame.continpc == 0 { 
    703            .          .           		// Frame is dead. 
    704            .          .           		return 
    705            .          .           	} 
    706            .          .           	f := frame.fn 
    707            .          .           	if stackDebug >= 2 { 
    708            .          .           		print("    adjusting ", funcname(f), " frame=[", hex(frame.sp), ",", hex(frame.fp), "] pc=", hex(frame.pc), " continpc=", hex(frame.continpc), "\n") 
    709            .          .           	} 
    710            .          .            
    711            .          .           	// Adjust saved frame pointer if there is one. 
    712         10ms       10ms           	if (goarch.ArchFamily == goarch.AMD64 || goarch.ArchFamily == goarch.ARM64) && frame.argp-frame.varp == 2*goarch.PtrSize { 
    713            .          .           		if stackDebug >= 3 { 
    714            .          .           			print("      saved bp\n") 
    715            .          .           		} 
    716            .          .           		if debugCheckBP { 
    717            .          .           			// Frame pointers should always point to the next higher frame on 
    718            .          .           			// the Go stack (or be nil, for the top frame on the stack). 
    719            .          .           			bp := *(*uintptr)(unsafe.Pointer(frame.varp)) 
    720            .          .           			if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) { 
    721            .          .           				println("runtime: found invalid frame pointer") 
    722            .          .           				print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n") 
    723            .          .           				throw("bad frame pointer") 
    724            .          .           			} 
    725            .          .           		} 
    726            .          .           		// On AMD64, this is the caller's frame pointer saved in the current 
    727            .          .           		// frame. 
    728            .          .           		// On ARM64, this is the frame pointer of the caller's caller saved 
    729            .          .           		// by the caller in its frame (one word below its SP). 
    730         40ms       40ms           		adjustpointer(adjinfo, unsafe.Pointer(frame.varp)) // inlined: stack.go:612, stack.go:626
    731            .          .           	} 
    732            .          .            
    733         20ms      1.95s           	locals, args, objs := frame.getStackMap(true) 
    734            .          .            
    735            .          .           	// Adjust local variables if stack frame has been allocated. 
    736            .          .           	if locals.n > 0 { 
    737         10ms       10ms           		size := uintptr(locals.n) * goarch.PtrSize 
    738         10ms      190ms           		adjustpointers(unsafe.Pointer(frame.varp-size), &locals, adjinfo, f) 
    739            .          .           	} 
    740            .          .            
    741            .          .           	// Adjust arguments. 
    742         10ms       10ms           	if args.n > 0 { 
    743            .          .           		if stackDebug >= 3 { 
    744            .          .           			print("      args\n") 
    745            .          .           		} 
    746            .      210ms           		adjustpointers(unsafe.Pointer(frame.argp), &args, adjinfo, funcInfo{}) 
    747            .          .           	} 
    748            .          .            
    749            .          .           	// Adjust pointers in all stack objects (whether they are live or not). 
    750            .          .           	// See comments in mgcmark.go:scanframeworker. 
    751         30ms       30ms           	if frame.varp != 0 { 
    752            .          .           		for i := range objs { 
    753         20ms       20ms           			obj := &objs[i] 
    754            .          .           			off := obj.off 
    755            .          .           			base := frame.varp // locals base pointer 
    756         40ms       40ms           			if off >= 0 { 
    757            .          .           				base = frame.argp // arguments and return values base pointer 
    758            .          .           			} 
    759            .          .           			p := base + uintptr(off) 
    760            .          .           			if p < frame.sp { 
    761            .          .           				// Object hasn't been allocated in the frame yet. 
    762            .          .           				// (Happens when the stack bounds check fails and 
    763            .          .           				// we call into morestack.) 
    764            .          .           				continue 
    765            .          .           			} 
    766         70ms       70ms           			ptrBytes, gcData := obj.gcdata() // inlined: stack.go:1344, stack.go:1352
    767         20ms       20ms           			for i := uintptr(0); i < ptrBytes; i += goarch.PtrSize { 
    768         40ms       40ms           				if *addb(gcData, i/(8*goarch.PtrSize))>>(i/goarch.PtrSize&7)&1 != 0 { 
    769         20ms       20ms           					adjustpointer(adjinfo, unsafe.Pointer(p+i)) // inlined: stack.go:612, stack.go:626
    770            .          .           				} 
    771            .          .           			} 
    772            .          .           		} 
    773            .          .           	} 
    774         20ms       20ms           } 
    775            .          .            

runtime.adjustctxt

/usr/lib/go/src/runtime/stack.go

  Total:        30ms       30ms (flat, cum) 0.063%
    776            .          .           func adjustctxt(gp *g, adjinfo *adjustinfo) { 
    777         10ms       10ms           	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.ctxt)) // inlined: stack.go:626
    778            .          .           	if !framepointer_enabled { 
    779            .          .           		return 
    780            .          .           	} 
    781            .          .           	if debugCheckBP { 
    782            .          .           		bp := gp.sched.bp 
    783            .          .           		if bp != 0 && (bp < adjinfo.old.lo || bp >= adjinfo.old.hi) { 
    784            .          .           			println("runtime: found invalid top frame pointer") 
    785            .          .           			print("bp=", hex(bp), " min=", hex(adjinfo.old.lo), " max=", hex(adjinfo.old.hi), "\n") 
    786            .          .           			throw("bad top frame pointer") 
    787            .          .           		} 
    788            .          .           	} 
    789            .          .           	oldfp := gp.sched.bp 
    790         10ms       10ms           	adjustpointer(adjinfo, unsafe.Pointer(&gp.sched.bp)) // inlined: stack.go:626
    791            .          .           	if GOARCH == "arm64" { 
    792            .          .           		// On ARM64, the frame pointer is saved one word *below* the SP, 
    793            .          .           		// which is not copied or adjusted in any frame. Do it explicitly 
    794            .          .           		// here. 
    795            .          .           		if oldfp == gp.sched.sp-goarch.PtrSize { 
    796            .          .           			memmove(unsafe.Pointer(gp.sched.bp), unsafe.Pointer(oldfp), goarch.PtrSize) 
    797         10ms       10ms           			adjustpointer(adjinfo, unsafe.Pointer(gp.sched.bp)) 
    798            .          .           		} 
    799            .          .           	} 
    800            .          .           } 
    801            .          .            

runtime.adjustdefers

/usr/lib/go/src/runtime/stack.go

  Total:        20ms       20ms (flat, cum) 0.042%
    803            .          .           	// Adjust pointers in the Defer structs. 
    804            .          .           	// We need to do this first because we need to adjust the 
    805            .          .           	// defer.link fields so we always work on the new stack. 
    806            .          .           	adjustpointer(adjinfo, unsafe.Pointer(&gp._defer)) 
    807         20ms       20ms           	for d := gp._defer; d != nil; d = d.link { 
    808            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.fn)) 
    809            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.sp)) 
    810            .          .           		adjustpointer(adjinfo, unsafe.Pointer(&d.link)) 
    811            .          .           	} 

runtime.adjustpanics

/usr/lib/go/src/runtime/stack.go

  Total:        10ms       10ms (flat, cum) 0.021%
    813            .          .            
    814            .          .           func adjustpanics(gp *g, adjinfo *adjustinfo) { 
    815            .          .           	// Panics are on stack and already adjusted. 
    816            .          .           	// Update pointer to head of list in G. 
    817         10ms       10ms           	adjustpointer(adjinfo, unsafe.Pointer(&gp._panic)) // inlined: p := *pp  stack.go:612
    818            .          .           } 
    819            .          .            
    820            .          .           func adjustsudogs(gp *g, adjinfo *adjustinfo) { 
    821            .          .           	// the data elements pointed to by a SudoG structure 
    822            .          .           	// might be in the stack. 

runtime.copystack

/usr/lib/go/src/runtime/stack.go

  Total:        30ms      110ms (flat, cum)  0.23%
    907            .          .           	used := old.hi - gp.sched.sp 
    908            .          .           	// Add just the difference to gcController.addScannableStack. 
    909            .          .           	// g0 stacks never move, so this will never account for them. 
    910            .          .           	// It's also fine if we have no P, addScannableStack can deal with 
    911            .          .           	// that case. 
    912         10ms       10ms           	gcController.addScannableStack(getg().m.p.ptr(), int64(newsize)-int64(old.hi-old.lo)) // inlined: pp.maxStackScanDelta += amount  mgcpacer.go:922
    913            .          .            
    914            .          .           	// allocate new stack 
    915            .       80ms           	new := stackalloc(uint32(newsize)) 
    916            .          .           	if stackPoisonCopy != 0 { 
    917            .          .           		fillstack(new, 0xfd) 
    918            .          .           	} 
    919            .          .           	if stackDebug >= 1 { 
    920            .          .           		print("copystack gp=", gp, " [", hex(old.lo), " ", hex(old.hi-used), " ", hex(old.hi), "]", " -> [", hex(new.lo), " ", hex(new.hi-used), " ", hex(new.hi), "]/", newsize, "\n") 
    921            .          .           	} 
    922            .          .            
    923            .          .           	// Compute adjustment. 
    924            .          .           	var adjinfo adjustinfo 
    925            .          .           	adjinfo.old = old 
    926            .          .           	adjinfo.delta = new.hi - old.hi 
    927            .          .            
    928            .          .           	// Adjust sudogs, synchronizing with channel ops if necessary. 
    929            .          .           	ncopy := used 
    930         20ms       20ms           	if !gp.activeStackChans { 
    931            .          .           		if newsize < old.hi-old.lo && gp.parkingOnChan.Load() { 
    932            .          .           			// It's not safe for someone to shrink this stack while we're actively 
    933            .          .           			// parking on a channel, but it is safe to grow since we do that 
    934            .          .           			// ourselves and explicitly don't want to synchronize with channels 
    935            .          .           			// since we could self-deadlock. 

runtime.copystack

/usr/lib/go/src/runtime/stack.go

  Total:        90ms      5.89s (flat, cum) 12.37%
    950            .          .           		// the stack they may interact with. 
    951            .          .           		ncopy -= syncadjustsudogs(gp, used, &adjinfo) 
    952            .          .           	} 
    953            .          .            
    954            .          .           	// Copy the stack (or the rest of it) to the new location 
    955         30ms      180ms           	memmove(unsafe.Pointer(new.hi-ncopy), unsafe.Pointer(old.hi-ncopy), ncopy) 
    956            .          .            
    957            .          .           	// Adjust remaining structures that have pointers into stacks. 
    958            .          .           	// We have to do most of these before we traceback the new 
    959            .          .           	// stack because gentraceback uses them. 
    960            .       30ms           	adjustctxt(gp, &adjinfo) 
    961            .       20ms           	adjustdefers(gp, &adjinfo) 
    962         10ms       10ms           	adjustpanics(gp, &adjinfo) // inlined: stack.go:817, stack.go:612
    963         10ms       10ms           	if adjinfo.sghi != 0 { 
    964            .          .           		adjinfo.sghi += adjinfo.delta 
    965            .          .           	} 
    966            .          .            
    967            .          .           	// Swap out old stack for new one 
    968            .          .           	gp.stack = new 
    969            .          .           	gp.stackguard0 = new.lo + stackGuard // NOTE: might clobber a preempt request 
    970         10ms       10ms           	gp.sched.sp = new.hi - used 
    971            .          .           	gp.stktopsp += adjinfo.delta 
    972            .          .            
    973            .          .           	// Adjust pointers in the new stack. 
    974            .       10ms           	var u unwinder 
    975         30ms      2.79s           	for u.init(gp, 0); u.valid(); u.next() { // inlined: traceback.go:228, traceback.go:129
    976            .      2.71s           		adjustframe(&u.frame, &adjinfo) 
    977            .          .           	} 
    978            .          .            
    979            .          .           	if valgrindenabled { 
    980            .          .           		if gp.valgrindStackID == 0 { 
    981            .          .           			gp.valgrindStackID = valgrindRegisterStack(unsafe.Pointer(new.lo), unsafe.Pointer(new.hi)) 
    982            .          .           		} else { 
    983            .          .           			valgrindChangeStack(gp.valgrindStackID, unsafe.Pointer(new.lo), unsafe.Pointer(new.hi)) 
    984            .          .           		} 
    985            .          .           	} 
    986            .          .            
    987            .          .           	// free old stack 
    988            .          .           	if stackPoisonCopy != 0 { 
    989            .          .           		fillstack(old, 0xfc) 
    990            .          .           	} 
    991            .      120ms           	stackfree(old) 
    992            .          .           } 
    993            .          .            
    994            .          .           // round x up to a power of 2. 
    995            .          .           func round2(x int32) int32 { 
    996            .          .           	s := uint(0) 
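
End to end, copystack is allocate / copy / rewrite interior pointers / swap / free, and nearly all of its 5.89s here is the pointer rewrite, dominated by adjustframe's getStackMap lookups. The overall shape, reduced to a slice relocation (indices stand in for machine addresses; growStack is a loose sketch, not the runtime algorithm):

    package main

    import "fmt"

    // growStack relocates a "stack" (a slice) and patches the interior
    // "pointers" (indices into the old stack), mirroring copystack's
    // copy-then-adjust order.
    func growStack(old []int, ptrSlots []int, newsize int) []int {
        newStack := make([]int, newsize) // stackalloc
        delta := newsize - len(old)      // adjinfo.delta
        copy(newStack[delta:], old)      // memmove of the used portion
        for _, slot := range ptrSlots {  // adjustframe / adjustpointers
            v := newStack[delta+slot]
            if 0 <= v && v < len(old) { // points into the old stack?
                newStack[delta+slot] = v + delta
            }
        }
        return newStack // the old stack would now be stackfree'd
    }

    func main() {
        old := []int{3, 99, 0, 2} // slots 0 and 3 hold "stack pointers"
        fmt.Println(growStack(old, []int{0, 3}, 8))
        // [0 0 0 0 7 99 0 6]: both pointers moved by delta, 99 untouched
    }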

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1010            .          .           // This must be nowritebarrierrec because it can be called as part of 
   1011            .          .           // stack growth from other nowritebarrierrec functions, but the 
   1012            .          .           // compiler doesn't check this. 
   1013            .          .           // 
   1014            .          .           //go:nowritebarrierrec 
   1015         20ms       20ms           func newstack() { 
   1016            .          .           	thisg := getg() 
   1017            .          .           	// TODO: double check all gp. shouldn't be getg(). 
   1018            .          .           	if thisg.m.morebuf.g.ptr().stackguard0 == stackFork { 
   1019            .          .           		throw("stack growth after fork") 
   1020            .          .           	} 

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:        20ms      370ms (flat, cum)  0.78%
   1131            .          .           	newsize := oldsize * 2 
   1132            .          .            
   1133            .          .           	// Make sure we grow at least as much as needed to fit the new frame. 
   1134            .          .           	// (This is just an optimization - the caller of morestack will 
   1135            .          .           	// recheck the bounds on return.) 
   1136            .       90ms           	if f := findfunc(gp.sched.pc); f.valid() { 
   1137            .      260ms           		max := uintptr(funcMaxSPDelta(f)) 
   1138            .          .           		needed := max + stackGuard 
   1139            .          .           		used := gp.stack.hi - gp.sched.sp 
   1140         10ms       10ms           		for newsize-used < needed { 
   1141            .          .           			newsize *= 2 
   1142            .          .           		} 
   1143            .          .           	} 
   1144            .          .            
   1145         10ms       10ms           	if stackguard0 == stackForceMove { 
   1146            .          .           		// Forced stack movement used for debugging. 
   1147            .          .           		// Don't double the stack (or we may quickly run out 
   1148            .          .           		// if this is done repeatedly). 
   1149            .          .           		newsize = oldsize 
   1150            .          .           	} 
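
The sizing logic doubles the old stack, then keeps doubling until the new frame's worst-case SP delta plus the guard area fits. A worked version of that arithmetic (the 928 value for stackGuard is an assumption; the real constant is platform- and version-specific):

    package main

    import "fmt"

    const stackGuard = 928 // illustrative; see the runtime's actual constant

    // pickNewSize mirrors newstack's growth loop: start at double the old
    // size and double again until the incoming frame's needs fit.
    func pickNewSize(oldsize, used, maxSPDelta uintptr) uintptr {
        newsize := oldsize * 2
        needed := maxSPDelta + stackGuard
        for newsize-used < needed {
            newsize *= 2
        }
        return newsize
    }

    func main() {
        // An 8 KiB stack, 6 KiB used, about to enter a frame needing 20 KiB.
        fmt.Println(pickNewSize(8192, 6144, 20480)) // 32768
    }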

runtime.newstack

/usr/lib/go/src/runtime/stack.go

  Total:           0      6.07s (flat, cum) 12.75%
   1163            .          .           	// so it must be Grunning (or Gscanrunning). 
   1164            .          .           	casgstatus(gp, _Grunning, _Gcopystack) 
   1165            .          .            
   1166            .          .           	// The concurrent GC will not scan the stack while we are doing the copy since 
   1167            .          .           	// the gp is in a Gcopystack status. 
   1168            .         6s           	copystack(gp, newsize) 
   1169            .          .           	if stackDebug >= 1 { 
   1170            .          .           		print("stack grow done\n") 
   1171            .          .           	} 
   1172            .       70ms           	casgstatus(gp, _Gcopystack, _Grunning) 
   1173            .          .           	gogo(&gp.sched) 
   1174            .          .           } 
   1175            .          .            
   1176            .          .           //go:nosplit 
   1177            .          .           func nilfunc() { 

runtime.gostartcallfn

/usr/lib/go/src/runtime/stack.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1181            .          .           // adjust Gobuf as if it executed a call to fn 
   1182            .          .           // and then stopped before the first instruction in fn. 
   1183            .          .           func gostartcallfn(gobuf *gobuf, fv *funcval) { 
   1184            .          .           	var fn unsafe.Pointer 
   1185            .          .           	if fv != nil { 
   1186         10ms       10ms           		fn = unsafe.Pointer(fv.fn) 
   1187            .          .           	} else { 
   1188            .          .           		fn = unsafe.Pointer(abi.FuncPCABIInternal(nilfunc)) 
   1189            .          .           	} 
   1190         10ms       10ms           	gostartcall(gobuf, fn, unsafe.Pointer(fv)) // inlined: sys_arm64.go:12
   1191            .          .           } 
   1192            .          .            
   1193            .          .           // isShrinkStackSafe returns whether it's safe to attempt to shrink 
   1194            .          .           // gp's stack. Shrinking the stack is only safe when we have precise 
   1195            .          .           // pointer maps for all frames on the stack. The caller must hold the 

runtime.(*stackObjectRecord).gcdata

/usr/lib/go/src/runtime/stack.go

  Total:        70ms       70ms (flat, cum)  0.15%
   1339            .          .           // Note that this bitmask might be larger than internal/abi.MaxPtrmaskBytes. 
   1340            .          .           func (r *stackObjectRecord) gcdata() (uintptr, *byte) { 
   1341            .          .           	ptr := uintptr(unsafe.Pointer(r)) 
   1342            .          .           	var mod *moduledata 
   1343            .          .           	for datap := &firstmoduledata; datap != nil; datap = datap.next { 
   1344         40ms       40ms           		if datap.gofunc <= ptr && ptr < datap.end { 
   1345            .          .           			mod = datap 
   1346            .          .           			break 
   1347            .          .           		} 
   1348            .          .           	} 
   1349            .          .           	// If you get a panic here due to a nil mod, 
   1350            .          .           	// you may have made a copy of a stackObjectRecord. 
   1351            .          .           	// You must use the original pointer. 
   1352         30ms       30ms           	res := mod.rodata + uintptr(r.gcdataoff) 
   1353            .          .           	return uintptr(r.ptrBytes), (*byte)(unsafe.Pointer(res)) 
   1354            .          .           } 
   1355            .          .            
   1356            .          .           // This is exported as ABI0 via linkname so obj can call it. 
   1357            .          .           // 

internal/runtime/atomic.(*Int32).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        20ms       20ms (flat, cum) 0.042%
     16            .          .            
     17            .          .           // Load accesses and returns the value atomically. 
     18            .          .           // 
     19            .          .           //go:nosplit 
     20            .          .           func (i *Int32) Load() int32 { 
     21         20ms       20ms           	return Loadint32(&i.value) 
     22            .          .           } 
     23            .          .            
     24            .          .           // Store updates the value atomically. 
     25            .          .           // 
     26            .          .           //go:nosplit 

internal/runtime/atomic.(*Int32).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       100ms      100ms (flat, cum)  0.21%
     51            .          .           // This operation wraps around in the usual 
     52            .          .           // two's-complement way. 
     53            .          .           // 
     54            .          .           //go:nosplit 
     55            .          .           func (i *Int32) Add(delta int32) int32 { 
     56        100ms      100ms           	return Xaddint32(&i.value, delta) 
     57            .          .           } 
     58            .          .            
     59            .          .           // Int64 is an atomically accessed int64 value. 
     60            .          .           // 
     61            .          .           // 8-byte aligned on all platforms, unlike a regular int64. 

internal/runtime/atomic.(*Int64).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        40ms       40ms (flat, cum) 0.084%
     69            .          .            
     70            .          .           // Load accesses and returns the value atomically. 
     71            .          .           // 
     72            .          .           //go:nosplit 
     73            .          .           func (i *Int64) Load() int64 { 
     74         40ms       40ms           	return Loadint64(&i.value) 
     75            .          .           } 
     76            .          .            
     77            .          .           // Store updates the value atomically. 
     78            .          .           // 
     79            .          .           //go:nosplit 

internal/runtime/atomic.(*Int64).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        20ms       20ms (flat, cum) 0.042%
    104            .          .           // This operation wraps around in the usual 
    105            .          .           // two's-complement way. 
    106            .          .           // 
    107            .          .           //go:nosplit 
    108            .          .           func (i *Int64) Add(delta int64) int64 { 
    109         20ms       20ms           	return Xaddint64(&i.value, delta) 
    110            .          .           } 
    111            .          .            
    112            .          .           // Uint8 is an atomically accessed uint8 value. 
    113            .          .           // 
    114            .          .           // A Uint8 must not be copied. 

internal/runtime/atomic.(*Uint8).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        50ms       50ms (flat, cum)   0.1%
    119            .          .            
    120            .          .           // Load accesses and returns the value atomically. 
    121            .          .           // 
    122            .          .           //go:nosplit 
    123            .          .           func (u *Uint8) Load() uint8 { 
    124         50ms       50ms           	return Load8(&u.value) 
    125            .          .           } 
    126            .          .            
    127            .          .           // Store updates the value atomically. 
    128            .          .           // 
    129            .          .           //go:nosplit 

internal/runtime/atomic.(*Bool).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       130ms      130ms (flat, cum)  0.27%
    163            .          .            
    164            .          .           // Load accesses and returns the value atomically. 
    165            .          .           // 
    166            .          .           //go:nosplit 
    167            .          .           func (b *Bool) Load() bool { 
    168        130ms      130ms           	return b.u.Load() != 0 // inlined: return Load8(&u.value)  types.go:124
    169            .          .           } 
    170            .          .            
    171            .          .           // Store updates the value atomically. 
    172            .          .           // 
    173            .          .           //go:nosplit 

internal/runtime/atomic.(*Uint32).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        80ms       80ms (flat, cum)  0.17%
    189            .          .            
    190            .          .           // Load accesses and returns the value atomically. 
    191            .          .           // 
    192            .          .           //go:nosplit 
    193            .          .           func (u *Uint32) Load() uint32 { 
    194         80ms       80ms           	return Load(&u.value) 
    195            .          .           } 
    196            .          .            
    197            .          .           // LoadAcquire is a partially unsynchronized version 
    198            .          .           // of Load that relaxes ordering constraints. Other threads 
    199            .          .           // may observe operations that precede this operation to 

internal/runtime/atomic.(*Uint32).CompareAndSwap

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       380ms      380ms (flat, cum)   0.8%
    231            .          .           // and if they're equal, swaps u's value with new. 
    232            .          .           // It reports whether the swap ran. 
    233            .          .           // 
    234            .          .           //go:nosplit 
    235            .          .           func (u *Uint32) CompareAndSwap(old, new uint32) bool { 
    236        380ms      380ms           	return Cas(&u.value, old, new) 
    237            .          .           } 
    238            .          .            
    239            .          .           // CompareAndSwapRelease is a partially unsynchronized version 
    240            .          .           // of Cas that relaxes ordering constraints. Other threads 
    241            .          .           // may observe operations that occur after this operation to 
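
These wrappers are thin, but Cas alone accounts for 380ms because lock handoffs and status transitions funnel through it. In user code the equivalent pattern is a CompareAndSwap retry loop on a sync/atomic type; a sketch of a saturating counter (incToCap and limit are illustrative names):

    package main

    import (
        "fmt"
        "sync/atomic"
    )

    // incToCap raises u by one unless it is already at limit,
    // retrying on CAS failure exactly as runtime CAS loops do.
    func incToCap(u *atomic.Uint32, limit uint32) bool {
        for {
            old := u.Load()
            if old >= limit {
                return false // saturated; nothing to do
            }
            if u.CompareAndSwap(old, old+1) {
                return true // we published old+1
            }
            // Lost a race: another goroutine changed the value; reload and retry.
        }
    }

    func main() {
        var u atomic.Uint32
        for i := 0; i < 5; i++ {
            fmt.Println(incToCap(&u, 3), u.Load()) // true 1, true 2, true 3, then false 3
        }
    }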

internal/runtime/atomic.(*Uint32).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       190ms      190ms (flat, cum)   0.4%
    286            .          .           // This operation wraps around in the usual 
    287            .          .           // two's-complement way. 
    288            .          .           // 
    289            .          .           //go:nosplit 
    290            .          .           func (u *Uint32) Add(delta int32) uint32 { 
    291        190ms      190ms           	return Xadd(&u.value, delta) 
    292            .          .           } 
    293            .          .            
    294            .          .           // Uint64 is an atomically accessed uint64 value. 
    295            .          .           // 
    296            .          .           // 8-byte aligned on all platforms, unlike a regular uint64. 

internal/runtime/atomic.(*Uint64).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        20ms       20ms (flat, cum) 0.042%
    304            .          .            
    305            .          .           // Load accesses and returns the value atomically. 
    306            .          .           // 
    307            .          .           //go:nosplit 
    308            .          .           func (u *Uint64) Load() uint64 { 
    309         20ms       20ms           	return Load64(&u.value) 
    310            .          .           } 
    311            .          .            
    312            .          .           // Store updates the value atomically. 
    313            .          .           // 
    314            .          .           //go:nosplit 

internal/runtime/atomic.(*Uint64).CompareAndSwap

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        50ms       50ms (flat, cum)   0.1%
    320            .          .           // and if they're equal, swaps u's value with new. 
    321            .          .           // It reports whether the swap ran. 
    322            .          .           // 
    323            .          .           //go:nosplit 
    324            .          .           func (u *Uint64) CompareAndSwap(old, new uint64) bool { 
    325         50ms       50ms           	return Cas64(&u.value, old, new) 
    326            .          .           } 
    327            .          .            
    328            .          .           // Swap replaces u's value with new, returning 
    329            .          .           // u's value before the replacement. 
    330            .          .           // 

internal/runtime/atomic.(*Uint64).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:       240ms      240ms (flat, cum)   0.5%
    339            .          .           // This operation wraps around in the usual 
    340            .          .           // two's-complement way. 
    341            .          .           // 
    342            .          .           //go:nosplit 
    343            .          .           func (u *Uint64) Add(delta int64) uint64 { 
    344        240ms      240ms           	return Xadd64(&u.value, delta) 
    345            .          .           } 
    346            .          .            
    347            .          .           // Uintptr is an atomically accessed uintptr value. 
    348            .          .           // 
    349            .          .           // A Uintptr must not be copied. 

internal/runtime/atomic.(*Uintptr).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
    354            .          .            
    355            .          .           // Load accesses and returns the value atomically. 
    356            .          .           // 
    357            .          .           //go:nosplit 
    358            .          .           func (u *Uintptr) Load() uintptr { 
    359         10ms       10ms           	return Loaduintptr(&u.value) 
    360            .          .           } 
    361            .          .            
    362            .          .           // LoadAcquire is a partially unsynchronized version 
    363            .          .           // of Load that relaxes ordering constraints. Other threads 
    364            .          .           // may observe operations that precede this operation to 

internal/runtime/atomic.(*Uintptr).Add

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        30ms       30ms (flat, cum) 0.063%
    415            .          .           // This operation wraps around in the usual 
    416            .          .           // two's-complement way. 
    417            .          .           // 
    418            .          .           //go:nosplit 
    419            .          .           func (u *Uintptr) Add(delta uintptr) uintptr { 
    420         30ms       30ms           	return Xadduintptr(&u.value, delta) 
    421            .          .           } 
    422            .          .            
    423            .          .           // Float64 is an atomically accessed float64 value. 
    424            .          .           // 
    425            .          .           // 8-byte aligned on all platforms, unlike a regular float64. 

internal/runtime/atomic.(*UnsafePointer).Load

/usr/lib/go/src/internal/runtime/atomic/types.go

  Total:        10ms       10ms (flat, cum) 0.021%
    461            .          .            
    462            .          .           // Load accesses and returns the value atomically. 
    463            .          .           // 
    464            .          .           //go:nosplit 
    465            .          .           func (u *UnsafePointer) Load() unsafe.Pointer { 
    466         10ms       10ms           	return Loadp(unsafe.Pointer(&u.value)) 
    467            .          .           } 
    468            .          .            
    469            .          .           // StoreNoWB updates the value atomically. 
    470            .          .           // 
    471            .          .           // WARNING: As the name implies this operation does *not* 

runtime.heapBitsInSpan

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
     74            .          .           // 
     75            .          .           //go:nosplit 
     76            .          .           func heapBitsInSpan(userSize uintptr) bool { 
     77            .          .           	// N.B. gc.MinSizeForMallocHeader is an exclusive minimum so that this function is 
     78            .          .           	// invariant under size-class rounding on its input. 
     79         20ms       20ms           	return userSize <= gc.MinSizeForMallocHeader 
     80            .          .           } 
     81            .          .            
     82            .          .           // typePointers is an iterator over the pointers in a heap object. 
     83            .          .           // 
     84            .          .           // Iteration through this type implements the tiling algorithm described at the 

runtime.typePointers.next

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    263            .          .           		if tp.addr >= limit { 
    264            .          .           			return typePointers{}, 0 
    265            .          .           		} 
    266            .          .            
    267            .          .           		// Grab more bits and try again. 
    268         10ms       10ms           		tp.mask = readUintptr(addb(getGCMask(tp.typ), (tp.addr-tp.elem)/goarch.PtrSize/8)) 
    269            .          .           		if tp.addr+goarch.PtrSize*ptrBits > limit { 
    270            .          .           			bits := (tp.addr + goarch.PtrSize*ptrBits - limit) / goarch.PtrSize 
    271            .          .           			tp.mask &^= ((1 << (bits)) - 1) << (ptrBits - bits) 
    272            .          .           		} 
    273            .          .           	} 
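
The &^= trim at line 271 is the subtle step here. A minimal standalone sketch of just that masking, assuming 64-bit words (ptrBits = 64); trimMask is a hypothetical name, not runtime API:

    package main

    import "fmt"

    const ptrBits = 64 // pointer-bitmap bits per word, as on arm64

    // trimMask clears the high bits of a 64-pointer mask window that starts
    // at addr but extends past limit, mirroring the &^= at line 271.
    func trimMask(mask, addr, limit, ptrSize uintptr) uintptr {
    	if addr+ptrSize*ptrBits > limit {
    		bits := (addr + ptrSize*ptrBits - limit) / ptrSize
    		mask &^= ((1 << bits) - 1) << (ptrBits - bits)
    	}
    	return mask
    }

    func main() {
    	// A window whose last 32 pointer slots fall past the limit: the
    	// top 32 mask bits are cleared, the low 32 survive.
    	fmt.Printf("%064b\n", trimMask(^uintptr(0), 0, 32*8, 8))
    }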

runtime.(*mspan).initHeapBits

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms      340ms (flat, cum)  0.71%
    509            .          .           	if goarch.PtrSize == 8 && !s.spanclass.noscan() && s.spanclass.sizeclass() == 1 { 
    510            .          .           		b := s.heapBits() 
    511            .          .           		for i := range b { 
    512            .          .           			b[i] = ^uintptr(0) 
    513            .          .           		} 
    514         10ms       10ms           	} else if (!s.spanclass.noscan() && heapBitsInSpan(s.elemsize)) || s.isUserArenaChunk { 
    515            .       10ms           		b := s.heapBits() 
    516            .      270ms           		clear(b) 
    517            .          .           	} 
    518            .          .           	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(s.elemsize) { 
    519            .       50ms           		s.initInlineMarkBits() 
    520            .          .           	} 
    521            .          .           } 
    522            .          .            
    523            .          .           // heapBits returns the heap ptr/scalar bits stored at the end of the span for 
    524            .          .           // small object spans and heap arena spans. 

runtime.(*mspan).heapBits

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    546            .          .           		} 
    547            .          .           	} 
    548            .          .           	// Find the bitmap at the end of the span. 
    549            .          .           	// 
    550            .          .           	// Nearly every span with heap bits is exactly one page in size. Arenas are the only exception. 
    551         10ms       10ms           	if span.npages == 1 { 
    552            .          .           		// This will be inlined and constant-folded down. 
    553            .          .           		return heapBitsSlice(span.base(), pageSize, span.elemsize) 
    554            .          .           	} 
    555            .          .           	return heapBitsSlice(span.base(), span.npages*pageSize, span.elemsize) 
    556            .          .           } 

runtime.spanHeapBitsRange

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
    569            .          .           //go:nosplit 
    570            .          .           func spanHeapBitsRange(spanBase, spanSize, elemsize uintptr) (base, size uintptr) { 
    571            .          .           	size = spanSize / goarch.PtrSize / 8 
    572            .          .           	base = spanBase + spanSize - size 
    573            .          .           	if goexperiment.GreenTeaGC && gcUsesSpanInlineMarkBits(elemsize) { 
    574         20ms       20ms           		base -= unsafe.Sizeof(spanInlineMarkBits{}) 
    575            .          .           	} 
    576            .          .           	return 
    577            .          .           } 
    578            .          .            
    579            .          .           // heapBitsSmallForAddr loads the heap bits for the object stored at addr from span.heapBits. 
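
A quick check of the arithmetic above (ignoring the Green Tea inline-mark-bits adjustment): one 8 KiB span of 8-byte words needs 8192/8/8 = 128 bytes of ptr/scalar bits, placed in the final 128 bytes of the span. A minimal sketch with stand-in constants:

    package main

    import "fmt"

    // spanHeapBitsRange mirrors the two lines above, minus the GreenTeaGC
    // adjustment. Hypothetical standalone form.
    func spanHeapBitsRange(spanBase, spanSize, ptrSize uintptr) (base, size uintptr) {
    	size = spanSize / ptrSize / 8
    	base = spanBase + spanSize - size
    	return
    }

    func main() {
    	base, size := spanHeapBitsRange(0x100000, 8192, 8)
    	fmt.Printf("bitmap at %#x, %d bytes\n", base, size) // 0x101f80, 128
    }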

runtime.(*mspan).writeHeapBitsSmall

/usr/lib/go/src/runtime/mbitmap.go

  Total:       900ms      900ms (flat, cum)  1.89%
    619            .          .           // 
    620            .          .           // Assumes dataSize is <= ptrBits*goarch.PtrSize. x must be a pointer into the span. 
    621            .          .           // heapBitsInSpan(dataSize) must be true. dataSize must be >= typ.Size_. 
    622            .          .           // 
    623            .          .           //go:nosplit 
    624         30ms       30ms           func (span *mspan) writeHeapBitsSmall(x, dataSize uintptr, typ *_type) (scanSize uintptr) { 
    625            .          .           	// The objects here are always really small, so a single load is sufficient. 
    626        170ms      170ms           	src0 := readUintptr(getGCMask(typ)) 
    627            .          .            
    628            .          .           	// Create repetitions of the bitmap if we have a small slice backing store. 
    629            .          .           	scanSize = typ.PtrBytes 
    630            .          .           	src := src0 
    631        150ms      150ms           	if typ.Size_ == goarch.PtrSize { 
    632            .          .           		src = (1 << (dataSize / goarch.PtrSize)) - 1 
    633            .          .           	} else { 
    634            .          .           		// N.B. We rely on dataSize being an exact multiple of the type size. 
    635            .          .           		// The alternative is to be defensive and mask out src to the length 
    636            .          .           		// of dataSize. The purpose is to save on one additional masking operation. 
    637            .          .           		if doubleCheckHeapSetType && !asanenabled && dataSize%typ.Size_ != 0 { 
    638            .          .           			throw("runtime: (*mspan).writeHeapBitsSmall: dataSize is not a multiple of typ.Size_") 
    639            .          .           		} 
    640         20ms       20ms           		for i := typ.Size_; i < dataSize; i += typ.Size_ { 
    641         30ms       30ms           			src |= src0 << (i / goarch.PtrSize) 
    642            .          .           			scanSize += typ.Size_ 
    643            .          .           		} 
    644            .          .           		if asanenabled { 
    645            .          .           			// Mask src down to dataSize. dataSize is going to be a strange size because of 
    646            .          .           			// the redzone required for allocations when asan is enabled. 
    647            .          .           			src &= (1 << (dataSize / goarch.PtrSize)) - 1 
    648            .          .           		} 
    649            .          .           	} 
    650            .          .            
    651            .          .           	// Since we're never writing more than one uintptr's worth of bits, we're either going 
    652            .          .           	// to do one or two writes. 
    653         60ms       60ms           	dstBase, _ := spanHeapBitsRange(span.base(), pageSize, span.elemsize) 
    654            .          .           	dst := unsafe.Pointer(dstBase) 
    655            .          .           	o := (x - span.base()) / goarch.PtrSize 
    656         10ms       10ms           	i := o / ptrBits 
    657            .          .           	j := o % ptrBits 
    658         40ms       40ms           	bits := span.elemsize / goarch.PtrSize 
    659         20ms       20ms           	if j+bits > ptrBits { 
    660            .          .           		// Two writes. 
    661            .          .           		bits0 := ptrBits - j 
    662            .          .           		bits1 := bits - bits0 
    663            .          .           		dst0 := (*uintptr)(add(dst, (i+0)*goarch.PtrSize)) 
    664         30ms       30ms           		dst1 := (*uintptr)(add(dst, (i+1)*goarch.PtrSize)) 
    665         10ms       10ms           		*dst0 = (*dst0)&(^uintptr(0)>>bits0) | (src << j) 
    666         20ms       20ms           		*dst1 = (*dst1)&^((1<<bits1)-1) | (src >> bits0) 
    667            .          .           	} else { 
    668            .          .           		// One write. 
    669        180ms      180ms           		dst := (*uintptr)(add(dst, i*goarch.PtrSize)) 
    670         30ms       30ms           		*dst = (*dst)&^(((1<<bits)-1)<<j) | (src << j) 
    671            .          .           	} 
    672            .          .            
    673            .          .           	const doubleCheck = false 
    674            .          .           	if doubleCheck { 
    675            .          .           		srcRead := span.heapBitsSmallForAddr(x) 
    676            .          .           		if srcRead != src { 
    677            .          .           			print("runtime: x=", hex(x), " i=", i, " j=", j, " bits=", bits, "\n") 
    678            .          .           			print("runtime: dataSize=", dataSize, " typ.Size_=", typ.Size_, " typ.PtrBytes=", typ.PtrBytes, "\n") 
    679            .          .           			print("runtime: src0=", hex(src0), " src=", hex(src), " srcRead=", hex(srcRead), "\n") 
    680            .          .           			throw("bad pointer bits written for small object") 
    681            .          .           		} 
    682            .          .           	} 
    683        100ms      100ms           	return 
    684            .          .           } 
    685            .          .            
    686            .          .           // heapSetType* functions record that the new allocation [x, x+size) 
    687            .          .           // holds in [x, x+dataSize) one or more values of type typ. 
    688            .          .           // (The number of values is given by dataSize / typ.Size.) 
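
The hot part of writeHeapBitsSmall is the one-write/two-write split at lines 659-670. A self-contained sketch of just that masking logic, with bitmap, i, j, bits, and src playing the roles of the locals in the listing (a hypothetical standalone form, not runtime API):

    package main

    import "fmt"

    const ptrBits = 64

    // writeBits ORs the low "bits" bits of src into bitmap at bit offset j
    // of word i, splitting into two word writes when the run straddles a
    // word boundary, exactly as lines 659-670 do.
    func writeBits(bitmap []uint64, i, j, bits uint, src uint64) {
    	if j+bits > ptrBits {
    		// Two writes: bits0 bits go at the top of word i, the
    		// remaining bits1 bits at the bottom of word i+1.
    		bits0 := ptrBits - j
    		bits1 := bits - bits0
    		bitmap[i] = bitmap[i]&(^uint64(0)>>bits0) | (src << j)
    		bitmap[i+1] = bitmap[i+1]&^((1<<bits1)-1) | (src >> bits0)
    		return
    	}
    	// One write: clear the destination bit run, then OR src in.
    	bitmap[i] = bitmap[i]&^(((1<<bits)-1)<<j) | (src << j)
    }

    func main() {
    	b := make([]uint64, 2)
    	writeBits(b, 0, 60, 8, 0b10111011) // straddles the word boundary
    	fmt.Printf("%064b\n%064b\n", b[0], b[1])
    }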

runtime.heapSetTypeNoHeader

/usr/lib/go/src/runtime/mbitmap.go

  Total:        90ms      990ms (flat, cum)  2.08%
    704            .          .            
    705            .          .           func heapSetTypeNoHeader(x, dataSize uintptr, typ *_type, span *mspan) uintptr { 
    706            .          .           	if doubleCheckHeapSetType && (!heapBitsInSpan(dataSize) || !heapBitsInSpan(span.elemsize)) { 
    707            .          .           		throw("tried to write heap bits, but no heap bits in span") 
    708            .          .           	} 
    709         90ms      990ms           	scanSize := span.writeHeapBitsSmall(x, dataSize, typ) 
    710            .          .           	if doubleCheckHeapSetType { 
    711            .          .           		doubleCheckHeapType(x, dataSize, typ, nil, span) 
    712            .          .           	} 

runtime.heapSetTypeSmallHeader

/usr/lib/go/src/runtime/mbitmap.go

  Total:        30ms       30ms (flat, cum) 0.063%
    714            .          .           } 
    715            .          .            
    716            .          .           func heapSetTypeSmallHeader(x, dataSize uintptr, typ *_type, header **_type, span *mspan) uintptr { 
    717         30ms       30ms           	*header = typ 
    718            .          .           	if doubleCheckHeapSetType { 
    719            .          .           		doubleCheckHeapType(x, dataSize, typ, header, span) 
    720            .          .           	} 
    721            .          .           	return span.elemsize 
    722            .          .           } 

runtime.(*mspan).refillAllocCache

/usr/lib/go/src/runtime/mbitmap.go

  Total:       140ms      140ms (flat, cum)  0.29%
   1071            .          .           // refillAllocCache takes 8 bytes s.allocBits starting at whichByte 
   1072            .          .           // and negates them so that ctz (count trailing zeros) instructions 
   1073            .          .           // can be used. It then places these 8 bytes into the cached 64 bit 
   1074            .          .           // s.allocCache. 
   1075            .          .           func (s *mspan) refillAllocCache(whichByte uint16) { 
   1076        140ms      140ms           	bytes := (*[8]uint8)(unsafe.Pointer(s.allocBits.bytep(uintptr(whichByte)))) 
   1077            .          .           	aCache := uint64(0) 
   1078            .          .           	aCache |= uint64(bytes[0]) 
   1079            .          .           	aCache |= uint64(bytes[1]) << (1 * 8) 
   1080            .          .           	aCache |= uint64(bytes[2]) << (2 * 8) 
   1081            .          .           	aCache |= uint64(bytes[3]) << (3 * 8) 
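
The byte-assembly above builds a little-endian word; the step the listing truncates is the negation into s.allocCache described in the doc comment, which turns allocated bits into zeros so count-trailing-zeros finds the next free slot. A minimal sketch of the same transformation using the standard library in place of the manual ORs:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"math/bits"
    )

    func main() {
    	// 8 bytes of allocBits: the first 11 objects are allocated.
    	allocBits := []byte{0xff, 0x07, 0, 0, 0, 0, 0, 0}

    	// Pack little-endian and negate, so a 1 bit now means "free".
    	aCache := ^binary.LittleEndian.Uint64(allocBits)

    	fmt.Println(bits.TrailingZeros64(aCache)) // 11: next free index
    }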

runtime.(*mspan).nextFreeIndex

/usr/lib/go/src/runtime/mbitmap.go

  Total:        30ms      160ms (flat, cum)  0.34%
   1088            .          .            
   1089            .          .           // nextFreeIndex returns the index of the next free object in s at 
   1090            .          .           // or after s.freeindex. 
   1091            .          .           // There are hardware instructions that can be used to make this 
   1092            .          .           // faster if profiling warrants it. 
   1093         30ms      160ms           func (s *mspan) nextFreeIndex() uint16 { 
   1094            .          .           	sfreeindex := s.freeindex 
   1095            .          .           	snelems := s.nelems 
   1096            .          .           	if sfreeindex == snelems { 
   1097            .          .           		return sfreeindex 
   1098            .          .           	} 

runtime.(*mspan).nextFreeIndex

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms      100ms (flat, cum)  0.21%
   1122            .          .           	if result >= snelems { 
   1123            .          .           		s.freeindex = snelems 
   1124            .          .           		return snelems 
   1125            .          .           	} 
   1126            .          .            
   1127         20ms       20ms           	s.allocCache >>= uint(bitIndex + 1) 
   1128            .          .           	sfreeindex = result + 1 
   1129            .          .            
   1130            .          .           	if sfreeindex%64 == 0 && sfreeindex != snelems { 
   1131            .          .           		// We just incremented s.freeindex so it isn't 0. 
   1132            .          .           		// As each 1 in s.allocCache was encountered and used for allocation 
   1133            .          .           		// it was shifted away. At this point s.allocCache contains all 0s. 
   1134            .          .           		// Refill s.allocCache so that it corresponds 
   1135            .          .           		// to the bits at s.allocBits starting at s.freeindex. 
   1136            .          .           		whichByte := sfreeindex / 8 
   1137            .       80ms           		s.refillAllocCache(whichByte) 
   1138            .          .           	} 
   1139            .          .           	s.freeindex = sfreeindex 
   1140            .          .           	return result 
   1141            .          .           } 
   1142            .          .            
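
Putting the two halves of nextFreeIndex together: the cached word is scanned with a trailing-zeros count, the consumed bit is shifted away (line 1127), and a refill triggers only when freeindex crosses a 64-object boundary (line 1130). A standalone sketch of that control flow:

    package main

    import (
    	"fmt"
    	"math/bits"
    )

    func main() {
    	cache := uint64(0b100) // next free object is 2 slots past freeindex
    	freeindex := uint16(61)

    	tz := uint16(bits.TrailingZeros64(cache)) // hardware ctz on the cache
    	result := freeindex + tz                  // 63: the object we allocate
    	cache >>= uint(tz) + 1                    // shift the consumed bit away
    	freeindex = result + 1                    // 64

    	// freeindex%64 == 0 here, so the real code would now call
    	// refillAllocCache(freeindex/8) to load the next 8 allocBits bytes.
    	fmt.Println(result, freeindex, cache)
    }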

runtime.(*mspan).objIndex

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1201            .          .            
   1202            .          .           // nosplit, because it is called by other nosplit code like findObject 
   1203            .          .           // 
   1204            .          .           //go:nosplit 
   1205            .          .           func (s *mspan) objIndex(p uintptr) uintptr { 
   1206         10ms       10ms           	return s.divideByElemSize(p - s.base()) 
   1207            .          .           } 
   1208            .          .            
   1209            .          .           func markBitsForAddr(p uintptr) markBits { 
   1210            .          .           	s := spanOf(p) 
   1211            .          .           	objIndex := s.objIndex(p) 

runtime.findObject

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1344            .          .           			badPointer(s, p, refBase, refOff) 
   1345            .          .           		} 
   1346            .          .           		return 
   1347            .          .           	} 
   1348            .          .            
   1349         10ms       10ms           	objIndex = s.objIndex(p) 
   1350            .          .           	base = s.base() + objIndex*s.elemsize 
   1351            .          .           	return 
   1352            .          .           } 
   1353            .          .            
   1354            .          .           // reflect_verifyNotInHeapPtr reports whether converting the not-in-heap pointer into a unsafe.Pointer is ok. 

runtime.(*mspan).countAlloc

/usr/lib/go/src/runtime/mbitmap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1460            .          .           	for i := uintptr(0); i < bytes; i += 8 { 
   1461            .          .           		// Extract 64 bits from the byte pointer and get a OnesCount. 
   1462            .          .           		// Note that the unsafe cast here doesn't preserve endianness, 
   1463            .          .           		// but that's OK. We only care about how many bits are 1, not 
   1464            .          .           		// about the order we discover them in. 
   1465         10ms       10ms           		mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i))) 
   1466            .          .           		count += sys.OnesCount64(mrkBits) 
   1467            .          .           	} 
   1468            .          .           	return count 
   1469            .          .           } 
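
The comment above notes that the unsafe word load doesn't preserve endianness but that a population count doesn't care. A quick standalone check of that claim:

    package main

    import (
    	"encoding/binary"
    	"fmt"
    	"math/bits"
    )

    func main() {
    	// The same 8 bytes read in either byte order carry the same
    	// number of set bits, so OnesCount64 gives the same answer.
    	b := []byte{0x0f, 0, 0, 0, 0, 0, 0, 0x80}
    	le := binary.LittleEndian.Uint64(b)
    	be := binary.BigEndian.Uint64(b)
    	fmt.Println(bits.OnesCount64(le), bits.OnesCount64(be)) // 5 5
    }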

runtime.readUintptr

/usr/lib/go/src/runtime/mbitmap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1470            .          .            
   1471            .          .           // Read the bytes starting at the aligned pointer p into a uintptr. 
   1472            .          .           // Read is little-endian. 
   1473            .          .           func readUintptr(p *byte) uintptr { 
   1474         20ms       20ms           	x := *(*uintptr)(unsafe.Pointer(p)) 
   1475            .          .           	if goarch.BigEndian { 
   1476            .          .           		if goarch.PtrSize == 8 { 
   1477            .          .           			return uintptr(sys.Bswap64(uint64(x))) 
   1478            .          .           		} 
   1479            .          .           		return uintptr(sys.Bswap32(uint32(x))) 

runtime.memclrNoHeapPointers

/usr/lib/go/src/runtime/memclr_arm64.s

  Total:        20ms       20ms (flat, cum) 0.042%
     23            .          .           	MOVD	ZR, -8(R7) 
     24            .          .           	RET 
     25            .          .            
     26            .          .           less_than_8: 
     27            .          .           	TBZ	$2, R1, less_than_4 
     28         10ms       10ms           	MOVW	ZR, (R0) 
     29            .          .           	MOVW	ZR, -4(R7) 
     30            .          .           	RET 
     31            .          .            
     32            .          .           less_than_4: 
     33            .          .           	CBZ	R1, ending 
     34         10ms       10ms           	MOVB	ZR, (R0) 
     35            .          .           	TBZ	$1, R1, ending 
     36            .          .           	MOVH	ZR, -2(R7) 
     37            .          .            
     38            .          .           ending: 
     39            .          .           	RET 

runtime.memclrNoHeapPointers

/usr/lib/go/src/runtime/memclr_arm64.s

  Total:       200ms      200ms (flat, cum)  0.42%
     51            .          .           	BEQ	try_zva 
     52            .          .            
     53            .          .           	// Non-aligned store 
     54            .          .           	STP	(ZR, ZR), (R0) 
     55            .          .           	// Make the destination aligned 
     56         20ms       20ms           	SUB	R4, R1, R1 
     57            .          .           	ADD	R4, R0, R0 
     58            .          .           	B	try_zva 
     59            .          .            
     60            .          .           tail_maybe_long: 
     61         10ms       10ms           	CMP	$64, R1 
     62            .          .           	BHS	no_zva 
     63            .          .            
     64            .          .           tail63: 
     65            .          .           	ANDS	$48, R1, R3 
     66         10ms       10ms           	BEQ	last16 
     67            .          .           	CMPW	$32, R3 
     68            .          .           	BEQ	last48 
     69            .          .           	BLT	last32 
     70         10ms       10ms           	STP.P	(ZR, ZR), 16(R0) 
     71            .          .           last48: 
     72         30ms       30ms           	STP.P	(ZR, ZR), 16(R0) 
     73            .          .           last32: 
     74         10ms       10ms           	STP.P	(ZR, ZR), 16(R0) 
     75            .          .           	// The last store length is at most 16, so it is safe to use 
     76            .          .           	// stp to write last 16 bytes 
     77            .          .           last16: 
     78         10ms       10ms           	ANDS	$15, R1, R1 
     79            .          .           	CBZ	R1, last_end 
     80            .          .           	ADD	R1, R0, R0 
     81            .          .           	STP	(ZR, ZR), -16(R0) 
     82            .          .           last_end: 
     83         20ms       20ms           	RET 
     84            .          .            
     85            .          .           	PCALIGN	$16 
     86            .          .           no_zva: 
     87            .          .           	SUB	$16, R0, R0 
     88            .          .           	SUB	$64, R1, R1 
     89            .          .            
     90            .          .           loop_64: 
     91         20ms       20ms           	STP	(ZR, ZR), 16(R0) 
     92         40ms       40ms           	STP	(ZR, ZR), 32(R0) 
     93            .          .           	STP	(ZR, ZR), 48(R0) 
     94            .          .           	STP.W	(ZR, ZR), 64(R0) 
     95            .          .           	SUBS	$64, R1, R1 
     96            .          .           	BGE	loop_64 
     97            .          .           	ANDS	$63, R1, ZR 
     98            .          .           	ADD	$16, R0, R0 
     99            .          .           	BNE	tail63 
    100            .          .           	RET 
    101            .          .            
    102            .          .           	PCALIGN	$16 
    103            .          .           try_zva: 
    104            .          .           	// Try using the ZVA feature to zero entire cache lines 
    105            .          .           	// It is not meaningful to use ZVA if the block size is less than 64, 
    106            .          .           	// so make sure that n is greater than or equal to 64 
    107            .          .           	CMP	$63, R1 
    108         10ms       10ms           	BLE	tail63 
    109            .          .            
    110            .          .           	CMP	$128, R1 
    111            .          .           	// Ensure n is at least 128 bytes, so that there is enough to copy after 
    112            .          .           	// alignment. 
    113            .          .           	BLT	no_zva 
    114            .          .           	// Check if ZVA is allowed from user code, and if so get the block size 
    115            .          .           	MOVW	block_size<>(SB), R5 
    116            .          .           	TBNZ	$31, R5, no_zva 
    117         10ms       10ms           	CBNZ	R5, zero_by_line 
    118            .          .           	// DCZID_EL0 bit assignments 
    119            .          .           	// [63:5] Reserved 
    120            .          .           	// [4]    DZP, if bit set DC ZVA instruction is prohibited, else permitted 
    121            .          .           	// [3:0]  log2 of the block size in words, eg. if it returns 0x4 then block size is 16 words 
    122            .          .           	MRS	DCZID_EL0, R3 

runtime.memclrNoHeapPointers

/usr/lib/go/src/runtime/memclr_arm64.s

  Total:       620ms      620ms (flat, cum)  1.30%
    137            .          .           	// Block size is less than 64. 
    138            .          .           	BNE	no_zva 
    139            .          .            
    140            .          .           	PCALIGN	$16 
    141            .          .           zero_by_line: 
    142         10ms       10ms           	CMP	R5, R1 
    143            .          .           	// Not enough memory to reach alignment 
    144            .          .           	BLO	no_zva 
    145            .          .           	SUB	$1, R5, R6 
    146            .          .           	NEG	R0, R4 
    147            .          .           	ANDS	R6, R4, R4 
    148            .          .           	// Already aligned 
    149            .          .           	BEQ	aligned 
    150            .          .            
    151            .          .           	// check there is enough to copy after alignment 
    152            .          .           	SUB	R4, R1, R3 
    153            .          .            
    154            .          .           	// Check that the remaining length to ZVA after alignment 
    155            .          .           	// is greater than 64. 
    156         10ms       10ms           	CMP	$64, R3 
    157            .          .           	CCMP	GE, R3, R5, $10  // condition code GE, NZCV=0b1010 
    158            .          .           	BLT	no_zva 
    159            .          .            
    160            .          .           	// We now have at least 64 bytes to zero, update n 
    161         10ms       10ms           	MOVD	R3, R1 
    162            .          .            
    163            .          .           loop_zva_prolog: 
    164            .          .           	STP	(ZR, ZR), (R0) 
    165         20ms       20ms           	STP	(ZR, ZR), 16(R0) 
    166            .          .           	STP	(ZR, ZR), 32(R0) 
    167            .          .           	SUBS	$64, R4, R4 
    168            .          .           	STP	(ZR, ZR), 48(R0) 
    169            .          .           	ADD	$64, R0, R0 
    170            .          .           	BGE	loop_zva_prolog 
    171            .          .            
    172            .          .           	ADD	R4, R0, R0 
    173            .          .            
    174            .          .           aligned: 
    175         10ms       10ms           	SUB	R5, R1, R1 
    176            .          .            
    177            .          .           	PCALIGN	$16 
    178            .          .           loop_zva: 
    179        410ms      410ms           	WORD	$0xd50b7420 // DC ZVA, R0 
    180        150ms      150ms           	ADD	R5, R0, R0 
    181            .          .           	SUBS	R5, R1, R1 
    182            .          .           	BHS	loop_zva 
    183            .          .           	ANDS	R6, R1, R1 
    184            .          .           	BNE	tail_maybe_long 
    185            .          .           	RET 
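
For reference, the control flow of the assembly above in Go-like form: plain stores until the destination is cache-line aligned, then one block-zeroing operation (DC ZVA on arm64) per line, then plain stores for the tail. A sketch under simplifying assumptions (index alignment instead of address alignment, fixed 64-byte lines); zeroLine is a hypothetical stand-in for DC ZVA:

    package main

    import "fmt"

    const lineSize = 64 // ZVA block size; the real code reads DCZID_EL0

    // zeroLine stands in for "DC ZVA, R0": zero one aligned cache line.
    func zeroLine(buf []byte, off int) {
    	for i := 0; i < lineSize; i++ {
    		buf[off+i] = 0
    	}
    }

    func memclr(buf []byte) {
    	off, n := 0, len(buf)
    	for off < n && off%lineSize != 0 { // prolog: align with plain stores
    		buf[off] = 0
    		off++
    	}
    	for off+lineSize <= n { // main loop: one block zero per line
    		zeroLine(buf, off)
    		off += lineSize
    	}
    	for ; off < n; off++ { // tail: plain stores for the remainder
    		buf[off] = 0
    	}
    }

    func main() {
    	b := make([]byte, 200)
    	for i := range b {
    		b[i] = 0xff
    	}
    	memclr(b)
    	fmt.Println(b[0], b[100], b[199]) // 0 0 0
    }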

runtime.(*unwinder).init

/usr/lib/go/src/runtime/traceback.go

  Total:           0      340ms (flat, cum)  0.71%
    124            .          .           	// provide a "valid" method. Alternatively, this could start in a "before 
    125            .          .           	// the first frame" state and "next" could return whether it was able to 
    126            .          .           	// move to the next frame, but that's both more awkward to use in a "for" 
    127            .          .           	// loop and is harder to implement because we have to do things differently 
    128            .          .           	// for the first frame. 
    129            .      340ms           	u.initAt(^uintptr(0), ^uintptr(0), ^uintptr(0), gp, flags) 
    130            .          .           } 

runtime.(*unwinder).initAt

/usr/lib/go/src/runtime/traceback.go

  Total:        20ms       20ms (flat, cum) 0.042%
    131            .          .            
    132         20ms       20ms           func (u *unwinder) initAt(pc0, sp0, lr0 uintptr, gp *g, flags unwindFlags) { 
    133            .          .           	// Don't call this "g"; it's too easy get "g" and "gp" confused. 
    134            .          .           	if ourg := getg(); ourg == gp && ourg == ourg.m.curg { 
    135            .          .           		// The starting sp has been passed in as a uintptr, and the caller may 
    136            .          .           		// have other uintptr-typed stack references as well. 
    137            .          .           		// If during one of the calls that got us here or during one of the 

runtime.(*unwinder).initAt

/usr/lib/go/src/runtime/traceback.go

  Total:        50ms      320ms (flat, cum)  0.67%
    195            .          .           		// LR are not touched. 
    196            .          .           		frame.pc = frame.lr 
    197            .          .           		frame.lr = 0 
    198            .          .           	} 
    199            .          .            
    200            .       20ms           	f := findfunc(frame.pc) 
    201            .          .           	if !f.valid() { 
    202            .          .           		if flags&unwindSilentErrors == 0 { 
    203            .          .           			print("runtime: g ", gp.goid, " gp=", gp, ": unknown pc ", hex(frame.pc), "\n") 
    204            .          .           			tracebackHexdump(gp.stack, &frame, 0) 
    205            .          .           		} 
    206            .          .           		if flags&(unwindPrintErrors|unwindSilentErrors) == 0 { 
    207            .          .           			throw("unknown pc") 
    208            .          .           		} 
    209            .          .           		*u = unwinder{} 
    210            .          .           		return 
    211            .          .           	} 
    212            .          .           	frame.fn = f 
    213            .          .            
    214            .          .           	// Populate the unwinder. 
    215            .       10ms           	*u = unwinder{ 
    216            .          .           		frame:        frame, 
    217            .          .           		g:            gp.guintptr(), 
    218         40ms       40ms           		cgoCtxt:      len(gp.cgoCtxt) - 1, 
    219            .          .           		calleeFuncID: abi.FuncIDNormal, 
    220            .          .           		flags:        flags, 
    221            .          .           	} 
    222            .          .            
    223         10ms       10ms           	isSyscall := frame.pc == pc0 && frame.sp == sp0 && pc0 == gp.syscallpc && sp0 == gp.syscallsp 
    224            .      240ms           	u.resolveInternal(true, isSyscall) 
    225            .          .           } 

runtime.(*unwinder).valid

/usr/lib/go/src/runtime/traceback.go

  Total:        20ms       20ms (flat, cum) 0.042%
    227            .          .           func (u *unwinder) valid() bool { 
    228         20ms       20ms           	return u.frame.pc != 0 
    229            .          .           } 
    230            .          .            
    231            .          .           // resolveInternal fills in u.frame based on u.frame.fn, pc, and sp. 
    232            .          .           // 
    233            .          .           // innermost indicates that this is the first resolve on this stack. If 
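
The unwinder is used as a cursor: initialize it, then advance with next() while valid() holds, as tracebackPCs further down shows. A toy analogue with the same shape (pc == 0 is the "invalid" sentinel that finishInternal sets); this is a hypothetical stand-in, not the runtime type:

    package main

    import "fmt"

    type unwinder struct{ pcs []uintptr }

    func (u *unwinder) valid() bool { return len(u.pcs) > 0 && u.pcs[0] != 0 }
    func (u *unwinder) next()       { u.pcs = u.pcs[1:] }
    func (u *unwinder) pc() uintptr { return u.pcs[0] }

    func main() {
    	u := unwinder{pcs: []uintptr{0x1000, 0x2000, 0x3000, 0}} // 0 ends it
    	for ; u.valid(); u.next() { // same loop shape as tracebackPCs
    		fmt.Printf("frame pc %#x\n", u.pc())
    	}
    }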

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:       310ms      310ms (flat, cum)  0.65%
    247            .          .           // 
    248            .          .           // If fn is a stack-jumping function, resolveInternal can change the entire 
    249            .          .           // frame state to follow that stack jump. 
    250            .          .           // 
    251            .          .           // This is internal to unwinder. 
    252         10ms       10ms           func (u *unwinder) resolveInternal(innermost, isSyscall bool) { 
    253            .          .           	frame := &u.frame 
    254            .          .           	gp := u.g.ptr() 
    255            .          .            
    256            .          .           	f := frame.fn 
    257        220ms      220ms           	if f.pcsp == 0 { 
    258            .          .           		// No frame information, must be external function, like race support. 
    259            .          .           		// See golang.org/issue/13568. 
    260            .          .           		u.finishInternal() 
    261            .          .           		return 
    262            .          .           	} 
    263            .          .            
    264            .          .           	// Compute function info flags. 
    265         10ms       10ms           	flag := f.flag 
    266         70ms       70ms           	if f.funcID == abi.FuncID_cgocallback { 
    267            .          .           		// cgocallback does write SP to switch from the g0 to the curg stack, 
    268            .          .           		// but it carefully arranges that during the transition BOTH stacks 
    269            .          .           		// have cgocallback frame valid for unwinding through. 
    270            .          .           		// So we don't need to exclude it with the other SP-writing functions. 
    271            .          .           		flag &^= abi.FuncFlagSPWrite 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        70ms      1.75s (flat, cum)  3.67%
    321            .          .           				frame.sp = gp.sched.sp 
    322            .          .           				u.cgoCtxt = len(gp.cgoCtxt) - 1 
    323            .          .           				flag &^= abi.FuncFlagSPWrite 
    324            .          .           			} 
    325            .          .           		} 
    326         70ms      1.75s           		frame.fp = frame.sp + uintptr(funcspdelta(f, frame.pc)) 
    327            .          .           		if !usesLR { 
    328            .          .           			// On x86, call instruction pushes return PC before entering new function. 
    329            .          .           			frame.fp += goarch.PtrSize 
    330            .          .           		} 
    331            .          .           	} 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        40ms       40ms (flat, cum) 0.084%
    366            .          .           		} 
    367            .          .           		frame.lr = 0 
    368            .          .           	} else { 
    369            .          .           		var lrPtr uintptr 
    370            .          .           		if usesLR { 
    371         20ms       20ms           			if innermost && frame.sp < frame.fp || frame.lr == 0 { 
    372            .          .           				lrPtr = frame.sp 
    373         20ms       20ms           				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr)) 
    374            .          .           			} 
    375            .          .           		} else { 
    376            .          .           			if frame.lr == 0 { 
    377            .          .           				lrPtr = frame.fp - goarch.PtrSize 
    378            .          .           				frame.lr = *(*uintptr)(unsafe.Pointer(lrPtr)) 

runtime.(*unwinder).resolveInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        30ms       30ms (flat, cum) 0.063%
    417            .          .           	// the function either doesn't return at all (if it has no defers or if the 
    418            .          .           	// defers do not recover) or it returns from one of the calls to 
    419            .          .           	// deferproc a second time (if the corresponding deferred func recovers). 
    420            .          .           	// In the latter case, use a deferreturn call site as the continuation pc. 
    421            .          .           	frame.continpc = frame.pc 
    422         30ms       30ms           	if u.calleeFuncID == abi.FuncID_sigpanic { 
    423            .          .           		if frame.fn.deferreturn != 0 { 
    424            .          .           			frame.continpc = frame.fn.entry() + uintptr(frame.fn.deferreturn) + 1 
    425            .          .           			// Note: this may perhaps keep return variables alive longer than 
    426            .          .           			// strictly necessary, as we are using "function has a defer statement" 
    427            .          .           			// as a proxy for "function actually deferred something". It seems 

runtime.(*unwinder).next

/usr/lib/go/src/runtime/traceback.go

  Total:        60ms      540ms (flat, cum)  1.13%
    435            .          .           			frame.continpc = 0 
    436            .          .           		} 
    437            .          .           	} 
    438            .          .           } 
    439            .          .            
    440         40ms       40ms           func (u *unwinder) next() { 
    441            .          .           	frame := &u.frame 
    442         10ms       10ms           	f := frame.fn 
    443            .          .           	gp := u.g.ptr() 
    444            .          .            
    445            .          .           	// Do not unwind past the bottom of the stack. 
    446         10ms       10ms           	if frame.lr == 0 { 
    447            .       20ms           		u.finishInternal() 
    448            .          .           		return 
    449            .          .           	} 
    450            .      460ms           	flr := findfunc(frame.lr) 
    451            .          .           	if !flr.valid() { 
    452            .          .           		// This happens if you get a profiling interrupt at just the wrong time. 
    453            .          .           		// In that context it is okay to stop early. 
    454            .          .           		// But if no error flags are set, we're doing a garbage collection and must 
    455            .          .           		// get everything, so crash loudly. 

runtime.(*unwinder).next

/usr/lib/go/src/runtime/traceback.go

  Total:        40ms       40ms (flat, cum) 0.084%
    479            .          .           		print("runtime: traceback stuck. pc=", hex(frame.pc), " sp=", hex(frame.sp), "\n") 
    480            .          .           		tracebackHexdump(gp.stack, frame, frame.sp) 
    481            .          .           		throw("traceback stuck") 
    482            .          .           	} 
    483            .          .            
    484         40ms       40ms           	injectedCall := f.funcID == abi.FuncID_sigpanic || f.funcID == abi.FuncID_asyncPreempt || f.funcID == abi.FuncID_debugCallV2 
    485            .          .           	if injectedCall { 
    486            .          .           		u.flags |= unwindTrap 
    487            .          .           	} else { 
    488            .          .           		u.flags &^= unwindTrap 
    489            .          .           	} 

runtime.(*unwinder).next

/usr/lib/go/src/runtime/traceback.go

  Total:        10ms      1.90s (flat, cum)  3.99%
    508            .          .           		} else if funcspdelta(f, frame.pc) == 0 { 
    509            .          .           			frame.lr = x 
    510            .          .           		} 
    511            .          .           	} 
    512            .          .            
    513            .      1.89s           	u.resolveInternal(false, false) 
    514         10ms       10ms           } 
    515            .          .            
    516            .          .           // finishInternal is an unwinder-internal helper called after the stack has been 

runtime.(*unwinder).finishInternal

/usr/lib/go/src/runtime/traceback.go

  Total:        20ms       20ms (flat, cum) 0.042%
    517            .          .           // exhausted. It sets the unwinder to an invalid state and checks that it 
    518            .          .           // successfully unwound the entire stack. 
    519         20ms       20ms           func (u *unwinder) finishInternal() { 
    520            .          .           	u.frame.pc = 0 
    521            .          .            
    522            .          .           	// Note that panic != nil is okay here: there can be leftover panics, 
    523            .          .           	// because the defers on the panic stack do not nest in frame order as 
    524            .          .           	// they do on the defer stack. If you have: 

runtime.tracebackPCs

/usr/lib/go/src/runtime/traceback.go

  Total:        30ms      120ms (flat, cum)  0.25%
    618            .          .           // 
    619            .          .           // Callers should set the unwindSilentErrors flag on u. 
    620            .          .           func tracebackPCs(u *unwinder, skip int, pcBuf []uintptr) int { 
    621            .          .           	var cgoBuf [32]uintptr 
    622            .          .           	n := 0 
    623            .       60ms           	for ; n < len(pcBuf) && u.valid(); u.next() { 
    624            .          .           		f := u.frame.fn 
    625            .          .           		cgoN := u.cgoCallers(cgoBuf[:]) 
    626            .          .            
    627            .          .           		// TODO: Why does &u.cache cause u to escape? (Same in traceback2) 
    628            .       30ms           		for iu, uf := newInlineUnwinder(f, u.symPC()); n < len(pcBuf) && uf.valid(); uf = iu.next(uf) { 
    629         30ms       30ms           			sf := iu.srcFunc(uf) 
    630            .          .           			if sf.funcID == abi.FuncIDWrapper && elideWrapperCalling(u.calleeFuncID) { 
    631            .          .           				// ignore wrappers 
    632            .          .           			} else if skip > 0 { 
    633            .          .           				skip-- 
    634            .          .           			} else { 

runtime.callers

/usr/lib/go/src/runtime/traceback.go

  Total:           0      120ms (flat, cum)  0.25%
   1092            .          .           func callers(skip int, pcbuf []uintptr) int { 
   1093            .          .           	sp := sys.GetCallerSP() 
   1094            .          .           	pc := sys.GetCallerPC() 
   1095            .          .           	gp := getg() 
   1096            .          .           	var n int 
   1097            .      120ms           	systemstack(func() { 
   1098            .          .           		var u unwinder 

runtime.callers.func1

/usr/lib/go/src/runtime/traceback.go

  Total:           0      120ms (flat, cum)  0.25%
   1099            .          .           		u.initAt(pc, sp, 0, gp, unwindSilentErrors) 
   1100            .      120ms           		n = tracebackPCs(&u, skip, pcbuf) 
   1101            .          .           	}) 
   1102            .          .           	return n 
   1103            .          .           } 
   1104            .          .            
   1105            .          .           func gcallers(gp *g, skip int, pcbuf []uintptr) int { 

runtime.isSystemGoroutine

/usr/lib/go/src/runtime/traceback.go

  Total:       100ms      250ms (flat, cum)  0.52%
   1362            .          .           // runtime.runFinalizers/runtime.runCleanups. 
   1363            .          .           // 
   1364            .          .           // If fixed is true, any goroutine that can vary between user and 
   1365            .          .           // system (that is, the finalizer goroutine) is considered a user 
   1366            .          .           // goroutine. 
   1367         10ms       10ms           func isSystemGoroutine(gp *g, fixed bool) bool { 
   1368            .          .           	// Keep this in sync with internal/trace.IsSystemGoroutine. 
   1369         60ms      170ms           	f := findfunc(gp.startpc) 
   1370            .          .           	if !f.valid() { 
   1371            .          .           		return false 
   1372            .          .           	} 
   1373         10ms       10ms           	if f.funcID == abi.FuncID_runtime_main || f.funcID == abi.FuncID_corostart || f.funcID == abi.FuncID_handleAsyncEvent { 
   1374            .          .           		return false 
   1375            .          .           	} 
   1376         10ms       10ms           	if f.funcID == abi.FuncID_runFinalizers { 
   1377            .          .           		// We include the finalizer goroutine if it's calling 
   1378            .          .           		// back into user code. 
   1379            .          .           		if fixed { 
   1380            .          .           			// This goroutine can vary. In fixed mode, 
   1381            .          .           			// always consider it a user goroutine. 
   1382            .          .           			return false 
   1383            .          .           		} 
   1384            .          .           		return fingStatus.Load()&fingRunningFinalizer == 0 
   1385            .          .           	} 
   1386            .          .           	if f.funcID == abi.FuncID_runCleanups { 
   1387            .          .           		// We include the cleanup goroutines if they're calling 
   1388            .          .           		// back into user code. 
   1389            .          .           		if fixed { 
   1390            .          .           			// This goroutine can vary. In fixed mode, 
   1391            .          .           			// always consider it a user goroutine. 
   1392            .          .           			return false 
   1393            .          .           		} 
   1394            .          .           		return !gp.runningCleanups.Load() 
   1395            .          .           	} 
   1396         10ms       50ms           	return stringslite.HasPrefix(funcname(f), "runtime.") 
   1397            .          .           } 
   1398            .          .            
   1399            .          .           // SetCgoTraceback records three C functions to use to gather 
   1400            .          .           // traceback information from C code and to convert that traceback 
   1401            .          .           // information into symbolic information. These are used when printing 

runtime.(*mSpanStateBox).get

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
    412            .          .           // It is nosplit because it's called indirectly by typedmemclr, 
    413            .          .           // which must not be preempted. 
    414            .          .            
    415            .          .           //go:nosplit 
    416            .          .           func (b *mSpanStateBox) get() mSpanState { 
    417         20ms       20ms           	return mSpanState(b.s.Load()) 
    418            .          .           } 
    419            .          .            
    420            .          .           type mspan struct { 
    421            .          .           	_    sys.NotInHeap 
    422            .          .           	next *mspan     // next span in list, or nil if none 

runtime.(*mspan).base

/usr/lib/go/src/runtime/mheap.go

  Total:        50ms       50ms (flat, cum)   0.1%
    518            .          .           	userArenaChunkFree    addrRange     // interval for managing chunk allocation 
    519            .          .           	largeType             *_type        // malloc header for large objects. 
    520            .          .           } 
    521            .          .            
    522            .          .           func (s *mspan) base() uintptr { 
    523         50ms       50ms           	return s.startAddr 
    524            .          .           } 
    525            .          .            
    526            .          .           func (s *mspan) layout() (size, n, total uintptr) { 
    527            .          .           	total = s.npages << gc.PageShift 
    528            .          .           	size = s.elemsize 

runtime.makeSpanClass

/usr/lib/go/src/runtime/mheap.go

  Total:       150ms      150ms (flat, cum)  0.31%
    589            .          .           	numSpanClasses = gc.NumSizeClasses << 1 
    590            .          .           	tinySpanClass  = spanClass(tinySizeClass<<1 | 1) 
    591            .          .           ) 
    592            .          .            
    593            .          .           func makeSpanClass(sizeclass uint8, noscan bool) spanClass { 
    594        150ms      150ms           	return spanClass(sizeclass<<1) | spanClass(bool2int(noscan)) 
    595            .          .           } 
    596            .          .            
    597            .          .           //go:nosplit 
    598            .          .           func (sc spanClass) sizeclass() int8 { 
    599            .          .           	return int8(sc >> 1) 
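
A quick check of the span-class packing above: the size class lives in the high bits and the noscan flag in bit 0, so sizeclass() is a shift and noscan() masks bit 0. A self-contained toy mirroring the two functions, with bool2int replaced by an explicit branch:

    package main

    import "fmt"

    type spanClass uint8

    func makeSpanClass(sizeclass uint8, noscan bool) spanClass {
    	sc := spanClass(sizeclass << 1)
    	if noscan {
    		sc |= 1
    	}
    	return sc
    }

    func (sc spanClass) sizeclass() int8 { return int8(sc >> 1) }
    func (sc spanClass) noscan() bool    { return sc&1 != 0 }

    func main() {
    	sc := makeSpanClass(5, true)
    	fmt.Println(sc.sizeclass(), sc.noscan()) // 5 true
    }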

runtime.arenaIndex

/usr/lib/go/src/runtime/mheap.go

  Total:        40ms       40ms (flat, cum) 0.084%
    615            .          .           // It is nosplit because it's called by spanOf and several other 
    616            .          .           // nosplit functions. 
    617            .          .           // 
    618            .          .           //go:nosplit 
    619            .          .           func arenaIndex(p uintptr) arenaIdx { 
    620         40ms       40ms           	return arenaIdx((p - arenaBaseOffset) / heapArenaBytes) 
    621            .          .           } 
    622            .          .            
    623            .          .           // arenaBase returns the low address of the region covered by heap 
    624            .          .           // arena i. 
    625            .          .           func arenaBase(i arenaIdx) uintptr { 

runtime.spanOfUnchecked

/usr/lib/go/src/runtime/mheap.go

  Total:        50ms       50ms (flat, cum)   0.1%
    731            .          .           // 
    732            .          .           // Must be nosplit because it has callers that are nosplit. 
    733            .          .           // 
    734            .          .           //go:nosplit 
    735            .          .           func spanOfUnchecked(p uintptr) *mspan { 
    736         20ms       20ms           	ai := arenaIndex(p) 
    737         30ms       30ms           	return mheap_.arenas[ai.l1()][ai.l2()].spans[(p/pageSize)%pagesPerArena] 
    738            .          .           } 
    739            .          .            
    740            .          .           // spanOfHeap is like spanOf, but returns nil if p does not point to a 
    741            .          .           // heap object. 
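
A sketch of the address-to-span lookup above: the heap is covered by fixed-size arenas, arenaIndex picks the arena, and (p/pageSize)%pagesPerArena picks the page's span slot within it. A flat single-level map is used here for brevity (the real table is two-level via ai.l1()/ai.l2()), arenaBaseOffset is omitted, and the constants assume a typical 64-bit Linux target:

    package main

    import "fmt"

    const (
    	pageSize       = 8192
    	heapArenaBytes = 64 << 20 // 64 MiB arenas, assumed
    	pagesPerArena  = heapArenaBytes / pageSize
    )

    // locate returns the arena index and the page's span slot, mirroring
    // the indexing expression at line 737 in flattened form.
    func locate(p uintptr) (arena, slot uintptr) {
    	arena = p / heapArenaBytes
    	slot = (p / pageSize) % pagesPerArena
    	return
    }

    func main() {
    	fmt.Println(locate(64<<20 + 3*8192 + 100)) // arena 1, slot 3
    }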

runtime.spanOfHeap

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
    743            .          .           // Must be nosplit because it has callers that are nosplit. 
    744            .          .           // 
    745            .          .           //go:nosplit 
    746            .          .           func spanOfHeap(p uintptr) *mspan { 
    747         10ms       10ms           	s := spanOf(p) 
    748            .          .           	// s is nil if it's never been allocated. Otherwise, we check 
    749            .          .           	// its state first because we don't trust this pointer, so we 
    750            .          .           	// have to synchronize with span initialization. Then, it's 
    751            .          .           	// still possible we picked up a stale span pointer, so we 
    752            .          .           	// have to check the span's bounds. 

runtime.(*mheap).alloc

/usr/lib/go/src/runtime/mheap.go

  Total:           0       20ms (flat, cum) 0.042%
   1001            .          .           // 
   1002            .          .           // spanclass indicates the span's size class and scannability. 
   1003            .          .           // 
   1004            .          .           // Returns a span that has been fully initialized. span.needzero indicates 
   1005            .          .           // whether the span has been zeroed. Note that it may not be. 
   1006            .       20ms           func (h *mheap) alloc(npages uintptr, spanclass spanClass) *mspan { 
   1007            .          .           	// Don't do any operations that lock the heap on the G stack. 
   1008            .          .           	// It might trigger stack growth, and the stack growth code needs 

runtime.(*mheap).alloc.func1

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms      1.08s (flat, cum)  2.27%
   1009            .          .           	// to be able to allocate heap. 
   1010            .          .           	var s *mspan 
   1011         20ms      560ms           	systemstack(func() { 
   1012            .          .           		// To prevent excessive heap growth, before allocating n pages 
   1013            .          .           		// we need to sweep and reclaim at least n pages. 
   1014            .          .           		if !isSweepDone() { 
   1015            .          .           			h.reclaim(npages) 
   1016            .          .           		} 
   1017            .      520ms           		s = h.allocSpan(npages, spanAllocHeap, spanclass) 
   1018            .          .           	}) 
   1019            .          .           	return s 
   1020            .          .           } 
   1021            .          .            
   1022            .          .           // allocManual allocates a manually-managed span of npage pages. 

runtime.(*mheap).setSpans

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1055            .          .           			ai = arenaIndex(base + n*pageSize) 
   1056            .          .           			ha = h.arenas[ai.l1()][ai.l2()] 
   1057            .          .           		} 
   1058            .          .           		ha.spans[i] = s 
   1059            .          .           	} 
   1060         20ms       20ms           } 
   1061            .          .            
   1062            .          .           // allocNeedsZero checks if the region of address space [base, base+npage*pageSize), 
   1063            .          .           // assumed to be allocated, needs to be zeroed, updating heap arena metadata for 
   1064            .          .           // future allocations. 
   1065            .          .           // 

runtime.(*mheap).allocNeedsZero

/usr/lib/go/src/runtime/mheap.go

  Total:       100ms      100ms (flat, cum)  0.21%
   1069            .          .           // critical for future page allocations. 
   1070            .          .           // 
   1071            .          .           // There are no locking constraints on this method. 
   1072            .          .           func (h *mheap) allocNeedsZero(base, npage uintptr) (needZero bool) { 
   1073            .          .           	for npage > 0 { 
    1074         20ms       20ms           		ai := arenaIndex(base) 
                                                               return arenaIdx((p - arenaBaseOffset) / heapArenaBytes)      mheap.go:620

   1075            .          .           		ha := h.arenas[ai.l1()][ai.l2()] 
   1076            .          .            
   1077         10ms       10ms           		zeroedBase := atomic.Loaduintptr(&ha.zeroedBase) 
   1078         70ms       70ms           		arenaBase := base % heapArenaBytes 
   1079            .          .           		if arenaBase < zeroedBase { 
   1080            .          .           			// We extended into the non-zeroed part of the 
   1081            .          .           			// arena, so this region needs to be zeroed before use. 
   1082            .          .           			// 
   1083            .          .           			// zeroedBase is monotonically increasing, so if we see this now then 

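allocNeedsZero above decides whether a fresh span must be zeroed by comparing against a per-arena zeroedBase watermark: the watermark only grows, memory below it may have been handed out before (possibly dirty), and memory above it is untouched, already-zero OS memory. A hedged user-level sketch of that idea, with an invented arena type and size:

package main

import (
	"fmt"
	"sync/atomic"
)

const arenaSize = 1 << 20 // illustrative only

type arena struct {
	zeroedBase atomic.Uintptr // offset of the lowest never-allocated byte
}

// needsZero reports whether [off, off+n) must be zeroed before use,
// and advances the watermark to cover the allocation.
func (a *arena) needsZero(off, n uintptr) bool {
	needZero := off < a.zeroedBase.Load()
	for {
		old := a.zeroedBase.Load()
		if off+n <= old {
			break // watermark already past us; monotonic, never lowered
		}
		if a.zeroedBase.CompareAndSwap(old, off+n) {
			break
		}
	}
	return needZero
}

func main() {
	var a arena
	fmt.Println(a.needsZero(0, 4096))    // false: fresh OS memory
	fmt.Println(a.needsZero(4096, 4096)) // false: above the old watermark
	fmt.Println(a.needsZero(0, 4096))    // true: below the watermark, reused
}
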
runtime.(*mheap).tryAllocMSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1141            .          .           	if pp == nil || pp.mspancache.len == 0 { 
   1142            .          .           		return nil 
   1143            .          .           	} 
   1144            .          .           	// Pull off the last entry in the cache. 
   1145            .          .           	s := pp.mspancache.buf[pp.mspancache.len-1] 
   1146         10ms       10ms           	pp.mspancache.len-- 
   1147            .          .           	return s 
   1148            .          .           } 
   1149            .          .            
   1150            .          .           // allocMSpanLocked allocates an mspan object. 
   1151            .          .           // 

runtime.(*mheap).allocMSpanLocked

/usr/lib/go/src/runtime/mheap.go

  Total:           0      110ms (flat, cum)  0.23%
   1167            .          .           	} 
   1168            .          .           	// Refill the cache if necessary. 
   1169            .          .           	if pp.mspancache.len == 0 { 
   1170            .          .           		const refillCount = len(pp.mspancache.buf) / 2 
   1171            .          .           		for i := 0; i < refillCount; i++ { 
   1172            .      110ms           			pp.mspancache.buf[i] = (*mspan)(h.spanalloc.alloc()) 
   1173            .          .           		} 
   1174            .          .           		pp.mspancache.len = refillCount 
   1175            .          .           	} 
   1176            .          .           	// Pull off the last entry in the cache. 
   1177            .          .           	s := pp.mspancache.buf[pp.mspancache.len-1] 

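tryAllocMSpan and allocMSpanLocked above form a classic two-level allocator: a small per-P buffer is popped with no locking (it is owned by one P), and is refilled to half capacity from the locked central fixalloc only when it runs dry. A sketch of the same pattern with invented types; a plain counter stands in for h.spanalloc under h.lock.

package main

import "fmt"

type cache struct {
	buf [16]*int
	len int
}

var allocated int

func centralAlloc() *int { // stands in for h.spanalloc.alloc() under h.lock
	allocated++
	v := new(int)
	*v = allocated
	return v
}

// tryGet is the lock-free fast path, like tryAllocMSpan.
func (c *cache) tryGet() *int {
	if c.len == 0 {
		return nil
	}
	c.len--
	return c.buf[c.len]
}

// get refills half the buffer when empty, like allocMSpanLocked.
func (c *cache) get() *int {
	if c.len == 0 {
		const refill = len(c.buf) / 2
		for i := 0; i < refill; i++ {
			c.buf[i] = centralAlloc()
		}
		c.len = refill
	}
	c.len--
	return c.buf[c.len]
}

func main() {
	var c cache
	fmt.Println(c.tryGet() == nil) // true: cache empty, caller must lock
	v := c.get()                   // refills 8 entries, hands one out
	fmt.Println(*v, c.len)         // 8 7
}
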
runtime.(*mheap).freeMSpanLocked

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       20ms (flat, cum) 0.042%
   1199            .          .           		pp.mspancache.len++ 
   1200            .          .           		return 
   1201            .          .           	} 
   1202            .          .           	// Failing that (or if we don't have a p), just free it to 
   1203            .          .           	// the heap. 
    1204         20ms       20ms           	h.spanalloc.free(unsafe.Pointer(s)) 
                                                               f.inuse -= f.size                                            mfixalloc.go:105
   1205            .          .           } 
   1206            .          .            
   1207            .          .           // allocSpan allocates an mspan which owns npages worth of memory. 
   1208            .          .           // 
   1209            .          .           // If typ.manual() == false, allocSpan allocates a heap span of class spanclass 

runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        30ms       60ms (flat, cum)  0.13%
   1219            .          .           // 
   1220            .          .           // allocSpan must be called on the system stack both because it acquires 
   1221            .          .           // the heap lock and because it must block GC transitions. 
   1222            .          .           // 
   1223            .          .           //go:systemstack 
   1224         10ms       10ms           func (h *mheap) allocSpan(npages uintptr, typ spanAllocType, spanclass spanClass) (s *mspan) { 
   1225            .          .           	// Function-global state. 
   1226            .          .           	gp := getg() 
   1227            .          .           	base, scav := uintptr(0), uintptr(0) 
   1228            .          .           	growth := uintptr(0) 
   1229            .          .            
   1230            .          .           	// On some platforms we need to provide physical page aligned stack 
   1231            .          .           	// allocations. Where the page size is less than the physical page 
   1232            .          .           	// size, we already manage to do this by default. 
   1233            .          .           	needPhysPageAlign := physPageAlignedStacks && typ == spanAllocStack && pageSize < physPageSize 
   1234            .          .            
   1235            .          .           	// If the allocation is small enough, try the page cache! 
   1236            .          .           	// The page cache does not support aligned allocations, so we cannot use 
   1237            .          .           	// it if we need to provide a physical page aligned stack allocation. 
   1238            .          .           	pp := gp.m.p.ptr() 
   1239         10ms       10ms           	if !needPhysPageAlign && pp != nil && npages < pageCachePages/4 { 
   1240            .          .           		c := &pp.pcache 
   1241            .          .            
   1242            .          .           		// If the cache is empty, refill it. 
   1243            .          .           		if c.empty() { 
   1244            .          .           			lock(&h.lock) 
   1245            .       20ms           			*c = h.pages.allocToCache() 
   1246            .          .           			unlock(&h.lock) 
   1247            .          .           		} 
   1248            .          .            
   1249            .          .           		// Try to allocate from the cache. 
   1250            .       10ms           		base, scav = c.alloc(npages) 
   1251            .          .           		if base != 0 { 
    1252         10ms       10ms           			s = h.tryAllocMSpan() 
                                                               pp.mspancache.len--                                          mheap.go:1146

   1253            .          .           			if s != nil { 
   1254            .          .           				goto HaveSpan 
   1255            .          .           			} 
   1256            .          .           			// We have a base but no mspan, so we need 
   1257            .          .           			// to lock the heap. 

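The fast path above serves small allocations from a per-P page cache (pp.pcache) without taking the heap lock. Below is a sketch of a 64-page bitmap cache in that spirit, using bit scans to find free runs; the layout is illustrative and omits the scavenged-page bitmap the real pageCache also carries.

package main

import (
	"fmt"
	"math/bits"
)

const pageSize = 8192

// A base address plus a 64-bit bitmap of free pages, usable without locks
// because the cache is owned by a single P.
type pageCache struct {
	base  uintptr // address of the first page in the cache
	cache uint64  // bit i set means page i is free
}

func (c *pageCache) empty() bool { return c.cache == 0 }

// alloc takes npages contiguous free pages, returning 0 if none fit.
func (c *pageCache) alloc(npages uint) uintptr {
	if npages == 1 {
		i := uint(bits.TrailingZeros64(c.cache))
		if i == 64 {
			return 0
		}
		c.cache &^= 1 << i
		return c.base + uintptr(i)*pageSize
	}
	// Find a run of npages set bits by AND-ing shifted copies: run bit i
	// survives only if bits i..i+npages-1 are all set.
	run := c.cache
	for shift := uint(1); shift < npages; shift++ {
		run &= c.cache >> shift
	}
	i := uint(bits.TrailingZeros64(run))
	if i == 64 {
		return 0
	}
	mask := (uint64(1)<<npages - 1) << i
	c.cache &^= mask
	return c.base + uintptr(i)*pageSize
}

func main() {
	c := pageCache{base: 0x100000, cache: ^uint64(0)}
	fmt.Printf("%#x\n", c.alloc(1)) // 0x100000
	fmt.Printf("%#x\n", c.alloc(4)) // 0x102000: next free 4-page run
	fmt.Println(c.empty())          // false
}
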
runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms      120ms (flat, cum)  0.25%
   1308            .          .           		} 
   1309            .          .           	} 
   1310            .          .           	if s == nil { 
   1311            .          .           		// We failed to get an mspan earlier, so grab 
   1312            .          .           		// one now that we have the heap lock. 
   1313            .      110ms           		s = h.allocMSpanLocked() 
   1314            .          .           	} 
   1315            .          .           	unlock(&h.lock) 
   1316            .          .            
   1317            .          .           HaveSpan: 
   1318            .          .           	// Decide if we need to scavenge in response to what we just allocated. 
   1319            .          .           	// Specifically, we track the maximum amount of memory to scavenge of all 
   1320            .          .           	// the alternatives below, assuming that the maximum satisfies *all* 
   1321            .          .           	// conditions we check (e.g. if we need to scavenge X to satisfy the 
   1322            .          .           	// memory limit and Y to satisfy heap-growth scavenging, and Y > X, then 
   1323            .          .           	// it's fine to pick Y, because the memory limit is still satisfied). 
   1324            .          .           	// 
   1325            .          .           	// It's fine to do this after allocating because we expect any scavenged 
   1326            .          .           	// pages not to get touched until we return. Simultaneously, it's important 
   1327            .          .           	// to do this before calling sysUsed because that may commit address space. 
   1328            .          .           	bytesToScavenge := uintptr(0) 
   1329            .          .           	forceScavenge := false 
    1330         10ms       10ms           	if limit := gcController.memoryLimit.Load(); !gcCPULimiter.limiting() { 
                                                               return l.enabled.Load()                                      mgclimit.go:91
                                                               return b.u.Load() != 0                                       types.go:168

   1331            .          .           		// Assist with scavenging to maintain the memory limit by the amount 
   1332            .          .           		// that we expect to page in. 
   1333            .          .           		inuse := gcController.mappedReady.Load() 
   1334            .          .           		// Be careful about overflow, especially with uintptrs. Even on 32-bit platforms 
   1335            .          .           		// someone can set a really big memory limit that isn't math.MaxInt64. 

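The scavenging comment above rests on each requirement being a lower bound: releasing the maximum of the computed amounts therefore satisfies every policy at once. Spelled out with invented inputs:

package main

import "fmt"

// Each policy computes how much must be scavenged; since all are lower
// bounds, the largest one satisfies them all simultaneously.
func bytesToScavenge(memLimitNeed, heapGrowthNeed uintptr) uintptr {
	return max(memLimitNeed, heapGrowthNeed)
}

func main() {
	// Need 1 MiB for the memory limit and 3 MiB for heap-growth
	// scavenging: releasing 3 MiB keeps the memory limit satisfied too.
	fmt.Println(bytesToScavenge(1<<20, 3<<20) == 3<<20) // true
}
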
runtime.(*mheap).allocSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      340ms (flat, cum)  0.71%
   1388            .          .           		} 
   1389            .          .           		scavenge.assistTime.Add(now - start) 
   1390            .          .           	} 
   1391            .          .            
   1392            .          .           	// Initialize the span. 
   1393            .      310ms           	h.initSpan(s, typ, spanclass, base, npages) 
   1394            .          .            
   1395            .          .           	if valgrindenabled { 
   1396            .          .           		valgrindMempoolMalloc(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base), npages*pageSize) 
   1397            .          .           	} 
   1398            .          .            
   1399            .          .           	// Commit and account for any scavenged memory that the span now owns. 
   1400            .          .           	nbytes := npages * pageSize 
   1401            .          .           	if scav != 0 { 
   1402            .          .           		// sysUsed all the pages that are actually available 
   1403            .          .           		// in the span since some of them might be scavenged. 
   1404            .          .           		sysUsed(unsafe.Pointer(base), nbytes, scav) 
   1405            .          .           		gcController.heapReleased.add(-int64(scav)) 
   1406            .          .           	} 
   1407            .          .           	// Update stats. 
   1408            .          .           	gcController.heapFree.add(-int64(nbytes - scav)) 
   1409            .          .           	if typ == spanAllocHeap { 
   1410            .       20ms           		gcController.heapInUse.add(int64(nbytes)) 
   1411            .          .           	} 
   1412            .          .           	// Update consistent stats. 
   1413            .       10ms           	stats := memstats.heapStats.acquire() 
   1414            .          .           	atomic.Xaddint64(&stats.committed, int64(scav)) 
   1415            .          .           	atomic.Xaddint64(&stats.released, -int64(scav)) 
   1416            .          .           	switch typ { 
   1417            .          .           	case spanAllocHeap: 
   1418            .          .           		atomic.Xaddint64(&stats.inHeap, int64(nbytes)) 

runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      160ms (flat, cum)  0.34%
   1437            .          .           // initSpan initializes a blank span s which will represent the range 
   1438            .          .           // [base, base+npages*pageSize). typ is the type of span being allocated. 
   1439            .          .           func (h *mheap) initSpan(s *mspan, typ spanAllocType, spanclass spanClass, base, npages uintptr) { 
   1440            .          .           	// At this point, both s != nil and base != 0, and the heap 
   1441            .          .           	// lock is no longer held. Initialize the span. 
   1442            .       60ms           	s.init(base, npages) 
   1443            .      100ms           	if h.allocNeedsZero(base, npages) { 
   1444            .          .           		s.needzero = 1 
   1445            .          .           	} 
   1446            .          .           	nbytes := npages * pageSize 
   1447            .          .           	if typ.manual() { 
   1448            .          .           		s.manualFreeList = 0 

runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       50ms (flat, cum)   0.1%
   1475            .          .           					s.nelems = uint16((nbytes - (nbytes / goarch.PtrSize / 8)) / s.elemsize) 
   1476            .          .           				} else { 
   1477            .          .           					s.nelems = uint16(nbytes / s.elemsize) 
   1478            .          .           				} 
   1479            .          .           			} 
   1480         10ms       10ms           			s.divMul = gc.SizeClassToDivMagic[sizeclass] 
   1481            .          .           		} 
   1482            .          .            
   1483            .          .           		// Initialize mark and allocation structures. 
   1484            .          .           		s.freeindex = 0 
   1485            .          .           		s.freeIndexForScan = 0 
   1486            .          .           		s.allocCache = ^uint64(0) // all 1s indicating all free. 
   1487            .       40ms           		s.gcmarkBits = newMarkBits(uintptr(s.nelems)) 
   1488            .          .           		s.allocBits = newAllocBits(uintptr(s.nelems)) 
   1489            .          .            
   1490            .          .           		// Adjust s.limit down to the object-containing part of the span. 
   1491            .          .           		s.limit = s.base() + uintptr(s.elemsize)*uintptr(s.nelems) 
   1492            .          .            

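s.divMul above lets object-index computation replace division by the element size with a multiply and a 32-bit shift. A sketch of that reciprocal trick for one hypothetical size class follows; rather than assuming the identity, the loop verifies it for every offset that can occur inside the span.

package main

import "fmt"

// For a fixed element size d, precompute m = ceil(2^32 / d); then
// offset/d == (offset*m)>>32 for all offsets within one span.
func divMagic(d uint32) uint64 {
	return uint64(^uint32(0))/uint64(d) + 1
}

func main() {
	const elemSize = 48    // hypothetical size class
	const spanBytes = 8192 // offsets stay below the span size
	m := divMagic(elemSize)
	for off := uint64(0); off < spanBytes; off++ {
		got := (off * m) >> 32
		if got != off/elemSize {
			fmt.Println("mismatch at", off)
			return
		}
	}
	fmt.Println("object index via multiply-shift verified for all span offsets")
}
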
runtime.(*mheap).initSpan

/usr/lib/go/src/runtime/mheap.go

  Total:        80ms      100ms (flat, cum)  0.21%
   1513            .          .           	// This is safe to call without the lock held because the slots 
   1514            .          .           	// related to this span will only ever be read or modified by 
   1515            .          .           	// this thread until pointers into the span are published (and 
   1516            .          .           	// we execute a publication barrier at the end of this function 
   1517            .          .           	// before that happens) or pageInUse is updated. 
   1518            .       20ms           	h.setSpans(s.base(), npages, s) 
   1519            .          .            
   1520            .          .           	if !typ.manual() { 
   1521            .          .           		// Mark in-use span in arena page bitmap. 
   1522            .          .           		// 
   1523            .          .           		// This publishes the span to the page sweeper, so 
   1524            .          .           		// it's imperative that the span be completely initialized 
   1525            .          .           		// prior to this line. 
   1526            .          .           		arena, pageIdx, pageMask := pageIndexOf(s.base()) 
   1527         20ms       20ms           		atomic.Or8(&arena.pageInUse[pageIdx], pageMask) 
   1528            .          .            
   1529            .          .           		// Mark packed span. 
   1530         60ms       60ms           		if gcUsesSpanInlineMarkBits(s.elemsize) { 
   1531            .          .           			atomic.Or8(&arena.pageUseSpanInlineMarkBits[pageIdx], pageMask) 
   1532            .          .           		} 
   1533            .          .            
   1534            .          .           		// Update related page sweeper stats. 
   1535            .          .           		h.pagesInUse.Add(npages) 

runtime.(*mheap).freeSpan

/usr/lib/go/src/runtime/mheap.go

  Total:           0      330ms (flat, cum)  0.69%
   1628            .          .           	return totalGrowth, true 
   1629            .          .           } 
   1630            .          .            
   1631            .          .           // Free the span back into the heap. 
   1632            .          .           func (h *mheap) freeSpan(s *mspan) { 
   1633            .      330ms           	systemstack(func() { 
   1634            .          .           		// Trace the span free. 
   1635            .          .           		if traceAllocFreeEnabled() { 
   1636            .          .           			trace := traceAcquire() 
   1637            .          .           			if trace.ok() { 

runtime.(*sweepLocked).sweep.(*mheap).freeSpan.func2

/usr/lib/go/src/runtime/mheap.go

  Total:           0      330ms (flat, cum)  0.69%
   1639            .          .           				traceRelease(trace) 
   1640            .          .           			} 
   1641            .          .           		} 
   1642            .          .            
   1643            .       60ms           		lock(&h.lock) 
   1644            .          .           		if msanenabled { 
   1645            .          .           			// Tell msan that this entire span is no longer in use. 
   1646            .          .           			base := unsafe.Pointer(s.base()) 
   1647            .          .           			bytes := s.npages << gc.PageShift 
   1648            .          .           			msanfree(base, bytes) 
   1649            .          .           		} 
   1650            .          .           		if asanenabled { 
   1651            .          .           			// Tell asan that this entire span is no longer in use. 
   1652            .          .           			base := unsafe.Pointer(s.base()) 
   1653            .          .           			bytes := s.npages << gc.PageShift 
   1654            .          .           			asanpoison(base, bytes) 
   1655            .          .           		} 
   1656            .          .           		if valgrindenabled { 
   1657            .          .           			base := s.base() 
   1658            .          .           			valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base)) 
   1659            .          .           		} 
   1660            .      230ms           		h.freeSpanLocked(s, spanAllocHeap) 
   1661            .       40ms           		unlock(&h.lock) 
   1662            .          .           	}) 
   1663            .          .           } 
   1664            .          .            
   1665            .          .           // freeManual frees a manually-managed span returned by allocManual. 
   1666            .          .           // typ must be the same as the spanAllocType passed to the allocManual that 

runtime.(*mheap).freeManual

/usr/lib/go/src/runtime/mheap.go

  Total:           0       20ms (flat, cum) 0.042%
   1687            .          .           	lock(&h.lock) 
   1688            .          .           	if valgrindenabled { 
   1689            .          .           		base := s.base() 
   1690            .          .           		valgrindMempoolFree(unsafe.Pointer(arenaBase(arenaIndex(base))), unsafe.Pointer(base)) 
   1691            .          .           	} 
   1692            .       20ms           	h.freeSpanLocked(s, typ) 
   1693            .          .           	unlock(&h.lock) 
   1694            .          .           } 
   1695            .          .            
   1696            .          .           func (h *mheap) freeSpanLocked(s *mspan, typ spanAllocType) { 
   1697            .          .           	assertLockHeld(&h.lock) 

runtime.(*mheap).freeSpanLocked

/usr/lib/go/src/runtime/mheap.go

  Total:        40ms      250ms (flat, cum)  0.52%
   1714            .          .           		// Clear in-use bit in arena page bitmap. 
   1715            .          .           		arena, pageIdx, pageMask := pageIndexOf(s.base()) 
   1716            .          .           		atomic.And8(&arena.pageInUse[pageIdx], ^pageMask) 
   1717            .          .            
   1718            .          .           		// Clear small heap span bit if necessary. 
   1719         10ms       10ms           		if gcUsesSpanInlineMarkBits(s.elemsize) { 
   1720            .          .           			atomic.And8(&arena.pageUseSpanInlineMarkBits[pageIdx], ^pageMask) 
   1721            .          .           		} 
   1722            .          .           	default: 
   1723            .          .           		throw("mheap.freeSpanLocked - invalid span state") 
   1724            .          .           	} 
   1725            .          .            
   1726            .          .           	// Update stats. 
   1727            .          .           	// 
   1728            .          .           	// Mirrors the code in allocSpan. 
   1729            .          .           	nbytes := s.npages * pageSize 
   1730            .          .           	gcController.heapFree.add(int64(nbytes)) 
   1731            .          .           	if typ == spanAllocHeap { 
   1732            .       40ms           		gcController.heapInUse.add(-int64(nbytes)) 
   1733            .          .           	} 
   1734            .          .           	// Update consistent stats. 
   1735            .          .           	stats := memstats.heapStats.acquire() 
   1736            .          .           	switch typ { 
   1737            .          .           	case spanAllocHeap: 
   1738         10ms       10ms           		atomic.Xaddint64(&stats.inHeap, -int64(nbytes)) 
   1739            .          .           	case spanAllocStack: 
   1740            .          .           		atomic.Xaddint64(&stats.inStacks, -int64(nbytes)) 
   1741            .          .           	case spanAllocWorkBuf: 
   1742            .          .           		atomic.Xaddint64(&stats.inWorkBufs, -int64(nbytes)) 
   1743            .          .           	} 
   1744            .          .           	memstats.heapStats.release() 
   1745            .          .            
   1746            .          .           	// Mark the space as free. 
   1747            .      170ms           	h.pages.free(s.base(), s.npages) 
   1748            .          .            
   1749            .          .           	// Free the span structure. We no longer have a use for it. 
   1750            .          .           	s.state.set(mSpanDead) 
    1751         20ms       20ms           	h.freeMSpanLocked(s) 
                                                               h.spanalloc.free(unsafe.Pointer(s))                          mheap.go:1204
                                                               f.inuse -= f.size                                            mfixalloc.go:105
   1752            .          .           } 
   1753            .          .            
   1754            .          .           // scavengeAll acquires the heap lock (blocking any additional 
   1755            .          .           // manipulation of the page allocator) and iterates over the whole 
   1756            .          .           // heap, scavenging every free page available. 

runtime.(*mspan).init

/usr/lib/go/src/runtime/mheap.go

  Total:        60ms       60ms (flat, cum)  0.13%
   1782            .          .           } 
   1783            .          .            
   1784            .          .           // Initialize a new span with the given start and npages. 
   1785            .          .           func (span *mspan) init(base uintptr, npages uintptr) { 
   1786            .          .           	// span is *not* zeroed. 
   1787         20ms       20ms           	span.next = nil 
   1788         40ms       40ms           	span.prev = nil 
   1789            .          .           	span.list = nil 
   1790            .          .           	span.startAddr = base 
   1791            .          .           	span.npages = npages 
   1792            .          .           	span.limit = base + npages*gc.PageSize // see go.dev/issue/74288; adjusted later for heap spans 
   1793            .          .           	span.allocCount = 0 

runtime.(*mSpanList).remove

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1834            .          .           		span.prev.next = span.next 
   1835            .          .           	} 
   1836            .          .           	if list.last == span { 
   1837            .          .           		list.last = span.prev 
   1838            .          .           	} else { 
   1839         10ms       10ms           		span.next.prev = span.prev 
   1840            .          .           	} 
   1841            .          .           	span.next = nil 
   1842            .          .           	span.prev = nil 
   1843            .          .           	span.list = nil 
   1844            .          .           } 

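(*mSpanList).remove above unlinks a span from an intrusive doubly-linked list, patching the list's first/last pointers when the node sits at either end. The same shape in ordinary Go, with an invented payload:

package main

import "fmt"

// Each node carries its own next/prev links plus a pointer to the list it
// is on; removal nils all three, as the runtime does.
type node struct {
	next, prev *node
	list       *list
	val        int
}

type list struct{ first, last *node }

func (l *list) insert(n *node) { // push front
	n.next = l.first
	if l.first != nil {
		l.first.prev = n
	} else {
		l.last = n
	}
	l.first = n
	n.list = l
}

func (l *list) remove(n *node) {
	if l.first == n {
		l.first = n.next
	} else {
		n.prev.next = n.next
	}
	if l.last == n {
		l.last = n.prev
	} else {
		n.next.prev = n.prev
	}
	n.next, n.prev, n.list = nil, nil, nil
}

func main() {
	var l list
	a, b, c := &node{val: 1}, &node{val: 2}, &node{val: 3}
	l.insert(a)
	l.insert(b)
	l.insert(c) // list: 3 2 1
	l.remove(b) // unlink from the middle
	for n := l.first; n != nil; n = n.next {
		fmt.Print(n.val, " ") // 3 1
	}
	fmt.Println()
}
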
runtime.addspecial

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms       40ms (flat, cum) 0.084%
   2050            .          .           	} 
   2051            .          .            
   2052            .          .           	// Ensure that the span is swept. 
   2053            .          .           	// Sweeping accesses the specials list w/o locks, so we have 
   2054            .          .           	// to synchronize with it. And it's just much safer. 
    2055         10ms       10ms           	mp := acquirem() 
                                                               gp.m.locks++                                                 runtime1.go:630

   2056            .       20ms           	span.ensureSwept() 
   2057            .          .            
   2058            .          .           	offset := uintptr(p) - span.base() 
   2059            .          .           	kind := s.kind 
   2060            .          .            
   2061            .          .           	lock(&span.speciallock) 
   2062            .          .            
   2063            .          .           	// Find splice point, check for existing record. 
   2064            .          .           	iter, exists := span.specialFindSplicePoint(offset, kind) 
   2065            .          .           	if !exists || force { 
   2066            .          .           		// Splice in record, fill in offset. 
   2067            .          .           		s.offset = offset 
   2068            .          .           		s.next = *iter 
   2069            .          .           		*iter = s 
   2070            .          .           		spanHasSpecials(span) 
   2071            .          .           	} 
   2072            .          .            
   2073         10ms       10ms           	unlock(&span.speciallock) 
   2074            .          .           	releasem(mp) 
   2075            .          .           	// We're converting p to a uintptr and looking it up, and we 
   2076            .          .           	// don't want it to die and get swept while we're doing so. 
   2077            .          .           	KeepAlive(p) 
   2078            .          .           	return !exists || force // already exists or addition was forced 

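addspecial above walks the span's sorted specials list to a splice point and inserts with two assignments (s.next = *iter; *iter = s). Using a pointer-to-pointer iterator makes head insertion and middle insertion identical. A sketch with an invented record type keyed by offset alone; the real list sorts by offset, then kind.

package main

import "fmt"

type special struct {
	next   *special
	offset int
}

// findSplicePoint returns the address of the link where a record with the
// given offset belongs, and whether one is already there.
func findSplicePoint(head **special, offset int) (**special, bool) {
	iter := head
	for {
		s := *iter
		if s == nil || offset < s.offset {
			return iter, false
		}
		if offset == s.offset {
			return iter, true
		}
		iter = &s.next
	}
}

func insert(head **special, offset int) bool {
	iter, exists := findSplicePoint(head, offset)
	if exists {
		return false
	}
	s := &special{offset: offset}
	s.next = *iter // splice in: same two stores for head or middle
	*iter = s
	return true
}

func main() {
	var head *special
	for _, off := range []int{40, 8, 24, 8} {
		insert(&head, off) // the duplicate 8 is rejected
	}
	for s := head; s != nil; s = s.next {
		fmt.Print(s.offset, " ") // 8 24 40
	}
	fmt.Println()
}
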
runtime.removespecial

/usr/lib/go/src/runtime/mheap.go

  Total:           0       10ms (flat, cum) 0.021%
   2080            .          .            
   2081            .          .           // Removes the Special record of the given kind for the object p. 
   2082            .          .           // Returns the record if the record existed, nil otherwise. 
   2083            .          .           // The caller must FixAlloc_Free the result. 
   2084            .          .           func removespecial(p unsafe.Pointer, kind uint8) *special { 
   2085            .       10ms           	span := spanOfHeap(uintptr(p)) 
   2086            .          .           	if span == nil { 
   2087            .          .           		throw("removespecial on invalid pointer") 
   2088            .          .           	} 
   2089            .          .            
   2090            .          .           	// Ensure that the span is swept. 

runtime.removespecial

/usr/lib/go/src/runtime/mheap.go

  Total:           0       30ms (flat, cum) 0.063%
   2105            .          .           		result = s 
   2106            .          .           	} 
   2107            .          .           	if span.specials == nil { 
   2108            .          .           		spanHasNoSpecials(span) 
   2109            .          .           	} 
    2110            .       30ms           	unlock(&span.speciallock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

   2111            .          .           	releasem(mp) 
   2112            .          .           	return result 
   2113            .          .           } 
   2114            .          .            
   2115            .          .           // Find a splice point in the sorted list and check for an already existing 

runtime.addfinalizer

/usr/lib/go/src/runtime/mheap.go

  Total:        20ms      140ms (flat, cum)  0.29%
   2148            .          .           	fint    *_type   // May be a heap pointer, but always live. 
   2149            .          .           	ot      *ptrtype // May be a heap pointer, but always live. 
   2150            .          .           } 
   2151            .          .            
   2152            .          .           // Adds a finalizer to the object p. Returns true if it succeeded. 
   2153         20ms       20ms           func addfinalizer(p unsafe.Pointer, f *funcval, nret uintptr, fint *_type, ot *ptrtype) bool { 
    2154            .       40ms           	lock(&mheap_.speciallock) 
                                                               lockWithRank(l, getLockRank(l))                              lock_spinbit.go:152
                                                               lock2(l)                                                     lockrank_off.go:24

   2155            .       10ms           	s := (*specialfinalizer)(mheap_.specialfinalizeralloc.alloc()) 
    2156            .       30ms           	unlock(&mheap_.speciallock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

   2157            .          .           	s.special.kind = _KindSpecialFinalizer 
   2158            .          .           	s.fn = f 
   2159            .          .           	s.nret = nret 
   2160            .          .           	s.fint = fint 
   2161            .          .           	s.ot = ot 
   2162            .       40ms           	if addspecial(p, &s.special, false) { 
   2163            .          .           		// This is responsible for maintaining the same 
   2164            .          .           		// GC-related invariants as markrootSpans in any 
   2165            .          .           		// situation where it's possible that markrootSpans 
   2166            .          .           		// has already run but mark termination hasn't yet. 
   2167            .          .           		if gcphase != _GCoff { 

runtime.removefinalizer

/usr/lib/go/src/runtime/mheap.go

  Total:           0      100ms (flat, cum)  0.21%
   2188            .          .           	return false 
   2189            .          .           } 
   2190            .          .            
   2191            .          .           // Removes the finalizer (if any) from the object p. 
   2192            .          .           func removefinalizer(p unsafe.Pointer) { 
   2193            .       40ms           	s := (*specialfinalizer)(unsafe.Pointer(removespecial(p, _KindSpecialFinalizer))) 
   2194            .          .           	if s == nil { 
   2195            .          .           		return // there wasn't a finalizer to remove 
   2196            .          .           	} 
    2197            .       20ms           	lock(&mheap_.speciallock) 
                                                               lockWithRank(l, getLockRank(l))                              lock_spinbit.go:152
                                                               lock2(l)                                                     lockrank_off.go:24

   2198            .          .           	mheap_.specialfinalizeralloc.free(unsafe.Pointer(s)) 
    2199            .       40ms           	unlock(&mheap_.speciallock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

   2200            .          .           } 
   2201            .          .            
   2202            .          .           // The described object has a cleanup set for it. 
   2203            .          .           type specialCleanup struct { 
   2204            .          .           	_       sys.NotInHeap 

runtime.freeSpecial

/usr/lib/go/src/runtime/mheap.go

  Total:           0       10ms (flat, cum) 0.021%
   2816            .          .           		mheap_.specialWeakHandleAlloc.free(unsafe.Pointer(s)) 
   2817            .          .           		unlock(&mheap_.speciallock) 
   2818            .          .           	case _KindSpecialProfile: 
   2819            .          .           		sp := (*specialprofile)(unsafe.Pointer(s)) 
   2820            .          .           		mProf_Free(sp.b, size) 
    2821            .       10ms           		lock(&mheap_.speciallock) 
                                                               lockWithRank(l, getLockRank(l))                              lock_spinbit.go:152
                                                               lock2(l)                                                     lockrank_off.go:24

   2822            .          .           		mheap_.specialprofilealloc.free(unsafe.Pointer(sp)) 
   2823            .          .           		unlock(&mheap_.speciallock) 
   2824            .          .           	case _KindSpecialReachable: 
   2825            .          .           		sp := (*specialReachable)(unsafe.Pointer(s)) 
   2826            .          .           		sp.done = true 

runtime.(*gcBits).bytep

/usr/lib/go/src/runtime/mheap.go

  Total:        10ms       10ms (flat, cum) 0.021%
   2865            .          .           	x uint8 
   2866            .          .           } 
   2867            .          .            
   2868            .          .           // bytep returns a pointer to the n'th byte of b. 
   2869            .          .           func (b *gcBits) bytep(n uintptr) *uint8 { 
   2870         10ms       10ms           	return addb(&b.x, n) 
   2871            .          .           } 
   2872            .          .            
   2873            .          .           // bitp returns a pointer to the byte containing bit n and a mask for 
   2874            .          .           // selecting that bit from *bytep. 
   2875            .          .           func (b *gcBits) bitp(n uintptr) (bytep *uint8, mask uint8) { 

runtime.(*gcBitsArena).tryAlloc

/usr/lib/go/src/runtime/mheap.go

  Total:        70ms       70ms (flat, cum)  0.15%
   2901            .          .           } 
   2902            .          .            
   2903            .          .           // tryAlloc allocates from b or returns nil if b does not have enough room. 
   2904            .          .           // This is safe to call concurrently. 
   2905            .          .           func (b *gcBitsArena) tryAlloc(bytes uintptr) *gcBits { 
   2906         20ms       20ms           	if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) { 
   2907            .          .           		return nil 
   2908            .          .           	} 
   2909            .          .           	// Try to allocate from this block. 
   2910         50ms       50ms           	end := atomic.Xadduintptr(&b.free, bytes) 
   2911            .          .           	if end > uintptr(len(b.bits)) { 
   2912            .          .           		return nil 
   2913            .          .           	} 
   2914            .          .           	// There was enough room. 
   2915            .          .           	start := end - bytes 

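tryAlloc above is a lock-free bump allocator: an atomic add claims a range optimistically, and the post-add bounds check makes a racing over-allocation fail cleanly instead of handing out overlapping memory. A sketch with invented sizes; note that a failed racing add wastes its slot rather than rolling back, exactly as in the runtime version.

package main

import (
	"fmt"
	"sync"
	"sync/atomic"
)

type arena struct {
	free atomic.Uintptr
	bits [1 << 16]byte
}

func (a *arena) tryAlloc(bytes uintptr) []byte {
	if a.free.Load()+bytes > uintptr(len(a.bits)) {
		return nil // cheap pre-check; the add below is the real gate
	}
	end := a.free.Add(bytes)
	if end > uintptr(len(a.bits)) {
		return nil // lost a race; this slot is wasted, not reused
	}
	return a.bits[end-bytes : end] // distinct winners get disjoint ranges
}

func main() {
	var a arena
	var wg sync.WaitGroup
	var got atomic.Int64
	for i := 0; i < 64; i++ {
		wg.Add(1)
		go func() {
			defer wg.Done()
			for a.tryAlloc(512) != nil {
				got.Add(1)
			}
		}()
	}
	wg.Wait()
	fmt.Println(got.Load() <= 128, a.free.Load() >= 1<<16) // true true
}
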
runtime.newMarkBits

/usr/lib/go/src/runtime/mheap.go

  Total:        70ms       70ms (flat, cum)  0.15%
   2922            .          .           	blocksNeeded := (nelems + 63) / 64 
   2923            .          .           	bytesNeeded := blocksNeeded * 8 
   2924            .          .            
   2925            .          .           	// Try directly allocating from the current head arena. 
   2926            .          .           	head := (*gcBitsArena)(atomic.Loadp(unsafe.Pointer(&gcBitsArenas.next))) 
    2927         70ms       70ms           	if p := head.tryAlloc(bytesNeeded); p != nil { 
                                                               if b == nil || atomic.Loaduintptr(&b.free)+bytes > uintptr(len(b.bits)) {  mheap.go:2906
                                                               end := atomic.Xadduintptr(&b.free, bytes)                    mheap.go:2910

   2928            .          .           		return p 
   2929            .          .           	} 
   2930            .          .            
   2931            .          .           	// There's not enough room in the head arena. We may need to 
   2932            .          .           	// allocate a new arena. 

runtime.newMarkBits

/usr/lib/go/src/runtime/mheap.go

  Total:           0       10ms (flat, cum) 0.021%
   2962            .          .            
   2963            .          .           	// Add the fresh arena to the "next" list. 
   2964            .          .           	fresh.next = gcBitsArenas.next 
   2965            .          .           	atomic.StorepNoWB(unsafe.Pointer(&gcBitsArenas.next), unsafe.Pointer(fresh)) 
   2966            .          .            
    2967            .       10ms           	unlock(&gcBitsArenas.lock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

   2968            .          .           	return p 
   2969            .          .           } 
   2970            .          .            
   2971            .          .           // newAllocBits returns a pointer to 8 byte aligned bytes 
   2972            .          .           // to be used for this span's alloc bits. 

runtime.sellock

/usr/lib/go/src/runtime/select.go

  Total:        50ms      270ms (flat, cum)  0.57%
     29            .          .            
     30            .          .           func selectsetpc(pc *uintptr) { 
     31            .          .           	*pc = sys.GetCallerPC() 
     32            .          .           } 
     33            .          .            
     34         10ms       10ms           func sellock(scases []scase, lockorder []uint16) { 
     35            .          .           	var c *hchan 
     36         20ms       20ms           	for _, o := range lockorder { 
     37            .          .           		c0 := scases[o].c 
     38            .          .           		if c0 != c { 
     39            .          .           			c = c0 
     40         20ms      240ms           			lock(&c.lock) 
                                                               lockWithRank(l, getLockRank(l))                              lock_spinbit.go:152
                                                               lock2(l)                                                     lockrank_off.go:24

     41            .          .           		} 
     42            .          .           	} 
     43            .          .           } 
     44            .          .            
     45            .          .           func selunlock(scases []scase, lockorder []uint16) { 

runtime.selunlock

/usr/lib/go/src/runtime/select.go

  Total:        50ms      270ms (flat, cum)  0.57%
     49            .          .           	// First M calls runtime·park() in runtime·selectgo() passing the sel. 
     50            .          .           	// Once runtime·park() has unlocked the last lock, another M makes 
     51            .          .           	// the G that calls select runnable again and schedules it for execution. 
     52            .          .           	// When the G runs on another M, it locks all the locks and frees sel. 
     53            .          .           	// Now if the first M touches sel, it will access freed memory. 
     54         10ms       10ms           	for i := len(lockorder) - 1; i >= 0; i-- { 
     55            .          .           		c := scases[lockorder[i]].c 
     56         20ms       20ms           		if i > 0 && c == scases[lockorder[i-1]].c { 
     57            .          .           			continue // will unlock it on the next iteration 
     58            .          .           		} 
     59         20ms      240ms           		unlock(&c.lock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

     60            .          .           	} 
     61            .          .           } 
     62            .          .            
     63            .          .           func selparkcommit(gp *g, _ unsafe.Pointer) bool { 
     64            .          .           	// There are unlocked sudogs that point into gp's stack. Stack 

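sellock and selunlock above avoid deadlock by acquiring every channel lock in one global order (the channels are sorted by address, see sortkey) and by skipping consecutive duplicates, since one channel can back several cases. A user-level sketch of that discipline; the resource type is invented, and address-based ordering is shown only because it mirrors what the runtime does, not because ordinary Go code needs it.

package main

import (
	"fmt"
	"sort"
	"sync"
	"unsafe"
)

type resource struct {
	mu sync.Mutex
	id string
}

// sortedUnique fixes one global acquisition order and collapses duplicate
// entries, so each lock is taken exactly once.
func sortedUnique(rs []*resource) []*resource {
	order := append([]*resource(nil), rs...)
	sort.Slice(order, func(i, j int) bool {
		return uintptr(unsafe.Pointer(order[i])) < uintptr(unsafe.Pointer(order[j]))
	})
	out := order[:0]
	for _, r := range order {
		if len(out) == 0 || out[len(out)-1] != r {
			out = append(out, r)
		}
	}
	return out
}

func lockAll(order []*resource) {
	for _, r := range order {
		r.mu.Lock()
	}
}

func unlockAll(order []*resource) {
	for i := len(order) - 1; i >= 0; i-- {
		order[i].mu.Unlock()
	}
}

func main() {
	a, b := &resource{id: "a"}, &resource{id: "b"}
	cases := []*resource{b, a, b} // b appears twice, like a repeated channel
	order := sortedUnique(cases)
	lockAll(order)
	fmt.Println("took", len(order), "locks for", len(cases), "cases") // 2 for 3
	unlockAll(order)
}
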
runtime.selparkcommit

/usr/lib/go/src/runtime/select.go

  Total:           0       20ms (flat, cum) 0.042%
     88            .          .           			// any sudog with that channel may change, 
     89            .          .           			// including c and waitlink. Since multiple 
     90            .          .           			// sudogs may have the same channel, we unlock 
     91            .          .           			// only after we've passed the last instance 
     92            .          .           			// of a channel. 
     93            .       20ms           			unlock(&lastc.lock) 
                                                               unlockWithRank(l)                                            lock_spinbit.go:261
                                                               unlock2(l)                                                   lockrank_off.go:35

     94            .          .           		} 
     95            .          .           		lastc = sg.c 
     96            .          .           	} 
     97            .          .           	if lastc != nil { 
     98            .          .           		unlock(&lastc.lock) 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:       440ms      620ms (flat, cum)  1.30%
    165            .          .           	// optimizing (and needing to test). 
    166            .          .            
    167            .          .           	// generate permuted order 
    168            .          .           	norder := 0 
    169            .          .           	allSynctest := true 
    170         10ms       10ms           	for i := range scases { 
    171            .          .           		cas := &scases[i] 
    172            .          .            
    173            .          .           		// Omit cases without channels from the poll and lock orders. 
    174         30ms       30ms           		if cas.c == nil { 
    175            .          .           			cas.elem = nil // allow GC 
    176            .          .           			continue 
    177            .          .           		} 
    178            .          .            
    179            .          .           		if cas.c.bubble != nil { 
    180            .          .           			if getg().bubble != cas.c.bubble { 
    181            .          .           				fatal("select on synctest channel from outside bubble") 
    182            .          .           			} 
    183            .          .           		} else { 
    184            .          .           			allSynctest = false 
    185            .          .           		} 
    186            .          .            
    187         20ms       20ms           		if cas.c.timer != nil { 
    188            .          .           			cas.c.timer.maybeRunChan(cas.c) 
    189            .          .           		} 
    190            .          .            
    191         20ms       20ms           		j := cheaprandn(uint32(norder + 1)) 
                                                               return uint32((uint64(cheaprand()) * uint64(n)) >> 32)       rand.go:293
                                                               return uint32(hi ^ lo)                                       rand.go:237
    192         40ms       40ms           		pollorder[norder] = pollorder[j] 
    193            .          .           		pollorder[j] = uint16(i) 
    194            .          .           		norder++ 
    195            .          .           	} 
    196            .          .           	pollorder = pollorder[:norder] 
    197            .          .           	lockorder = lockorder[:norder] 
    198            .          .            
    199            .          .           	waitReason := waitReasonSelect 
    200         10ms       10ms           	if gp.bubble != nil && allSynctest { 
    201            .          .           		// Every channel selected on is in a synctest bubble, 
    202            .          .           		// so this goroutine will count as idle while selecting. 
    203            .          .           		waitReason = waitReasonSynctestSelect 
    204            .          .           	} 
    205            .          .            
    206            .          .           	// sort the cases by Hchan address to get the locking order. 
    207            .          .           	// simple heap sort, to guarantee n log n time and constant stack footprint. 
    208         20ms       20ms           	for i := range lockorder { 
    209            .          .           		j := i 
    210            .          .           		// Start with the pollorder to permute cases on the same channel. 
    211            .          .           		c := scases[pollorder[i]].c 
    212         80ms       80ms           		for j > 0 && scases[lockorder[(j-1)/2]].c.sortkey() < c.sortkey() { 
                                                               return uintptr(unsafe.Pointer(c))                            select.go:546

    213            .          .           			k := (j - 1) / 2 
    214         10ms       10ms           			lockorder[j] = lockorder[k] 
    215            .          .           			j = k 
    216            .          .           		} 
    217         30ms       30ms           		lockorder[j] = pollorder[i] 
    218            .          .           	} 
    219         10ms       10ms           	for i := len(lockorder) - 1; i >= 0; i-- { 
    220         10ms       10ms           		o := lockorder[i] 
    221         10ms       10ms           		c := scases[o].c 
    222         10ms       10ms           		lockorder[i] = lockorder[0] 
    223            .          .           		j := 0 
    224            .          .           		for { 
    225            .          .           			k := j*2 + 1 
    226            .          .           			if k >= i { 
    227            .          .           				break 
    228            .          .           			} 
    229         50ms       50ms           			if k+1 < i && scases[lockorder[k]].c.sortkey() < scases[lockorder[k+1]].c.sortkey() { 
    230            .          .           				k++ 
    231            .          .           			} 
    232         50ms       50ms           			if c.sortkey() < scases[lockorder[k]].c.sortkey() { 
                                                               return uintptr(unsafe.Pointer(c))                            select.go:546
    233         10ms       10ms           				lockorder[j] = lockorder[k] 
    234            .          .           				j = k 
    235            .          .           				continue 
    236            .          .           			} 
    237            .          .           			break 
    238            .          .           		} 
    239            .          .           		lockorder[j] = o 
    240            .          .           	} 
    241            .          .            
    242            .          .           	if debugSelect { 
    243            .          .           		for i := 0; i+1 < len(lockorder); i++ { 
    244            .          .           			if scases[lockorder[i]].c.sortkey() > scases[lockorder[i+1]].c.sortkey() { 
    245            .          .           				print("i=", i, " x=", lockorder[i], " y=", lockorder[i+1], "\n") 
    246            .          .           				throw("select: broken sort") 
    247            .          .           			} 
    248            .          .           		} 
    249            .          .           	} 
    250            .          .            
    251            .          .           	// lock all the channels involved in the select 
    252            .      180ms           	sellock(scases, lockorder) 
    253            .          .            
    254            .          .           	var ( 
    255            .          .           		sg     *sudog 
    256            .          .           		c      *hchan 
    257            .          .           		k      *scase 
    258            .          .           		sglist *sudog 
    259            .          .           		sgnext *sudog 
    260            .          .           		qp     unsafe.Pointer 
    261            .          .           		nextp  **sudog 
    262            .          .           	) 
    263            .          .            
    264            .          .           	// pass 1 - look for something already waiting 
    265            .          .           	var casi int 
    266            .          .           	var cas *scase 
    267            .          .           	var caseSuccess bool 
    268            .          .           	var caseReleaseTime int64 = -1 
    269            .          .           	var recvOK bool 
    270         10ms       10ms           	for _, casei := range pollorder { 
    271            .          .           		casi = int(casei) 
    272            .          .           		cas = &scases[casi] 
    273            .          .           		c = cas.c 
    274            .          .            
    275            .          .           		if casi >= nsends { 
    276         10ms       10ms           			sg = c.sendq.dequeue() 
                                                               for {                                                        chan.go:887

    277            .          .           			if sg != nil { 
    278            .          .           				goto recv 
    279            .          .           			} 
    280            .          .           			if c.qcount > 0 { 
    281            .          .           				goto bufrecv 

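The pollorder loop above builds a uniform random visiting order online: each new case lands in a random slot j and the old occupant of j moves to the end (an inside-out Fisher-Yates shuffle), with cheaprandn reducing a 32-bit random into [0, n) by multiply-and-shift instead of a modulo. A sketch using math/rand/v2 in place of the runtime's cheaprand; the slight bias of the multiply-shift reduction is negligible here, as it is for the runtime.

package main

import (
	"fmt"
	"math/rand/v2"
)

// cheaprandn-style bounded random: map a 32-bit value into [0, n) with a
// 64-bit multiply and a shift, avoiding a divide.
func cheaprandn(n uint32) uint32 {
	return uint32((uint64(rand.Uint32()) * uint64(n)) >> 32)
}

// permute builds a random permutation of 0..n-1 one element at a time,
// mirroring the pollorder loop: place the new element at a random slot j
// and move j's previous occupant to the end.
func permute(n int) []uint16 {
	order := make([]uint16, n)
	norder := 0
	for i := 0; i < n; i++ {
		j := cheaprandn(uint32(norder + 1))
		order[norder] = order[j]
		order[j] = uint16(i)
		norder++
	}
	return order
}

func main() {
	fmt.Println(permute(8)) // e.g. [3 6 0 7 2 1 5 4]
}
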
runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:       200ms      350ms (flat, cum)  0.73%
    305            .          .           		casi = -1 
    306            .          .           		goto retc 
    307            .          .           	} 
    308            .          .            
    309            .          .           	// pass 2 - enqueue on all chans 
    310         10ms       10ms           	if gp.waiting != nil { 
    311            .          .           		throw("gp.waiting != nil") 
    312            .          .           	} 
    313            .          .           	nextp = &gp.waiting 
    314            .          .           	for _, casei := range lockorder { 
    315         10ms       10ms           		casi = int(casei) 
    316            .          .           		cas = &scases[casi] 
    317         10ms       10ms           		c = cas.c 
    318            .       30ms           		sg := acquireSudog() 
    319            .          .           		sg.g = gp 
    320            .          .           		sg.isSelect = true 
    321            .          .           		// No stack splits between assigning elem and enqueuing 
    322            .          .           		// sg on gp.waiting where copystack can find it. 
    323            .          .           		sg.elem = cas.elem 
    324            .          .           		sg.releasetime = 0 
    325            .          .           		if t0 != 0 { 
    326            .          .           			sg.releasetime = -1 
    327            .          .           		} 
    328            .          .           		sg.c = c 
    329            .          .           		// Construct waiting list in lock order. 
    330            .          .           		*nextp = sg 
    331            .          .           		nextp = &sg.waitlink 
    332            .          .            
    333            .          .           		if casi < nsends { 
    334            .          .           			c.sendq.enqueue(sg) 
    335            .          .           		} else { 
    336         10ms       10ms           			c.recvq.enqueue(sg) 
                                                               sgp.prev = nil                                               chan.go:876

    337            .          .           		} 
    338            .          .            
    339            .          .           		if c.timer != nil { 
    340            .          .           			blockTimerChan(c) 
    341            .          .           		} 
    342            .          .           	} 
    343            .          .            
    344            .          .           	// wait for someone to wake us up 
    345         10ms       10ms           	gp.param = nil 
    346            .          .           	// Signal to anyone trying to shrink our stack that we're about 
    347            .          .           	// to park on a channel. The window between when this G's status 
    348            .          .           	// changes and when we set gp.activeStackChans is not safe for 
    349            .          .           	// stack shrinking. 
    350            .          .           	gp.parkingOnChan.Store(true) 
    351            .       10ms           	gopark(selparkcommit, nil, waitReason, traceBlockSelect, 1) 
    352         20ms       20ms           	gp.activeStackChans = false 
    353            .          .            
    354            .       90ms           	sellock(scases, lockorder) 
    355            .          .            
    356            .          .           	gp.selectDone.Store(0) 
    357            .          .           	sg = (*sudog)(gp.param) 
    358            .          .           	gp.param = nil 
    359            .          .            
    360            .          .           	// pass 3 - dequeue from unsuccessful chans 
    361            .          .           	// otherwise they stack up on quiet channels 
    362            .          .           	// record the successful case, if any. 
    363            .          .           	// We singly-linked up the SudoGs in lock order. 
    364            .          .           	casi = -1 
    365            .          .           	cas = nil 
    366            .          .           	caseSuccess = false 
    367            .          .           	sglist = gp.waiting 
    368            .          .           	// Clear all elem before unlinking from gp.waiting. 
    369         70ms       70ms           	for sg1 := gp.waiting; sg1 != nil; sg1 = sg1.waitlink { 
    370            .          .           		sg1.isSelect = false 
    371         20ms       20ms           		sg1.elem = nil 
    372         10ms       10ms           		sg1.c = nil 
    373            .          .           	} 
    374            .          .           	gp.waiting = nil 
    375            .          .            
    376            .          .           	for _, casei := range lockorder { 
    377            .          .           		k = &scases[casei] 
    378         20ms       20ms           		if k.c.timer != nil { 
    379            .          .           			unblockTimerChan(k.c) 
    380            .          .           		} 
    381            .          .           		if sg == sglist { 
    382            .          .           			// sg has already been dequeued by the G that woke us up. 
    383            .          .           			casi = int(casei) 
    384            .          .           			cas = k 
    385            .          .           			caseSuccess = sglist.success 
    386            .          .           			if sglist.releasetime > 0 { 
    387            .          .           				caseReleaseTime = sglist.releasetime 
    388            .          .           			} 
    389            .          .           		} else { 
    390            .          .           			c = k.c 
    391            .          .           			if int(casei) < nsends { 
    392            .          .           				c.sendq.dequeueSudoG(sglist) 
    393            .          .           			} else { 
    394            .          .           				c.recvq.dequeueSudoG(sglist) 
    395            .          .           			} 
    396            .          .           		} 
    397            .          .           		sgnext = sglist.waitlink 
    398         10ms       10ms           		sglist.waitlink = nil 
    399            .       20ms           		releaseSudog(sglist) 
    400            .          .           		sglist = sgnext 
    401            .          .           	} 
    402            .          .            
    403            .          .           	if cas == nil { 
    404            .          .           		throw("selectgo: bad wakeup") 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:        20ms      270ms (flat, cum)  0.57%
    438            .          .           		} else if cas.elem != nil { 
    439            .          .           			asanwrite(cas.elem, c.elemtype.Size_) 
    440            .          .           		} 
    441            .          .           	} 
    442            .          .            
    443            .       40ms           	selunlock(scases, lockorder) 
    444            .          .           	goto retc 
    445            .          .            
    446            .          .           bufrecv: 
    447            .          .           	// can receive from buffer 
    448            .          .           	if raceenabled { 
    449            .          .           		if cas.elem != nil { 
    450            .          .           			raceWriteObjectPC(c.elemtype, cas.elem, casePC(casi), chanrecvpc) 
    451            .          .           		} 
    452            .          .           		racenotify(c, c.recvx, nil) 
    453            .          .           	} 
    454            .          .           	if msanenabled && cas.elem != nil { 
    455            .          .           		msanwrite(cas.elem, c.elemtype.Size_) 
    456            .          .           	} 
    457            .          .           	if asanenabled && cas.elem != nil { 
    458            .          .           		asanwrite(cas.elem, c.elemtype.Size_) 
    459            .          .           	} 
    460            .          .           	recvOK = true 
    461         10ms       10ms           	qp = chanbuf(c, c.recvx)
    462            .          .           	if cas.elem != nil { 
    463            .       20ms           		typedmemmove(c.elemtype, cas.elem, qp) 
    464            .          .           	} 
    465         10ms       10ms           	typedmemclr(c.elemtype, qp) 
    466            .          .           	c.recvx++ 
    467            .          .           	if c.recvx == c.dataqsiz { 
    468            .          .           		c.recvx = 0 
    469            .          .           	} 
    470            .          .           	c.qcount-- 
    471            .      190ms           	selunlock(scases, lockorder) 
    472            .          .           	goto retc 
    473            .          .            
    474            .          .           bufsend: 
    475            .          .           	// can send to buffer 
    476            .          .           	if raceenabled { 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:           0       60ms (flat, cum)  0.13%
    492            .          .           	selunlock(scases, lockorder) 
    493            .          .           	goto retc 
    494            .          .            
    495            .          .           recv: 
    496            .          .           	// can receive from sleeping sender (sg) 
    497            .       30ms           	recv(c, sg, cas.elem, func() { selunlock(scases, lockorder) }, 2) 
    498            .          .           	if debugSelect { 
    499            .          .           		print("syncrecv: cas0=", cas0, " c=", c, "\n") 
    500            .          .           	} 
    501            .          .           	recvOK = true 
    502            .          .           	goto retc 
    503            .          .            
    504            .          .           rclose: 
    505            .          .           	// read at end of closed channel 
    506            .       30ms           	selunlock(scases, lockorder) 
    507            .          .           	recvOK = false 
    508            .          .           	if cas.elem != nil { 
    509            .          .           		typedmemclr(c.elemtype, cas.elem) 
    510            .          .           	} 
    511            .          .           	if raceenabled { 

runtime.selectgo

/usr/lib/go/src/runtime/select.go

  Total:        10ms       10ms (flat, cum) 0.021%
    532            .          .            
    533            .          .           retc: 
    534            .          .           	if caseReleaseTime > 0 { 
    535            .          .           		blockevent(caseReleaseTime-t0, 1) 
    536            .          .           	} 
    537         10ms       10ms           	return casi, recvOK 
    538            .          .            
    539            .          .           sclose: 
    540            .          .           	// send on closed channel 
    541            .          .           	selunlock(scases, lockorder) 
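
For reference, a minimal sketch (not from the profiled test) of the shape of code that funnels into runtime.selectgo above. A select whose cases all block forces the full three-pass protocol shown in the listing: poll every channel, enqueue a sudog on each case, park in gopark, then dequeue from the losing cases after wakeup.

package main

import "fmt"

func main() {
	a := make(chan int)
	b := make(chan int)

	go func() { a <- 1 }()
	go func() { b <- 2 }()

	for i := 0; i < 2; i++ {
		// Neither case is ready on the first poll, so the goroutine
		// parks via gopark and a sender wakes it through its sudog.
		select {
		case v := <-a:
			fmt.Println("a:", v)
		case v := <-b:
			fmt.Println("b:", v)
		}
	}
}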

runtime.(*hchan).sortkey

/usr/lib/go/src/runtime/select.go

  Total:        40ms       40ms (flat, cum) 0.084%
    542            .          .           	panic(plainError("send on closed channel")) 
    543            .          .           } 
    544            .          .            
    545            .          .           func (c *hchan) sortkey() uintptr { 
    546         40ms       40ms           	return uintptr(unsafe.Pointer(c)) 
    547            .          .           } 
    548            .          .            
    549            .          .           // A runtimeSelect is a single case passed to rselect. 
    550            .          .           // This must match ../reflect/value.go:/runtimeSelect 
    551            .          .           type runtimeSelect struct { 

runtime.(*stkframe).argMapInternal

/usr/lib/go/src/runtime/stkframe.go

  Total:        20ms       20ms (flat, cum) 0.042%
     91            .          .           // 
     92            .          .           // hasReflectStackObj indicates that this frame also has a reflect 
     93            .          .           // function stack object, which the caller must synthesize. 
     94            .          .           func (frame *stkframe) argMapInternal() (argMap bitvector, hasReflectStackObj bool) { 
     95            .          .           	f := frame.fn 
     96         10ms       10ms           	if f.args != abi.ArgsSizeUnknown { 
     97         10ms       10ms           		argMap.n = f.args / goarch.PtrSize 
     98            .          .           		return 
     99            .          .           	} 
    100            .          .           	// Extract argument bitmaps for reflect stubs from the calls they made to reflect. 
    101            .          .           	switch funcname(f) { 
    102            .          .           	case "reflect.makeFuncStub", "reflect.methodValueCall": 

runtime.(*stkframe).getStackMap

/usr/lib/go/src/runtime/stkframe.go

  Total:       670ms      1.93s (flat, cum)  4.05%
    152            .          .           	return 
    153            .          .           } 
    154            .          .            
    155            .          .           // getStackMap returns the locals and arguments live pointer maps, and 
    156            .          .           // stack object list for frame. 
    157         30ms       30ms           func (frame *stkframe) getStackMap(debug bool) (locals, args bitvector, objs []stackObjectRecord) { 
    158            .          .           	targetpc := frame.continpc 
    159         10ms       10ms           	if targetpc == 0 { 
    160            .          .           		// Frame is dead. Return empty bitvectors. 
    161            .          .           		return 
    162            .          .           	} 
    163            .          .            
    164            .          .           	f := frame.fn 
    165            .          .           	pcdata := int32(-1) 
    166         30ms       60ms           	if targetpc != f.entry() {
    167            .          .           		// Back up to the CALL. If we're at the function entry 
    168            .          .           		// point, we want to use the entry map (-1), even if 
    169            .          .           		// the first instruction of the function changes the 
    170            .          .           		// stack map. 
    171         10ms       10ms           		targetpc-- 
    172         10ms      1.22s           		pcdata = pcdatavalue(f, abi.PCDATA_StackMapIndex, targetpc) 
    173            .          .           	} 
    174            .          .           	if pcdata == -1 { 
    175            .          .           		// We do not have a valid pcdata value but there might be a 
    176            .          .           		// stackmap for this function. It is likely that we are looking 
    177            .          .           		// at the function prologue, assume so and hope for the best. 
    178            .          .           		pcdata = 0 
    179            .          .           	} 
    180            .          .            
    181            .          .           	// Local variables. 
    182         20ms       20ms           	size := frame.varp - frame.sp 
    183            .          .           	var minsize uintptr 
    184            .          .           	switch goarch.ArchFamily { 
    185            .          .           	case goarch.ARM64: 
    186            .          .           		minsize = sys.StackAlign 
    187            .          .           	default: 
    188            .          .           		minsize = sys.MinFrameSize 
    189            .          .           	} 
    190            .          .           	if size > minsize { 
    191            .          .           		stackid := pcdata 
    192        100ms      100ms           		stkmap := (*stackmap)(funcdata(f, abi.FUNCDATA_LocalsPointerMaps))
    193        120ms      120ms           		if stkmap == nil || stkmap.n <= 0 { 
    194            .          .           			print("runtime: frame ", funcname(f), " untyped locals ", hex(frame.varp-size), "+", hex(size), "\n") 
    195            .          .           			throw("missing stackmap") 
    196            .          .           		} 
    197            .          .           		// If nbit == 0, there's no work to do. 
    198         10ms       10ms           		if stkmap.nbit > 0 { 
    199         10ms       10ms           			if stackid < 0 || stackid >= stkmap.n { 
    200            .          .           				// don't know where we are 
    201            .          .           				print("runtime: pcdata is ", stackid, " and ", stkmap.n, " locals stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n") 
    202            .          .           				throw("bad symbol table") 
    203            .          .           			} 
    204            .          .           			locals = stackmapdata(stkmap, stackid) 
    205            .          .           			if stackDebug >= 3 && debug { 
    206            .          .           				print("      locals ", stackid, "/", stkmap.n, " ", locals.n, " words ", locals.bytedata, "\n") 
    207            .          .           			} 
    208            .          .           		} else if stackDebug >= 3 && debug { 
    209            .          .           			print("      no locals to adjust\n") 
    210            .          .           		} 
    211            .          .           	} 
    212            .          .            
    213            .          .           	// Arguments. First fetch frame size and special-case argument maps. 
    214            .          .           	var isReflect bool 
    215            .       20ms           	args, isReflect = frame.argMapInternal() 
    216         20ms       20ms           	if args.n > 0 && args.bytedata == nil { 
    217            .          .           		// Non-empty argument frame, but not a special map. 
    218            .          .           		// Fetch the argument map at pcdata. 
    219         50ms       50ms           		stackmap := (*stackmap)(funcdata(f, abi.FUNCDATA_ArgsPointerMaps))
    220         50ms       50ms           		if stackmap == nil || stackmap.n <= 0 { 
    221            .          .           			print("runtime: frame ", funcname(f), " untyped args ", hex(frame.argp), "+", hex(args.n*goarch.PtrSize), "\n") 
    222            .          .           			throw("missing stackmap") 
    223            .          .           		} 
    224            .          .           		if pcdata < 0 || pcdata >= stackmap.n { 
    225            .          .           			// don't know where we are 
    226            .          .           			print("runtime: pcdata is ", pcdata, " and ", stackmap.n, " args stack map entries for ", funcname(f), " (targetpc=", hex(targetpc), ")\n") 
    227            .          .           			throw("bad symbol table") 
    228            .          .           		} 
    229         20ms       20ms           		if stackmap.nbit == 0 { 
    230            .          .           			args.n = 0 
    231            .          .           		} else { 
    232         10ms       10ms           			args = stackmapdata(stackmap, pcdata)
    233            .          .           		} 
    234            .          .           	} 
    235            .          .            
    236            .          .           	// stack objects. 
    237            .          .           	if (GOARCH == "amd64" || GOARCH == "arm64" || GOARCH == "loong64" || GOARCH == "ppc64" || GOARCH == "ppc64le" || GOARCH == "riscv64") && 
    238            .          .           		unsafe.Sizeof(abi.RegArgs{}) > 0 && isReflect { 
    239            .          .           		// For reflect.makeFuncStub and reflect.methodValueCall, 
    240            .          .           		// we need to fake the stack object record. 
    241            .          .           		// These frames contain an internal/abi.RegArgs at a hard-coded offset. 
    242            .          .           		// This offset matches the assembly code on amd64 and arm64. 
    243            .          .           		objs = methodValueCallFrameObjs[:] 
    244            .          .           	} else { 
    245         20ms       20ms           		p := funcdata(f, abi.FUNCDATA_StackObjects)
    246            .          .           		if p != nil { 
    247         10ms       10ms           			n := *(*uintptr)(p) 
    248         10ms       10ms           			p = add(p, goarch.PtrSize)
    249            .          .           			r0 := (*stackObjectRecord)(noescape(p)) 
    250         40ms       40ms           			objs = unsafe.Slice(r0, int(n)) 
    251            .          .           			// Note: the noescape above is needed to keep 
    252            .          .           			// getStackMap from "leaking param content: 
    253            .          .           			// frame".  That leak propagates up to getgcmask, then 
    254            .          .           			// GCMask, then verifyGCInfo, which converts the stack 
    255            .          .           			// gcinfo tests into heap gcinfo tests :( 
    256            .          .           		} 
    257            .          .           	} 
    258            .          .            
    259         90ms       90ms           	return 
    260            .          .           } 
    261            .          .            
    262            .          .           var methodValueCallFrameObjs [1]stackObjectRecord // initialized in stackobjectinit 
    263            .          .            
    264            .          .           func stkobjinit() { 
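
For context, getStackMap is consulted whenever the runtime needs a frame's live-pointer layout, for example when copystack grows a goroutine stack or the GC scans it. A minimal sketch (not from the profiled test) that forces such stack copies through deep recursion:

package main

import "fmt"

// grow recurses far enough that the goroutine stack must be copied to a
// larger allocation; copystack then walks each frame and calls getStackMap
// to locate the pointers it has to adjust.
func grow(n int) int {
	var pad [128]byte // widen the frame so fewer calls trigger growth
	if n == 0 {
		return int(pad[0])
	}
	return grow(n-1) + 1
}

func main() {
	fmt.Println(grow(1 << 14))
}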

internal/runtime/maps.(*Map).getWithoutKeySmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        80ms      130ms (flat, cum)  0.27%
     12            .          .           	"internal/race" 
     13            .          .           	"internal/runtime/sys" 
     14            .          .           	"unsafe" 
     15            .          .           ) 
     16            .          .            
     17         10ms       60ms           func (m *Map) getWithoutKeySmallFastStr(typ *abi.SwissMapType, key string) unsafe.Pointer { 
     18            .          .           	g := groupReference{ 
     19         10ms       10ms           		data: m.dirPtr, 
     20            .          .           	} 
     21            .          .            
     22            .          .           	ctrls := *g.ctrls() 
     23            .          .           	slotKey := g.key(typ, 0) 
     24            .          .           	slotSize := typ.SlotSize 
     25            .          .            
     26            .          .           	// The 64 threshold was chosen based on performance of BenchmarkMapStringKeysEight, 
     27            .          .           	// where there are 8 keys to check, all of which don't quick-match the lookup key. 
     28            .          .           	// In that case, we can save hashing the lookup key. That savings is worth this extra code 
     29            .          .           	// for strings that are long enough that hashing is expensive. 
     30         60ms       60ms           	if len(key) > 64 { 
     31            .          .           		// String hashing and equality might be expensive. Do a quick check first. 
     32            .          .           		j := abi.SwissMapGroupSlots 
     33            .          .           		for i := range abi.SwissMapGroupSlots { 
     34            .          .           			if ctrls&(1<<7) == 0 && longStringQuickEqualityTest(key, *(*string)(slotKey)) { 
     35            .          .           				if j < abi.SwissMapGroupSlots { 

internal/runtime/maps.(*Map).getWithoutKeySmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       170ms      320ms (flat, cum)  0.67%
     54            .          .           		return nil 
     55            .          .           	} 
     56            .          .            
     57            .          .           dohash: 
     58            .          .           	// This path will cost 1 hash and 1+ε comparisons. 
     59         30ms      100ms           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&key)), m.seed) 
     60            .          .           	h2 := uint8(h2(hash)) 
     61            .          .           	ctrls = *g.ctrls() 
     62         10ms       10ms           	slotKey = g.key(typ, 0) 
     63            .          .            
     64         30ms       30ms           	for range abi.SwissMapGroupSlots { 
     65         50ms      130ms           		if uint8(ctrls) == h2 && key == *(*string)(slotKey) { 
     66         20ms       20ms           			return unsafe.Pointer(uintptr(slotKey) + 2*goarch.PtrSize) 
     67            .          .           		} 
     68         20ms       20ms           		slotKey = unsafe.Pointer(uintptr(slotKey) + slotSize) 
     69            .          .           		ctrls >>= 8 
     70            .          .           	} 
     71         10ms       10ms           	return nil 
     72            .          .           } 
     73            .          .            
     74            .          .           // Returns true if a and b might be equal. 
     75            .          .           // Returns false if a and b are definitely not equal. 
     76            .          .           // Requires len(a)>=8. 
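
A minimal sketch (not from the profiled test) of a lookup that stays on this small-map fast path: with at most 8 entries (abi.SwissMapGroupSlots) the map has no directory, so dirLen <= 0 and the accessors below call getWithoutKeySmallFastStr directly; keys of 64 bytes or less skip the quick-equality prefilter and jump straight to the dohash label above.

package main

import "fmt"

func main() {
	// Three entries fit in a single 8-slot group, so string lookups take
	// the getWithoutKeySmallFastStr path shown in this listing.
	m := map[string]int{"alpha": 1, "beta": 2, "gamma": 3}
	if v, ok := m["beta"]; ok { // runtime.mapaccess2_faststr
		fmt.Println(v)
	}
}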

runtime.mapaccess1_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:        10ms       60ms (flat, cum)  0.13%
    114            .          .           	if m.writing != 0 { 
    115            .          .           		fatal("concurrent map read and map write") 
    116            .          .           		return nil 
    117            .          .           	} 
    118            .          .            
    119         10ms       10ms           	if m.dirLen <= 0 { 
    120            .       50ms           		elem := m.getWithoutKeySmallFastStr(typ, key) 
    121            .          .           		if elem == nil { 
    122            .          .           			return unsafe.Pointer(&zeroVal[0]) 
    123            .          .           		} 
    124            .          .           		return elem 
    125            .          .           	} 

runtime.mapaccess2_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       110ms      660ms (flat, cum)  1.39%
    157            .          .           		} 
    158            .          .           	} 
    159            .          .           } 
    160            .          .            
    161            .          .           //go:linkname runtime_mapaccess2_faststr runtime.mapaccess2_faststr 
    162         10ms      160ms           func runtime_mapaccess2_faststr(typ *abi.SwissMapType, m *Map, key string) (unsafe.Pointer, bool) { 
    163            .          .           	if race.Enabled && m != nil { 
    164            .          .           		callerpc := sys.GetCallerPC() 
    165            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess2_faststr) 
    166            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
    167            .          .           	} 
    168            .          .            
    169         80ms       80ms           	if m == nil || m.Used() == 0 { 
    170            .          .           		return unsafe.Pointer(&zeroVal[0]), false 
    171            .          .           	} 
    172            .          .            
    173            .          .           	if m.writing != 0 { 
    174            .          .           		fatal("concurrent map read and map write") 
    175            .          .           		return nil, false 
    176            .          .           	} 
    177            .          .            
    178            .          .           	if m.dirLen <= 0 { 
    179            .      400ms           		elem := m.getWithoutKeySmallFastStr(typ, key) 
    180            .          .           		if elem == nil { 
    181            .          .           			return unsafe.Pointer(&zeroVal[0]), false 
    182            .          .           		} 
    183            .          .           		return elem, true 
    184            .          .           	} 
    185            .          .            
    186            .          .           	k := key 
    187            .          .           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) 
    188            .          .            
    189            .          .           	// Select table. 
    190            .          .           	idx := m.directoryIndex(hash) 
    191            .          .           	t := m.directoryAt(idx) 
    192            .          .            
    193            .          .           	// Probe table. 
    194         10ms       10ms           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    195            .          .           	for ; ; seq = seq.next() { 
    196            .          .           		g := t.groups.group(typ, seq.offset) 
    197            .          .            
    198         10ms       10ms           		match := g.ctrls().matchH2(h2(hash))
    199            .          .            
    200            .          .           		for match != 0 { 
    201            .          .           			i := match.first() 
    202            .          .            
    203            .          .           			slotKey := g.key(typ, i) 

internal/runtime/maps.(*Map).putSlotSmallFastStr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       110ms      110ms (flat, cum)  0.23%
    220            .          .           func (m *Map) putSlotSmallFastStr(typ *abi.SwissMapType, hash uintptr, key string) unsafe.Pointer { 
    221            .          .           	g := groupReference{ 
    222            .          .           		data: m.dirPtr, 
    223            .          .           	} 
    224            .          .            
    225         20ms       20ms           	match := g.ctrls().matchH2(h2(hash))
    226            .          .            
    227            .          .           	// Look for an existing slot containing this key. 
    228         10ms       10ms           	for match != 0 { 
    229            .          .           		i := match.first() 
    230            .          .            
    231         10ms       10ms           		slotKey := g.key(typ, i)
    232            .          .           		if key == *(*string)(slotKey) { 
    233            .          .           			// Key needs update, as the backing storage may differ. 
    234            .          .           			*(*string)(slotKey) = key 
    235         10ms       10ms           			slotElem := g.elem(typ, i)
    236            .          .           			return slotElem 
    237            .          .           		} 
    238            .          .           		match = match.removeFirst() 
    239            .          .           	} 
    240            .          .            
    241            .          .           	// There can't be deleted slots, small maps can't have them 
    242            .          .           	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit 
    243            .          .           	// more efficient than matchEmpty. 
    244         30ms       30ms           	match = g.ctrls().matchEmptyOrDeleted()
    245            .          .           	if match == 0 { 
    246            .          .           		fatal("small map with no empty slot (concurrent map writes?)") 
    247            .          .           	} 
    248            .          .            
    249            .          .           	i := match.first() 
    250            .          .            
    251            .          .           	slotKey := g.key(typ, i) 
    252         10ms       10ms           	*(*string)(slotKey) = key 
    253            .          .            
    254         20ms       20ms           	slotElem := g.elem(typ, i)
    255            .          .            
    256            .          .           	g.ctrls().set(i, ctrl(h2(hash))) 
    257            .          .           	m.used++ 
    258            .          .            

runtime.mapassign_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:       190ms      930ms (flat, cum)  1.95%
    259            .          .           	return slotElem 
    260            .          .           } 
    261            .          .            
    262            .          .           //go:linkname runtime_mapassign_faststr runtime.mapassign_faststr 
    263         20ms       20ms           func runtime_mapassign_faststr(typ *abi.SwissMapType, m *Map, key string) unsafe.Pointer { 
    264         20ms       20ms           	if m == nil { 
    265            .          .           		panic(errNilAssign) 
    266            .          .           	} 
    267            .          .           	if race.Enabled { 
    268            .          .           		callerpc := sys.GetCallerPC() 
    269            .          .           		pc := abi.FuncPCABIInternal(runtime_mapassign_faststr) 
    270            .          .           		race.WritePC(unsafe.Pointer(m), callerpc, pc) 
    271            .          .           	} 
    272         20ms       20ms           	if m.writing != 0 { 
    273            .          .           		fatal("concurrent map writes") 
    274            .          .           	} 
    275            .          .            
    276            .          .           	k := key 
    277         40ms      130ms           	hash := typ.Hasher(abi.NoEscape(unsafe.Pointer(&k)), m.seed) 
    278            .          .            
    279            .          .           	// Set writing after calling Hasher, since Hasher may panic, in which 
    280            .          .           	// case we have not actually done a write. 
    281         10ms       10ms           	m.writing ^= 1 // toggle, see comment on writing 
    282            .          .            
    283         10ms       10ms           	if m.dirPtr == nil { 
    284            .      490ms           		m.growToSmall(typ) 
    285            .          .           	} 
    286            .          .            
    287         20ms       20ms           	if m.dirLen == 0 { 
    288         10ms       10ms           		if m.used < abi.SwissMapGroupSlots { 
    289            .      110ms           			elem := m.putSlotSmallFastStr(typ, hash, key) 
    290            .          .            
    291         10ms       10ms           			if m.writing == 0 { 
    292            .          .           				fatal("concurrent map writes") 
    293            .          .           			} 
    294            .          .           			m.writing ^= 1 
    295            .          .            
    296         10ms       10ms           			return elem 
    297            .          .           		} 
    298            .          .            
    299            .          .           		// Can't fit another entry, grow to full size map. 
    300            .       50ms           		m.growToTable(typ) 
    301            .          .           	} 
    302            .          .            
    303            .          .           	var slotElem unsafe.Pointer 
    304            .          .           outer: 
    305            .          .           	for { 
    306            .          .           		// Select table. 
    307            .          .           		idx := m.directoryIndex(hash) 
    308            .          .           		t := m.directoryAt(idx) 
    309            .          .            
    310            .          .           		seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    311            .          .            
    312            .          .           		// As we look for a match, keep track of the first deleted slot 
    313            .          .           		// we find, which we'll use to insert the new entry if 
    314            .          .           		// necessary. 
    315            .          .           		var firstDeletedGroup groupReference 
    316            .          .           		var firstDeletedSlot uintptr 
    317            .          .            
    318            .          .           		for ; ; seq = seq.next() { 
    319         10ms       10ms           			g := t.groups.group(typ, seq.offset) 
    320         10ms       10ms           			match := g.ctrls().matchH2(h2(hash))
    321            .          .            
    322            .          .           			// Look for an existing slot containing this key. 
    323            .          .           			for match != 0 { 
    324            .          .           				i := match.first() 
    325            .          .            
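
A minimal sketch (not from the profiled test) of writes that follow the path above: the first assignment to an empty map pays for growToSmall, subsequent ones hit putSlotSmallFastStr, and only the ninth distinct key would force growToTable.

package main

import "fmt"

func main() {
	m := make(map[string]int) // dirPtr is nil until the first write
	for i, k := range []string{"a", "b", "c"} {
		m[k] = i // runtime.mapassign_faststr
	}
	fmt.Println(len(m))
}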

runtime.mapdelete_faststr

/usr/lib/go/src/internal/runtime/maps/runtime_faststr_swiss.go

  Total:           0       20ms (flat, cum) 0.042%
    407            .          .            
    408            .          .           	if m == nil || m.Used() == 0 { 
    409            .          .           		return 
    410            .          .           	} 
    411            .          .            
    412            .       20ms           	m.Delete(typ, abi.NoEscape(unsafe.Pointer(&key))) 
    413            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*IR).AddBlock

/home/user/q/src/ssa/IR.go

  Total:        10ms       60ms (flat, cum)  0.13%
      5            .          .           	Blocks []*Block 
      6            .          .           } 
      7            .          .            
      8            .          .           // AddBlock adds a new block to the function. 
      9            .          .           func (ir *IR) AddBlock(block *Block) { 
     10         10ms       60ms           	ir.Blocks = append(ir.Blocks, block) 
     11            .          .           } 
     12            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).Append

/home/user/q/src/ssa/IR.go

  Total:       130ms      590ms (flat, cum)  1.24%
     13            .          .           // Append adds a new value to the last block. 
     14            .          .           func (ir *IR) Append(instr Value) Value { 
     15         10ms      440ms           	existing := ir.Block().FindExisting(instr) 
     16            .          .            
     17         20ms       20ms           	if existing != nil { 
     18            .          .           		return existing 
     19            .          .           	} 
     20            .          .            
     21         50ms       80ms           	ir.Block().Append(instr)
     22         50ms       50ms           	return instr 
     23            .          .           } 
     24            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).Block

/home/user/q/src/ssa/IR.go

  Total:        60ms       60ms (flat, cum)  0.13%
     25            .          .           // Block returns the last block. 
     26            .          .           func (ir *IR) Block() *Block { 
     27         60ms       60ms           	return ir.Blocks[len(ir.Blocks)-1] 
     28            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*IR).ComputeUsers

/home/user/q/src/ssa/IR.go

  Total:       210ms      910ms (flat, cum)  1.91%
     30            .          .           // ComputeUsers creates the list of users for each value. 
     31         10ms       10ms           func (ir *IR) ComputeUsers() { 
     32         20ms       20ms           	for _, block := range ir.Blocks { 
     33         10ms       10ms           		for _, value := range block.Instructions { 
     34         30ms      310ms           			for _, input := range value.Inputs() { 
     35        140ms      560ms           				input.AddUser(value) 
     36            .          .           			} 
     37            .          .           		} 
     38            .          .           	} 
     39            .          .           } 
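
The pass above inverts the def-use relation: each instruction registers itself as a user of every operand it reads. A self-contained sketch of the same inversion (value, inputs, and users here are hypothetical stand-ins, not the q compiler's actual types):

package main

import "fmt"

type value struct {
	name   string
	inputs []*value
	users  []*value
}

// computeUsers mirrors IR.ComputeUsers in miniature: one pass over all
// instructions, appending each to the user list of its inputs.
func computeUsers(instructions []*value) {
	for _, v := range instructions {
		for _, in := range v.inputs {
			in.users = append(in.users, v)
		}
	}
}

func main() {
	a := &value{name: "a"}
	b := &value{name: "b", inputs: []*value{a}}
	c := &value{name: "c", inputs: []*value{a, b}}
	computeUsers([]*value{a, b, c})
	for _, u := range a.users {
		fmt.Println("a is used by", u.name)
	}
}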

git.urbach.dev/cli/q/src/ssa.(*IR).CountValues

/home/user/q/src/ssa/IR.go

  Total:        50ms       50ms (flat, cum)   0.1%
     41            .          .           // CountValues returns the total number of values. 
     42            .          .           func (ir *IR) CountValues() int { 
     43            .          .           	count := 0 
     44            .          .            
     45         10ms       10ms           	for _, block := range ir.Blocks { 
     46         40ms       40ms           		count += len(block.Instructions) 
     47            .          .           	} 
     48            .          .            
     49            .          .           	return count 

git.urbach.dev/cli/q/src/ssa.(*IR).ExitBlocks

/home/user/q/src/ssa/IR.go

  Total:        30ms      150ms (flat, cum)  0.31%
     51            .          .            
     52            .          .           // ExitBlocks is an iterator for all exit blocks. 
     53            .          .           func (ir *IR) ExitBlocks(yield func(*Block) bool) { 
     54         10ms       10ms           	for _, block := range ir.Blocks { 
     55         10ms       10ms           		_, returns := block.Last().(*Return)
     56            .          .            
     57            .          .           		if !returns { 
     58            .          .           			continue 
     59            .          .           		} 
     60            .          .            
     61         10ms      130ms           		if !yield(block) {
     62            .          .           			return 
     63            .          .           		} 
     64            .          .           	} 
     65            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*IR).IsIdentified

/home/user/q/src/ssa/IR.go

  Total:        60ms      120ms (flat, cum)  0.25%
     66            .          .            
     67            .          .           // IsIdentified returns true if the value can be obtained from one of the identifiers. 
     68            .          .           func (ir *IR) IsIdentified(value Value) bool { 
     69            .          .           	for _, block := range ir.Blocks { 
     70         60ms      120ms           		if block.IsIdentified(value) {
     71            .          .           			return true 
     72            .          .           		} 
     73            .          .           	} 
     74            .          .            

git.urbach.dev/cli/q/src/ssa.(*IR).ReplaceAll

/home/user/q/src/ssa/IR.go

  Total:        50ms       60ms (flat, cum)  0.13%
     76            .          .           } 
     77            .          .            
     78            .          .           // ReplaceAll replaces all occurrences of the given `old` value with the `new` value. 
     79            .          .           func (ir *IR) ReplaceAll(old Value, new Value) { 
     80         10ms       10ms           	for _, block := range ir.Blocks { 
     81         10ms       10ms           		for _, value := range block.Instructions { 
     82         30ms       40ms           			value.Replace(old, new) 
     83            .          .           		} 
     84            .          .           	} 
     85            .          .           } 

runtime.makeslicecopy

/usr/lib/go/src/runtime/slice.go

  Total:        10ms      2.05s (flat, cum)  4.30%
     33            .          .           	panic(errorString("makeslice: cap out of range")) 
     34            .          .           } 
     35            .          .            
     36            .          .           // makeslicecopy allocates a slice of "tolen" elements of type "et", 
     37            .          .           // then copies "fromlen" elements of type "et" into that new allocation from "from". 
     38            .      140ms           func makeslicecopy(et *_type, tolen int, fromlen int, from unsafe.Pointer) unsafe.Pointer { 
     39            .          .           	var tomem, copymem uintptr 
     40            .          .           	if uintptr(tolen) > uintptr(fromlen) { 
     41            .          .           		var overflow bool 
     42            .          .           		tomem, overflow = math.MulUintptr(et.Size_, uintptr(tolen)) 
     43         10ms       10ms           		if overflow || tomem > maxAlloc || tolen < 0 { 
     44            .          .           			panicmakeslicelen() 
     45            .          .           		} 
     46            .          .           		copymem = et.Size_ * uintptr(fromlen) 
     47            .          .           	} else { 
     48            .          .           		// fromlen is a known good length, equal to or greater than tolen,
     49            .          .           		// thereby making tolen a good slice length too as from and to slices have the 
     50            .          .           		// same element width. 
     51            .          .           		tomem = et.Size_ * uintptr(tolen) 
     52            .          .           		copymem = tomem 
     53            .          .           	} 
     54            .          .            
     55            .          .           	var to unsafe.Pointer 
     56            .          .           	if !et.Pointers() { 
     57            .      1.89s           		to = mallocgc(tomem, nil, false) 
     58            .          .           		if copymem < tomem { 
     59            .       10ms           			memclrNoHeapPointers(add(to, copymem), tomem-copymem) 
     60            .          .           		} 
     61            .          .           	} else { 
     62            .          .           		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
     63            .          .           		to = mallocgc(tomem, et, true) 
     64            .          .           		if copymem > 0 && writeBarrier.enabled { 

runtime.makeslicecopy

/usr/lib/go/src/runtime/slice.go

  Total:        10ms       10ms (flat, cum) 0.021%
     84            .          .           		asanread(from, copymem) 
     85            .          .           	} 
     86            .          .            
     87            .          .           	memmove(to, from, copymem) 
     88            .          .            
     89         10ms       10ms           	return to 
     90            .          .           } 
     91            .          .            
     92            .          .           // makeslice should be an internal detail, 
     93            .          .           // but widely used packages access it using linkname. 
     94            .          .           // Notable members of the hall of shame include: 
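
makeslicecopy is not called directly from user code; the compiler emits it when a make is immediately followed by a copy of the whole source, which lets the runtime skip zeroing the bytes about to be overwritten. A minimal sketch (not from the profiled test):

package main

import "fmt"

func main() {
	src := []int{1, 2, 3, 4}
	// make + full copy is fused into one runtime.makeslicecopy call.
	dst := make([]int, len(src))
	copy(dst, src)
	fmt.Println(dst)
}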

runtime.makeslice

/usr/lib/go/src/runtime/slice.go

  Total:       150ms      2.80s (flat, cum)  5.88%
     96            .          .           // 
     97            .          .           // Do not remove or change the type signature. 
     98            .          .           // See go.dev/issue/67401. 
     99            .          .           // 
    100            .          .           //go:linkname makeslice 
    101         40ms       40ms           func makeslice(et *_type, len, cap int) unsafe.Pointer { 
    102         50ms       50ms           	mem, overflow := math.MulUintptr(et.Size_, uintptr(cap)) 
    103         50ms       50ms           	if overflow || mem > maxAlloc || len < 0 || len > cap { 
    104            .          .           		// NOTE: Produce a 'len out of range' error instead of a 
    105            .          .           		// 'cap out of range' error when someone does make([]T, bignumber). 
    106            .          .           		// 'cap out of range' is true too, but since the cap is only being 
    107            .          .           		// supplied implicitly, saying len is clearer. 
    108            .          .           		// See golang.org/issue/4085. 
    109            .          .           		mem, overflow := math.MulUintptr(et.Size_, uintptr(len)) 
    110            .          .           		if overflow || mem > maxAlloc || len < 0 { 
    111            .          .           			panicmakeslicelen() 
    112            .          .           		} 
    113            .          .           		panicmakeslicecap() 
    114            .          .           	} 
    115            .          .            
    116         10ms      2.66s           	return mallocgc(mem, et, true) 
    117            .          .           } 
    118            .          .            
    119            .          .           func makeslice64(et *_type, len64, cap64 int64) unsafe.Pointer { 
    120            .          .           	len := int(len64) 
    121            .          .           	if int64(len) != len64 { 
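
The len/cap error distinction from issue 4085 above is observable directly; a minimal sketch (not from the profiled test) where the length is only known at run time:

package main

import "fmt"

func main() {
	defer func() {
		// makeslice reports 'len out of range' rather than 'cap out of
		// range' because the cap was only supplied implicitly.
		fmt.Println("recovered:", recover())
	}()
	n := -1
	_ = make([]byte, n) // panics inside runtime.makeslice
}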

runtime.growslice

/usr/lib/go/src/runtime/slice.go

  Total:       210ms      210ms (flat, cum)  0.44%
    172            .          .           // 
    173            .          .           // Do not remove or change the type signature. 
    174            .          .           // See go.dev/issue/67401. 
    175            .          .           // 
    176            .          .           //go:linkname growslice 
    177         20ms       20ms           func growslice(oldPtr unsafe.Pointer, newLen, oldCap, num int, et *_type) slice { 
    178            .          .           	oldLen := newLen - num 
    179            .          .           	if raceenabled { 
    180            .          .           		callerpc := sys.GetCallerPC() 
    181            .          .           		racereadrangepc(oldPtr, uintptr(oldLen*int(et.Size_)), callerpc, abi.FuncPCABIInternal(growslice)) 
    182            .          .           	} 
    183            .          .           	if msanenabled { 
    184            .          .           		msanread(oldPtr, uintptr(oldLen*int(et.Size_))) 
    185            .          .           	} 
    186            .          .           	if asanenabled { 
    187            .          .           		asanread(oldPtr, uintptr(oldLen*int(et.Size_))) 
    188            .          .           	} 
    189            .          .            
    190         20ms       20ms           	if newLen < 0 { 
    191            .          .           		panic(errorString("growslice: len out of range")) 
    192            .          .           	} 
    193            .          .            
    194         10ms       10ms           	if et.Size_ == 0 { 
    195            .          .           		// append should not create a slice with nil pointer but non-zero len. 
    196            .          .           		// We assume that append doesn't need to preserve oldPtr in this case. 
    197            .          .           		return slice{unsafe.Pointer(&zerobase), newLen, newLen} 
    198            .          .           	} 
    199            .          .            
    200            .          .           	newcap := nextslicecap(newLen, oldCap) 
    201            .          .            
    202            .          .           	var overflow bool 
    203            .          .           	var lenmem, newlenmem, capmem uintptr 
    204            .          .           	// Specialize for common values of et.Size. 
    205            .          .           	// For 1 we don't need any division/multiplication. 
    206            .          .           	// For goarch.PtrSize, compiler will optimize division/multiplication into a shift by a constant. 
    207            .          .           	// For powers of 2, use a variable shift. 
    208            .          .           	noscan := !et.Pointers() 
    209            .          .           	switch { 
    210         10ms       10ms           	case et.Size_ == 1: 
    211            .          .           		lenmem = uintptr(oldLen) 
    212            .          .           		newlenmem = uintptr(newLen) 
    213            .          .           		capmem = roundupsize(uintptr(newcap), noscan) 
    214            .          .           		overflow = uintptr(newcap) > maxAlloc 
    215            .          .           		newcap = int(capmem) 
    216         10ms       10ms           	case et.Size_ == goarch.PtrSize: 
    217            .          .           		lenmem = uintptr(oldLen) * goarch.PtrSize 
    218            .          .           		newlenmem = uintptr(newLen) * goarch.PtrSize 
    219         30ms       30ms           		capmem = roundupsize(uintptr(newcap)*goarch.PtrSize, noscan)
    220            .          .           		overflow = uintptr(newcap) > maxAlloc/goarch.PtrSize 
    221            .          .           		newcap = int(capmem / goarch.PtrSize) 
    222         40ms       40ms           	case isPowerOfTwo(et.Size_):
    223            .          .           		var shift uintptr 
    224            .          .           		if goarch.PtrSize == 8 { 
    225            .          .           			// Mask shift for better code generation. 
    226            .          .           			shift = uintptr(sys.TrailingZeros64(uint64(et.Size_))) & 63 
    227            .          .           		} else { 
    228            .          .           			shift = uintptr(sys.TrailingZeros32(uint32(et.Size_))) & 31 
    229            .          .           		} 
    230            .          .           		lenmem = uintptr(oldLen) << shift 
    231            .          .           		newlenmem = uintptr(newLen) << shift 
    232         70ms       70ms           		capmem = roundupsize(uintptr(newcap)<<shift, noscan)
    233            .          .           		overflow = uintptr(newcap) > (maxAlloc >> shift) 
    234            .          .           		newcap = int(capmem >> shift) 
    235            .          .           		capmem = uintptr(newcap) << shift 
    236            .          .           	default: 
    237            .          .           		lenmem = uintptr(oldLen) * et.Size_ 
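
The roundupsize calls above are where append capacity gets snapped to the
allocator's size classes, which is why a grown slice often ends up with more
capacity than the doubling heuristic alone would predict. A minimal sketch of
the observable effect (the exact capacities are an implementation detail and
vary by Go version):

    package main

    import "fmt"

    func main() {
        s := make([]byte, 0)
        prev := cap(s)
        for i := 0; i < 2048; i++ {
            s = append(s, 0)
            if cap(s) != prev {
                prev = cap(s)
                // Print each capacity transition; the values follow the
                // malloc size classes rather than the raw growth factor.
                fmt.Println("len", len(s), "-> cap", cap(s))
            }
        }
    }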

runtime.growslice

/usr/lib/go/src/runtime/slice.go

  Total:       130ms      900ms (flat, cum)  1.89%
    259            .          .           		panic(errorString("growslice: len out of range")) 
    260            .          .           	} 
    261            .          .            
    262            .          .           	var p unsafe.Pointer 
    263            .          .           	if !et.Pointers() { 
    264         10ms       70ms           		p = mallocgc(capmem, nil, false) 
    265            .          .           		// The append() that calls growslice is going to overwrite from oldLen to newLen. 
    266            .          .           		// Only clear the part that will not be overwritten. 
    267            .          .           		// The reflect_growslice() that calls growslice will manually clear 
    268            .          .           		// the region not cleared here. 
    269         10ms       20ms           		memclrNoHeapPointers(add(p, newlenmem), capmem-newlenmem) 
    270            .          .           	} else { 
    271            .          .           		// Note: can't use rawmem (which avoids zeroing of memory), because then GC can scan uninitialized memory. 
    272            .      670ms           		p = mallocgc(capmem, et, true) 
    273         30ms       30ms           		if lenmem > 0 && writeBarrier.enabled { 
    274            .          .           			// Only shade the pointers in oldPtr since we know the destination slice p 
    275            .          .           			// only contains nil pointers because it has been cleared during alloc. 
    276            .          .           			// 
    277            .          .           			// It's safe to pass a type to this function as an optimization because 
    278            .          .           			// from and to only ever refer to memory representing whole values of 
    279            .          .           			// type et. See the comment on bulkBarrierPreWrite. 
    280            .          .           			bulkBarrierPreWriteSrcOnly(uintptr(p), uintptr(oldPtr), lenmem-et.Size_+et.PtrBytes, et) 
    281            .          .           		} 
    282            .          .           	} 
    283            .       30ms           	memmove(p, oldPtr, lenmem) 
    284            .          .            
    285         80ms       80ms           	return slice{p, newLen, newcap} 
    286            .          .           } 
    287            .          .            
    288            .          .           // nextslicecap computes the next appropriate slice length. 
    289            .          .           func nextslicecap(newLen, oldCap int) int { 
    290            .          .           	newcap := oldCap 
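
nextslicecap (whose body starts above) picks the raw growth factor before the
size-class rounding seen earlier: roughly doubling for small slices and
tapering toward about 1.25x for large ones. The thresholds are
version-dependent, so treat this sketch as a way to observe the policy, not
as a specification:

    package main

    import "fmt"

    func main() {
        s := make([]int, 0)
        prev := 0
        for i := 0; i < 100000; i++ {
            s = append(s, i)
            if c := cap(s); c != prev {
                if prev > 0 {
                    // Realized growth factor, including size-class rounding.
                    fmt.Printf("cap %6d -> %6d (x%.2f)\n", prev, c, float64(c)/float64(prev))
                }
                prev = c
            }
        }
    }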

runtime.isPowerOfTwo

/usr/lib/go/src/runtime/slice.go

  Total:        40ms       40ms (flat, cum) 0.084%
    346            .          .           	new.len = old.len // preserve the old length 
    347            .          .           	return new 
    348            .          .           } 
    349            .          .            
    350            .          .           func isPowerOfTwo(x uintptr) bool { 
    351         40ms       40ms           	return x&(x-1) == 0 
    352            .          .           } 
    353            .          .            
    354            .          .           // slicecopy is used to copy from a string or slice of pointerless elements into a slice. 
    355            .          .           func slicecopy(toPtr unsafe.Pointer, toLen int, fromPtr unsafe.Pointer, fromLen int, width uintptr) int { 
    356            .          .           	if fromLen == 0 || toLen == 0 { 
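
The x&(x-1) test clears the lowest set bit: a power of two has exactly one
bit set, so the result is zero. Note the expression also reports true for 0;
growslice never passes 0 here because zero-size elements take an earlier
path. A standalone demonstration:

    package main

    import "fmt"

    func isPowerOfTwo(x uintptr) bool {
        // x-1 flips the lowest set bit and everything below it, so the
        // AND is zero exactly when x has at most one bit set.
        return x&(x-1) == 0
    }

    func main() {
        for _, x := range []uintptr{0, 1, 2, 3, 4, 6, 8, 24, 64} {
            fmt.Println(x, isPowerOfTwo(x)) // note: 0 also reports true
        }
    }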

internal/bytealg.MakeNoZero

/usr/lib/go/src/runtime/slice.go

  Total:        10ms       90ms (flat, cum)  0.19%
    394            .          .           //go:linkname bytealg_MakeNoZero internal/bytealg.MakeNoZero 
    395            .          .           func bytealg_MakeNoZero(len int) []byte { 
    396            .          .           	if uintptr(len) > maxAlloc { 
    397            .          .           		panicmakeslicelen() 
    398            .          .           	} 
    399         10ms       10ms           	cap := roundupsize(uintptr(len), true) 
    400            .       80ms           	return unsafe.Slice((*byte)(mallocgc(uintptr(cap), nil, false)), cap)[:len] 
    401            .          .           } 

git.urbach.dev/cli/q/src/ssa.NewBlock

/home/user/q/src/ssa/Block.go

  Total:           0      150ms (flat, cum)  0.31%
     18            .          .           	Predecessors []*Block 
     19            .          .           } 
     20            .          .            
     21            .          .           // NewBlock creates a new basic block. 
     22            .          .           func NewBlock(label string) *Block { 
     23            .       80ms           	return &Block{ 
     24            .       70ms           		Instructions: make([]Value, 0, 8), 
     25            .          .           		Label:        label, 
     26            .          .           	} 
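
The make([]Value, 0, 8) preallocation is visible in this profile precisely
because it keeps the first several Append calls out of runtime.growslice. A
tiny demonstration of the effect:

    package main

    import "fmt"

    func main() {
        // The first 8 appends reuse the preallocated backing array
        // instead of triggering a reallocation.
        s := make([]int, 0, 8)
        for i := 0; i < 8; i++ {
            s = append(s, i)
        }
        fmt.Println(len(s), cap(s)) // 8 8: no growslice call happened
    }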

git.urbach.dev/cli/q/src/ssa.(*Block).AddSuccessor

/home/user/q/src/ssa/Block.go

  Total:        70ms      740ms (flat, cum)  1.55%
     28            .          .            
     29            .          .           // AddSuccessor adds the given block as a successor. 
     30         10ms       10ms           func (b *Block) AddSuccessor(successor *Block) { 
     31            .       60ms           	successor.Predecessors = append(successor.Predecessors, b) 
     32            .          .            
     33            .          .           	if len(b.Protected) > 0 { 
     34            .          .           		if successor.Protected == nil { 
     35            .       10ms           			successor.Protected = make(map[Value][]Value, len(b.Protected)) 
     36            .          .           		} 
     37            .          .            
     38         10ms       20ms           		maps.Copy(successor.Protected, b.Protected) 
     39            .          .           	} 
     40            .          .            
     41            .          .           	if b.Identifiers == nil { 
     42            .          .           		return 
     43            .          .           	} 
     44            .          .            
     45            .          .           	if successor.Identifiers == nil { 
     46            .       70ms           		successor.Identifiers = make(map[string]Value, len(b.Identifiers)) 
     47            .          .            
     48            .          .           		if len(successor.Predecessors) == 1 { 
     49         30ms      420ms           			maps.Copy(successor.Identifiers, b.Identifiers) 
     50            .          .           			return 
     51            .          .           		} 
     52            .          .           	} 
     53            .          .            
     54            .       20ms           	keys := make(map[string]struct{}, max(len(b.Identifiers), len(successor.Identifiers))) 
     55            .          .            
     56            .       10ms           	for name := range successor.Identifiers { 
     57         20ms       20ms           		keys[name] = struct{}{} 
     58            .          .           	} 
     59            .          .            
     60            .       20ms           	for name := range b.Identifiers { 
     61            .       30ms           		keys[name] = struct{}{} 
     62            .          .           	} 
     63            .          .            
     64            .          .           	var modifiedStructs []string 
     65            .          .            
     66            .       20ms           	for name := range keys { 
     67            .       20ms           		oldValue, oldExists := successor.Identifiers[name] 
     68            .       10ms           		newValue, newExists := b.Identifiers[name] 
     69            .          .            
     70            .          .           		switch { 
     71            .          .           		case oldExists: 
     72            .          .           			if oldValue == newValue { 
     73            .          .           				continue 
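
AddSuccessor merges the predecessor's identifier map into the successor;
where the two maps disagree, a phi must select the value per incoming edge
(the cases following line 73 above build those phis). A simplified model of
the merge rule, using hypothetical stand-ins for the ssa package's types:

    package main

    import "fmt"

    // Value and Phi are hypothetical stand-ins, not the ssa package's API.
    type Value interface{}

    type Phi struct{ Arguments []Value }

    // mergeIdentifier models the core rule: identical values on all paths
    // need no phi; differing values become a phi with one argument per
    // incoming edge.
    func mergeIdentifier(oldValue, newValue Value) Value {
        if oldValue == newValue {
            return oldValue
        }
        return &Phi{Arguments: []Value{oldValue, newValue}}
    }

    func main() {
        fmt.Println(mergeIdentifier(1, 1)) // 1: same value on every path
        fmt.Println(mergeIdentifier(1, 2)) // &{[1 2]}: phi over both edges
    }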

git.urbach.dev/cli/q/src/ssa.(*Block).AddSuccessor

/home/user/q/src/ssa/Block.go

  Total:           0       30ms (flat, cum) 0.063%
    103            .          .            
    104            .          .           			for i := range phi.Arguments { 
    105            .          .           				phi.Arguments[i] = oldValue 
    106            .          .           			} 
    107            .          .            
    108            .       20ms           			successor.InsertAt(phi, 0) 
    109            .          .           			successor.Identifiers[name] = phi 
    110            .          .            
    111            .          .           			if newExists { 
    112            .          .           				phi.Arguments = append(phi.Arguments, newValue) 
    113            .          .           			} else { 
    114            .          .           				phi.Arguments = append(phi.Arguments, Undefined) 
    115            .          .           			} 
    116            .          .            
    117            .          .           		case newExists: 
    118            .          .           			phi := &Phi{ 
    119            .          .           				Arguments: make([]Value, len(successor.Predecessors)-1, len(successor.Predecessors)), 
    120            .       10ms           				Typ:       newValue.Type(), 
    121            .          .           			} 
    122            .          .            
    123            .          .           			for i := range phi.Arguments { 
    124            .          .           				phi.Arguments[i] = Undefined 
    125            .          .           			} 

git.urbach.dev/cli/q/src/ssa.(*Block).Append

/home/user/q/src/ssa/Block.go

  Total:        20ms       60ms (flat, cum)  0.13%
    145            .          .           	} 
    146            .          .           } 
    147            .          .            
    148            .          .           // Append adds a new value to the block. 
    149            .          .           func (b *Block) Append(value Value) { 
    150         20ms       60ms           	b.Instructions = append(b.Instructions, value) 
    151            .          .           } 
    152            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).CanReachPredecessor

/home/user/q/src/ssa/Block.go

  Total:           0       30ms (flat, cum) 0.063%
    153            .          .           // CanReachPredecessor checks if the `other` block appears as a predecessor or is the block itself. 
    154            .          .           func (b *Block) CanReachPredecessor(other *Block) bool { 
    155            .       30ms           	return b.canReachPredecessor(other, make(map[*Block]bool)) 
    156            .          .           } 

git.urbach.dev/cli/q/src/ssa.(*Block).canReachPredecessor

/home/user/q/src/ssa/Block.go

  Total:        20ms       20ms (flat, cum) 0.042%
    158            .          .           // canReachPredecessor checks if the `other` block appears as a predecessor or is the block itself. 
    159         10ms       10ms           func (b *Block) canReachPredecessor(other *Block, traversed map[*Block]bool) bool { 
    160            .          .           	if other == b { 
    161            .          .           		return true 
    162            .          .           	} 
    163            .          .            
    164         10ms       10ms           	if traversed[b] { 
    165            .          .           		return false 
    166            .          .           	} 
    167            .          .            
    168            .          .           	traversed[b] = true 
    169            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting

/home/user/q/src/ssa/Block.go

  Total:       210ms      360ms (flat, cum)  0.76%
    180            .          .           func (b *Block) Contains(value Value) bool { 
    181            .          .           	return b.Index(value) != -1 
    182            .          .           } 
    183            .          .            
    184            .          .           // FindExisting returns an equal instruction that's already appended or `nil` if none could be found. 
    185         10ms       10ms           func (b *Block) FindExisting(instr Value) Value { 
    186         80ms      140ms           	if !instr.IsPure() { 
    187         20ms       20ms           		return nil 
    188            .          .           	} 
    189            .          .            
    190        100ms      190ms           		for _, existing := range slices.Backward(b.Instructions) { 

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting-range1

/home/user/q/src/ssa/Block.go

  Total:        90ms      180ms (flat, cum)  0.38%
    191         50ms      140ms           		if existing.IsPure() && instr.Equals(existing) { 
    192            .          .           			return existing 
    193            .          .           		} 
    194            .          .            
    195            .          .           		// If we encounter a call, we can't be sure that the value is still the same. 
    196            .          .           		// TODO: This is a bit too conservative. We could check if the call affects the value. 
    197         30ms       30ms           		switch existing.(type) { 
    198         10ms       10ms           		case *Call, *CallExtern: 
    199            .          .           			return nil 

git.urbach.dev/cli/q/src/ssa.(*Block).FindExisting

/home/user/q/src/ssa/Block.go

  Total:        70ms       70ms (flat, cum)  0.15%
    200            .          .           		} 
    201         70ms       70ms           	} 
    202            .          .            
    203            .          .           	return nil 
    204            .          .           } 
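
FindExisting is a local common-subexpression lookup: scan the block backwards
for an equal pure instruction and bail at the first call, which may have
invalidated the value (matching the TODO's conservatism above). A simplified
sketch with a hypothetical Instr type standing in for the Value interface:

    package main

    import (
        "fmt"
        "slices"
    )

    // Instr is a hypothetical stand-in; equality is modeled by Op.
    type Instr struct {
        Op   string
        Pure bool
        Call bool
    }

    func findExisting(block []*Instr, instr *Instr) *Instr {
        if !instr.Pure {
            return nil
        }
        for _, existing := range slices.Backward(block) {
            if existing.Pure && existing.Op == instr.Op {
                return existing // reuse the earlier equal instruction
            }
            if existing.Call {
                return nil // conservative: a call may clobber the value
            }
        }
        return nil
    }

    func main() {
        block := []*Instr{{Op: "add", Pure: true}, {Op: "call", Call: true}}
        fmt.Println(findExisting(block, &Instr{Op: "add", Pure: true})) // <nil>: the call blocks reuse
    }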

git.urbach.dev/cli/q/src/ssa.(*Block).FindIdentifier

/home/user/q/src/ssa/Block.go

  Total:           0      260ms (flat, cum)  0.55%
    206            .          .           // FindIdentifier searches for all the possible values the identifier 
    207            .          .           // can have and combines them to a phi instruction if necessary. 
    208            .          .           func (b *Block) FindIdentifier(name string) (value Value, exists bool) { 
    209            .      260ms           	value, exists = b.Identifiers[name] 
    210            .          .           	return 
    211            .          .           } 
    212            .          .            
    213            .          .           // IdentifiersFor returns an iterator for all the identifiers pointing to the given value. 
    214            .          .           func (b *Block) IdentifiersFor(value Value) iter.Seq[string] { 

git.urbach.dev/cli/q/src/ssa.(*Block).Identify

/home/user/q/src/ssa/Block.go

  Total:           0      380ms (flat, cum)   0.8%
    224            .          .           } 
    225            .          .            
    226            .          .           // Identify adds a new identifier or changes an existing one. 
    227            .          .           func (b *Block) Identify(name string, value Value) { 
    228            .          .           	if b.Identifiers == nil { 
    229            .       70ms           		b.Identifiers = make(map[string]Value, 8) 
    230            .          .           	} 
    231            .          .            
    232            .      310ms           	b.Identifiers[name] = value 
    233            .          .           } 
    234            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).IsIdentified

/home/user/q/src/ssa/Block.go

  Total:        60ms      130ms (flat, cum)  0.27%
    235            .          .           // IsIdentified returns true if the value can be obtained from one of the identifiers. 
    236            .          .           func (b *Block) IsIdentified(value Value) bool { 
    237         40ms      110ms           	for _, existing := range b.Identifiers { 
    238         20ms       20ms           		if existing == value { 
    239            .          .           			return true 
    240            .          .           		} 
    241            .          .           	} 
    242            .          .            
    243            .          .           	return false 

git.urbach.dev/cli/q/src/ssa.(*Block).InsertAt

/home/user/q/src/ssa/Block.go

  Total:           0       20ms (flat, cum) 0.042%
    254            .          .           	return -1 
    255            .          .           } 
    256            .          .            
    257            .          .           // InsertAt inserts the `value` at the given `index`. 
    258            .          .           func (b *Block) InsertAt(value Value, index int) { 
    259            .       20ms           	b.Instructions = slices.Insert(b.Instructions, index, value) 
    260            .          .           } 
    261            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).Last

/home/user/q/src/ssa/Block.go

  Total:        10ms       10ms (flat, cum) 0.021%
    262            .          .           // Last returns the last value. 
    263            .          .           func (b *Block) Last() Value { 
    264         10ms       10ms           	if len(b.Instructions) == 0 { 
    265            .          .           		return nil 
    266            .          .           	} 
    267            .          .            
    268            .          .           	return b.Instructions[len(b.Instructions)-1] 

git.urbach.dev/cli/q/src/ssa.(*Block).Phis

/home/user/q/src/ssa/Block.go

  Total:        20ms       30ms (flat, cum) 0.063%
    270            .          .            
    271            .          .           // Phis is an iterator for all phis at the top of the block. 
    272            .          .           func (b *Block) Phis(yield func(*Phi) bool) { 
    273            .          .           	for _, instr := range b.Instructions { 
    274         10ms       10ms           		phi, isPhi := instr.(*Phi) 
    275            .          .            
    276         10ms       20ms           		if !isPhi || !yield(phi) { 
    277            .          .           			return 
    278            .          .           		} 
    279            .          .           	} 
    280            .          .           } 
    281            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).RemoveNilValues.func1

/home/user/q/src/ssa/Block.go

  Total:        20ms      150ms (flat, cum)  0.31%
    290            .          .           	b.Instructions[index] = nil 
    291            .          .           } 
    292            .          .            
    293            .          .           // RemoveNilValues removes all nil values from the block. 
    294            .          .           func (b *Block) RemoveNilValues() { 
    295         20ms      150ms           	b.Instructions = slices.DeleteFunc(b.Instructions, func(value Value) bool { 
    296            .          .           		return value == nil 
    297            .          .           	}) 
    298            .          .           } 
    299            .          .            
    300            .          .           // ReplaceAllUses replaces all uses of `old` with `new`. 

git.urbach.dev/cli/q/src/ssa.(*Block).Unidentify

/home/user/q/src/ssa/Block.go

  Total:        10ms       50ms (flat, cum)   0.1%
    309            .          .           	return CleanLabel(b.Label) 
    310            .          .           } 
    311            .          .            
    312            .          .           // Unidentify deletes the identifier for the given value. 
    313            .          .           func (b *Block) Unidentify(value Value) { 
    314         10ms       30ms           	for name, existing := range b.Identifiers { 
    315            .          .           		if existing == value { 
    316            .       20ms           			delete(b.Identifiers, name) 
    317            .          .           			return 
    318            .          .           		} 
    319            .          .           	} 
    320            .          .           } 
    321            .          .            

git.urbach.dev/cli/q/src/ssa.(*Block).Unprotect

/home/user/q/src/ssa/Block.go

  Total:           0       10ms (flat, cum) 0.021%
    328            .          .           	b.Protected[err] = protected 
    329            .          .           } 
    330            .          .            
    331            .          .           // Unprotect stops protecting the variables for the given error value. 
    332            .          .           func (b *Block) Unprotect(err Value) { 
    333            .       10ms           	delete(b.Protected, err) 
    334            .          .           } 

internal/runtime/maps.newTable

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0      280ms (flat, cum)  0.59%
     74            .          .           func newTable(typ *abi.SwissMapType, capacity uint64, index int, localDepth uint8) *table { 
     75            .          .           	if capacity < abi.SwissMapGroupSlots { 
     76            .          .           		capacity = abi.SwissMapGroupSlots 
     77            .          .           	} 
     78            .          .            
     79            .       60ms           	t := &table{ 
     80            .          .           		index:      index, 
     81            .          .           		localDepth: localDepth, 
     82            .          .           	} 
     83            .          .            
     84            .          .           	if capacity > maxTableCapacity { 
     85            .          .           		panic("initial table capacity too large") 
     86            .          .           	} 
     87            .          .            
     88            .          .           	// N.B. group count must be a power of two for probeSeq to visit every 
     89            .          .           	// group. 
     90            .          .           	capacity, overflow := alignUpPow2(capacity) 
     91            .          .           	if overflow { 
     92            .          .           		panic("rounded-up capacity overflows uint64") 
     93            .          .           	} 
     94            .          .            
     95            .      220ms           	t.reset(typ, uint16(capacity)) 
     96            .          .            
     97            .          .           	return t 
     98            .          .           } 
     99            .          .            

internal/runtime/maps.(*table).reset

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        20ms      220ms (flat, cum)  0.46%
    100            .          .           // reset resets the table with new, empty groups with the specified new total 
    101            .          .           // capacity. 
    102            .          .           func (t *table) reset(typ *abi.SwissMapType, capacity uint16) { 
    103            .          .           	groupCount := uint64(capacity) / abi.SwissMapGroupSlots 
    104            .      200ms           	t.groups = newGroups(typ, groupCount) 
    105            .          .           	t.capacity = capacity 
    106            .          .           	t.growthLeft = t.maxGrowthLeft() 
    107            .          .            
    108         10ms       10ms           	for i := uint64(0); i <= t.groups.lengthMask; i++ { 
    109         10ms       10ms           		g := t.groups.group(typ, i) 
    110            .          .           		g.ctrls().setEmpty() 
    111            .          .           	} 
    112            .          .           } 
    113            .          .            
    114            .          .           // maxGrowthLeft is the number of inserts we can do before 
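
The power-of-two group count that newTable enforces via alignUpPow2 is what
lets the probe sequence (table.go:1251 below) visit every group. A sketch of
that rounding, assuming the same contract as the runtime helper (round n up
to the next power of two, reporting overflow):

    package main

    import (
        "fmt"
        "math/bits"
    )

    // alignUpPow2 is a sketch, not the runtime's code: rounds n up to the
    // next power of two; a result of 0 from the shift signals overflow.
    func alignUpPow2(n uint64) (uint64, bool) {
        if n == 0 {
            return 0, false
        }
        v := uint64(1) << (64 - bits.LeadingZeros64(n-1))
        return v, v == 0
    }

    func main() {
        for _, n := range []uint64{1, 7, 8, 9, 1000} {
            v, _ := alignUpPow2(n)
            fmt.Println(n, "->", v) // 1->1, 7->8, 8->8, 9->16, 1000->1024
        }
    }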

internal/runtime/maps.(*table).uncheckedPutSlot

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        20ms       20ms (flat, cum) 0.042%
    385            .          .            
    386            .          .           	// Given key and its hash hash(key), to insert it, we construct a 
    387            .          .           	// probeSeq, and use it to find the first group with an unoccupied (empty 
    388            .          .           	// or deleted) slot. We place the key/value into the first such slot in 
    389            .          .           	// the group and mark it as full with key's H2. 
    390         10ms       10ms           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    391            .          .           	for ; ; seq = seq.next() { 
    392         10ms       10ms           		g := t.groups.group(typ, seq.offset) 
    393            .          .            
    394            .          .           		match := g.ctrls().matchEmptyOrDeleted() 
    395            .          .           		if match != 0 { 
    396            .          .           			i := match.first() 
    397            .          .            

internal/runtime/maps.(*table).Delete

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
    467            .          .           				// full now, we can simply remove the element. 
    468            .          .           				// Otherwise, we create a tombstone to mark the 
    469            .          .           				// slot as deleted. 
    470            .          .           				var tombstone bool 
    471            .          .           				if g.ctrls().matchEmpty() != 0 { 
    472         10ms       10ms           					g.ctrls().set(i, ctrlEmpty) 
    473            .          .           					t.growthLeft++ 
    474            .          .           				} else { 
    475            .          .           					g.ctrls().set(i, ctrlDeleted) 
    476            .          .           					tombstone = true 
    477            .          .           				} 

internal/runtime/maps.(*Iter).Init

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        70ms      160ms (flat, cum)  0.34%
    646            .          .           	// are the group index. 
    647            .          .           	entryIdx uint64 
    648            .          .           } 
    649            .          .            
    650            .          .           // Init initializes Iter for iteration. 
    651         20ms       20ms           func (it *Iter) Init(typ *abi.SwissMapType, m *Map) { 
    652            .          .           	it.typ = typ 
    653            .          .            
    654         50ms       50ms           	if m == nil || m.used == 0 { 
    655            .          .           		return 
    656            .          .           	} 
    657            .          .            
    658            .          .           	dirIdx := 0 
    659            .          .           	var groupSmall groupReference 
    660            .          .           	if m.dirLen <= 0 { 
    661            .          .           		// Use dirIdx == -1 as sentinel for small maps. 
    662            .          .           		dirIdx = -1 
    663            .          .           		groupSmall.data = m.dirPtr 
    664            .          .           	} 
    665            .          .            
    666            .          .           	it.m = m 
    667            .       30ms           	it.entryOffset = rand() 
    668            .       60ms           	it.dirOffset = rand() 
    669            .          .           	it.globalDepth = m.globalDepth 
    670            .          .           	it.dirIdx = dirIdx 
    671            .          .           	it.group = groupSmall 
    672            .          .           	it.clearSeq = m.clearSeq 
    673            .          .           } 
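
The two rand() calls seed the iteration start (entryOffset) and the table
visit order (dirOffset); this is how Go keeps map iteration order
unspecified. The effect is observable from any Go program, and the order
often differs even between two loops in the same run:

    package main

    import "fmt"

    func main() {
        m := map[string]int{"a": 1, "b": 2, "c": 3, "d": 4}
        // Each range loop gets fresh random offsets, so the printed
        // orders typically differ from line to line and run to run.
        for i := 0; i < 3; i++ {
            for k := range m {
                fmt.Print(k, " ")
            }
            fmt.Println()
        }
    }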

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:       280ms      280ms (flat, cum)  0.59%
    778            .          .           // 
    779            .          .           // The table can be mutated during iteration, though there is no guarantee that 
    780            .          .           // the mutations will be visible to the iteration. 
    781            .          .           // 
    782            .          .           // Init must be called prior to Next. 
    783         20ms       20ms           func (it *Iter) Next() { 
    784         10ms       10ms           	if it.m == nil { 
    785            .          .           		// Map was empty at Iter.Init. 
    786         10ms       10ms           		it.key = nil 
    787            .          .           		it.elem = nil 
    788            .          .           		return 
    789            .          .           	} 
    790            .          .            
    791         20ms       20ms           	if it.m.writing != 0 { 
    792            .          .           		fatal("concurrent map iteration and map write") 
    793            .          .           		return 
    794            .          .           	} 
    795            .          .            
    796            .          .           	if it.dirIdx < 0 { 
    797            .          .           		// Map was small at Init. 
    798         40ms       40ms           		for ; it.entryIdx < abi.SwissMapGroupSlots; it.entryIdx++ { 
    799         10ms       10ms           			k := uintptr(it.entryIdx+it.entryOffset) % abi.SwissMapGroupSlots 
    800            .          .            
    801        100ms      100ms           			if (it.group.ctrls().get(k) & ctrlEmpty) == ctrlEmpty { 
    802            .          .           				// Empty or deleted. 
    803            .          .           				continue 
    804            .          .           			} 
    805            .          .            
    806         40ms       40ms           			key := it.group.key(it.typ, k) 
    807            .          .           			if it.typ.IndirectKey() { 
    808            .          .           				key = *((*unsafe.Pointer)(key)) 
    809            .          .           			} 
    810            .          .            
    811            .          .           			// As below, if we have grown to a full map since Init, 
    812            .          .           			// we continue to use the old group to decide the keys 
    813            .          .           			// to return, but must look them up again in the new 
    814            .          .           			// tables. 
    815         10ms       10ms           			grown := it.m.dirLen > 0 
    816            .          .           			var elem unsafe.Pointer 
    817            .          .           			if grown { 
    818            .          .           				var ok bool 
    819            .          .           				newKey, newElem, ok := it.m.getWithKey(it.typ, key) 
    820            .          .           				if !ok { 
    821            .          .           					// See comment below. 
    822            .          .           					if it.clearSeq == it.m.clearSeq && !it.typ.Key.Equal(key, key) { 
    823            .          .           						elem = it.group.elem(it.typ, k) 
    824            .          .           						if it.typ.IndirectElem() { 
    825            .          .           							elem = *((*unsafe.Pointer)(elem)) 
    826            .          .           						} 
    827            .          .           					} else { 
    828            .          .           						continue 
    829            .          .           					} 
    830            .          .           				} else { 
    831            .          .           					key = newKey 
    832            .          .           					elem = newElem 
    833            .          .           				} 
    834            .          .           			} else { 
    835         10ms       10ms           				elem = it.group.elem(it.typ, k) 
    836            .          .           				if it.typ.IndirectElem() { 
    837            .          .           					elem = *((*unsafe.Pointer)(elem)) 
    838            .          .           				} 
    839            .          .           			} 
    840            .          .            
    841            .          .           			it.entryIdx++ 
    842            .          .           			it.key = key 
    843            .          .           			it.elem = elem 
    844         10ms       10ms           			return 
    845            .          .           		} 
    846            .          .           		it.key = nil 
    847            .          .           		it.elem = nil 
    848            .          .           		return 
    849            .          .           	} 

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        30ms       30ms (flat, cum) 0.063%
    976            .          .           			it.elem = elem 
    977            .          .           			return 
    978            .          .           		} 
    979            .          .            
    980            .          .           	next: 
    981         30ms       30ms           		it.entryIdx++ 
    982            .          .            
    983            .          .           		// Slow path: use a match on the control word to jump ahead to 
    984            .          .           		// the next full slot. 
    985            .          .           		// 
    986            .          .           		// This is highly effective for maps with particularly low load 

internal/runtime/maps.(*Iter).Next

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1028            .          .           					it.entryIdx += abi.SwissMapGroupSlots - uint64(slotIdx) 
   1029            .          .           					continue 
   1030            .          .           				} 
   1031            .          .            
   1032            .          .           				i := groupMatch.first() 
   1033         10ms       10ms           				it.entryIdx += uint64(i - slotIdx) 
   1034            .          .           				if it.entryIdx > entryMask { 
   1035            .          .           					// Past the end of this table's iteration. 
   1036            .          .           					continue 
   1037            .          .           				} 
   1038            .          .           				entryIdx += uint64(i - slotIdx) 

internal/runtime/maps.(*table).rehash

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0       10ms (flat, cum) 0.021%
   1131            .          .           	// new allocation, so the existing grow support in iteration would 
   1132            .          .           	// continue to work. 
   1133            .          .            
   1134            .          .           	newCapacity := 2 * t.capacity 
   1135            .          .           	if newCapacity <= maxTableCapacity { 
   1136            .       10ms           		t.grow(typ, m, newCapacity) 
   1137            .          .           		return 
   1138            .          .           	} 
   1139            .          .            
   1140            .          .           	t.split(typ, m) 
   1141            .          .           } 

internal/runtime/maps.(*table).grow

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:           0       10ms (flat, cum) 0.021%
   1217            .          .           				elem := g.elem(typ, j) 
   1218            .          .           				if typ.IndirectElem() { 
   1219            .          .           					elem = *((*unsafe.Pointer)(elem)) 
   1220            .          .           				} 
   1221            .          .            
   1222            .       10ms           				hash := typ.Hasher(key, m.seed) 
   1223            .          .            
   1224            .          .           				newTable.uncheckedPutSlot(typ, hash, key, elem) 
   1225            .          .           			} 
   1226            .          .           		} 
   1227            .          .           	} 

internal/runtime/maps.makeProbeSeq

/usr/lib/go/src/internal/runtime/maps/table.go

  Total:        10ms       10ms (flat, cum) 0.021%
   1249            .          .           } 
   1250            .          .            
   1251            .          .           func makeProbeSeq(hash uintptr, mask uint64) probeSeq { 
   1252            .          .           	return probeSeq{ 
   1253            .          .           		mask:   mask, 
   1254         10ms       10ms           		offset: uint64(hash) & mask, 
   1255            .          .           		index:  0, 
   1256            .          .           	} 
   1257            .          .           } 
   1258            .          .            
   1259            .          .           func (s probeSeq) next() probeSeq { 

runtime.gclinkptr.ptr

/usr/lib/go/src/runtime/mcache.go

  Total:        60ms       60ms (flat, cum)  0.13%
     71            .          .            
     72            .          .           // ptr returns the *gclink form of p. 
     73            .          .           // The result should be used for accessing fields, not stored 
     74            .          .           // in other data structures. 
     75            .          .           func (p gclinkptr) ptr() *gclink { 
     76         60ms       60ms           	return (*gclink)(unsafe.Pointer(p)) 
     77            .          .           } 
     78            .          .            
     79            .          .           type stackfreelist struct { 
     80            .          .           	list gclinkptr // linked list of free stacks 
     81            .          .           	size uintptr   // total size of stacks in list 

runtime.getMCache

/usr/lib/go/src/runtime/mcache.go

  Total:       210ms      210ms (flat, cum)  0.44%
    125            .          .           // 
    126            .          .           // Returns nil if we're not bootstrapping or we don't have a P. The caller's 
    127            .          .           // P must not change, so we must be in a non-preemptible state. 
    128            .          .           func getMCache(mp *m) *mcache { 
    129            .          .           	// Grab the mcache, since that's where stats live. 
    130         50ms       50ms           	pp := mp.p.ptr() 
    131            .          .           	var c *mcache 
    132         30ms       30ms           	if pp == nil { 
    133            .          .           		// We will be called without a P while bootstrapping, 
    134            .          .           		// in which case we use mcache0, which is set in mallocinit. 
    135            .          .           		// mcache0 is cleared when bootstrapping is complete, 
    136            .          .           		// by procresize. 
    137            .          .           		c = mcache0 
    138            .          .           	} else { 
    139        130ms      130ms           		c = pp.mcache 
    140            .          .           	} 
    141            .          .           	return c 
    142            .          .           } 
    143            .          .            

runtime.(*mcache).refill

/usr/lib/go/src/runtime/mcache.go

  Total:        50ms      2.04s (flat, cum)  4.28%
    145            .          .           // have at least one free object. The current span in c must be full. 
    146            .          .           // 
    147            .          .           // Must run in a non-preemptible context since otherwise the owner of 
    148            .          .           // c could change. 
    149         10ms       10ms           func (c *mcache) refill(spc spanClass) { 
    150            .          .           	// Return the current cached span to the central lists. 
    151         10ms       10ms           	s := c.alloc[spc] 
    152            .          .            
    153            .          .           	if s.allocCount != s.nelems { 
    154            .          .           		throw("refill of span with free space remaining") 
    155            .          .           	} 
    156            .          .           	if s != &emptymspan { 
    157            .          .           		// Mark this span as no longer cached. 
    158            .          .           		if s.sweepgen != mheap_.sweepgen+3 { 
    159            .          .           			throw("bad sweepgen in refill") 
    160            .          .           		} 
    161            .      830ms           		mheap_.central[spc].mcentral.uncacheSpan(s) 
    162            .          .            
    163            .          .           		// Count up how many slots were used and record it. 
    164            .      110ms           		stats := memstats.heapStats.acquire() 
    165            .          .           		slotsUsed := int64(s.allocCount) - int64(s.allocCountBeforeCache) 
    166         10ms       10ms           		atomic.Xadd64(&stats.smallAllocCount[spc.sizeclass()], slotsUsed) 
    167            .          .            
    168            .          .           		// Flush tinyAllocs. 
    169            .          .           		if spc == tinySpanClass { 
    170            .          .           			atomic.Xadd64(&stats.tinyAllocCount, int64(c.tinyAllocs)) 
    171            .          .           			c.tinyAllocs = 0 
    172            .          .           		} 
    173            .       10ms           		memstats.heapStats.release() 
    174            .          .            
    175            .          .           		// Count the allocs in inconsistent, internal stats. 
    176            .          .           		bytesAllocated := slotsUsed * int64(s.elemsize) 
    177         20ms       20ms           		gcController.totalAlloc.Add(bytesAllocated) 
    178            .          .            
    179            .          .           		// Clear the second allocCount just to be safe. 
    180            .          .           		s.allocCountBeforeCache = 0 
    181            .          .           	} 
    182            .          .            
    183            .          .           	// Get a new cached span from the central lists. 
    184            .      1.04s           	s = mheap_.central[spc].mcentral.cacheSpan() 
    185            .          .           	if s == nil { 
    186            .          .           		throw("out of memory") 
    187            .          .           	} 
    188            .          .            
    189            .          .           	if s.allocCount == s.nelems { 

runtime.(*mcache).refill

/usr/lib/go/src/runtime/mcache.go

  Total:           0       30ms (flat, cum) 0.063%
    209            .          .           	// We pick an overestimate here because an underestimate leads 
    210            .          .           	// the pacer to believe that it's in better shape than it is, 
    211            .          .           	// which appears to lead to more memory used. See #53738 for 
    212            .          .           	// more details. 
    213            .          .           	usedBytes := uintptr(s.allocCount) * s.elemsize 
    214            .       30ms           	gcController.update(int64(s.npages*pageSize)-int64(usedBytes), int64(c.scanAlloc)) 
    215            .          .           	c.scanAlloc = 0 
    216            .          .            
    217            .          .           	c.alloc[spc] = s 
    218            .          .           } 
    219            .          .            

runtime.(*mcache).prepareForSweep

/usr/lib/go/src/runtime/mcache.go

  Total:        60ms       60ms (flat, cum)  0.13%
    325            .          .           	// could leave allocate-black on, allow allocation to continue 
    326            .          .           	// as usual, use a ragged barrier at the beginning of sweep to 
    327            .          .           	// ensure all cached spans are swept, and then disable 
    328            .          .           	// allocate-black. However, with this approach it's difficult 
    329            .          .           	// to avoid spilling mark bits into the *next* GC cycle. 
    330         20ms       20ms           	sg := mheap_.sweepgen 
    331         10ms       10ms           	flushGen := c.flushGen.Load() 
    332         20ms       20ms           	if flushGen == sg { 
    333         10ms       10ms           		return 
    334            .          .           	} else if flushGen != sg-2 { 
    335            .          .           		println("bad flushGen", flushGen, "in prepareForSweep; sweepgen", sg) 
    336            .          .           		throw("bad flushGen") 
    337            .          .           	} 
    338            .          .           	c.releaseAll() 

git.urbach.dev/cli/q/src/token.Tokenize

/home/user/q/src/token/Tokenize.go

  Total:       370ms      1.14s (flat, cum)  2.39%
      2            .          .            
      3            .          .           // Tokenize turns the file contents into a list of tokens. 
      4            .          .           func Tokenize(buffer []byte) List { 
      5            .          .           	var ( 
      6            .          .           		i      Position 
      7            .      340ms           		tokens = make(List, 0, 8+len(buffer)/2) 
      8            .          .           	) 
      9            .          .            
     10         20ms       20ms           	for i < Position(len(buffer)) { 
     11        150ms      150ms           		switch buffer[i] { 
     12         10ms       10ms           		case ' ', '\t', '\r': 
     13            .          .           		case ',': 
     14            .          .           			tokens = append(tokens, Token{Kind: Separator, Position: i, Length: 1}) 
     15            .          .           		case '(': 
     16            .          .           			tokens = append(tokens, Token{Kind: GroupStart, Position: i, Length: 1}) 
     17            .          .           		case ')': 
     18         30ms       30ms           			tokens = append(tokens, Token{Kind: GroupEnd, Position: i, Length: 1}) 
     19            .          .           		case '{': 
     20         10ms       10ms           			tokens = append(tokens, Token{Kind: BlockStart, Position: i, Length: 1}) 
     21            .          .           		case '}': 
     22            .          .           			tokens = append(tokens, Token{Kind: BlockEnd, Position: i, Length: 1}) 
     23            .          .           		case '[': 
     24            .          .           			tokens = append(tokens, Token{Kind: ArrayStart, Position: i, Length: 1}) 
     25         20ms       20ms           		case ']': 
     26            .          .           			tokens = append(tokens, Token{Kind: ArrayEnd, Position: i, Length: 1}) 
     27            .          .           		case '\n': 
     28         30ms       30ms           			tokens = append(tokens, Token{Kind: NewLine, Position: i, Length: 1}) 
     29         10ms       10ms           		case '-': 
     30         10ms       30ms           			tokens, i = dash(tokens, buffer, i) 
     31         10ms       10ms           		case '/': 
     32            .       20ms           			tokens, i = slash(tokens, buffer, i) 
     33            .          .           			continue 
     34         10ms       10ms           		case '"', '\'': 
     35            .          .           			tokens, i = quote(tokens, buffer, i) 
     36            .          .           			continue 
     37         10ms       10ms           		case '0': 
     38            .       70ms           			tokens, i = zero(tokens, buffer, i) 
     39            .          .           			continue 
     40            .          .           		case '#': 
     41            .          .           			tokens, i = hash(tokens, buffer, i) 
     42            .          .           			continue 
     43            .          .           		default: 
     44         30ms       30ms           			if isIdentifierStart(buffer[i]) { 
     45            .      250ms           				tokens, i = identifier(tokens, buffer, i) 
     46            .          .           				continue 
     47            .          .           			} 
     48            .          .            
     49            .          .           			if isDigit(buffer[i]) { 
     50            .       10ms           				tokens, i = digit(tokens, buffer, i) 
     51            .          .           				continue 
     52            .          .           			} 
     53            .          .            
     54         10ms       10ms           			if isOperator(buffer[i]) { 
     55         10ms       70ms           				tokens, i = operator(tokens, buffer, i) 
     56            .          .           				continue 
     57            .          .           			} 
     58            .          .            
     59            .          .           			tokens = append(tokens, Token{Kind: Invalid, Position: i, Length: 1}) 
     60            .          .           		} 
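
Tokenize is a single-pass byte switch: one-byte tokens are appended inline,
while multi-byte forms (identifiers, numbers, strings, comments) are handed
to helpers that also advance i, hence the continue after each. A minimal
sketch of the same loop shape, with hypothetical kinds and an inlined
identifier helper (the real package has many more cases):

    package main

    import "fmt"

    type Token struct {
        Kind     string
        Position int
        Length   int
    }

    func isIdentifierStart(c byte) bool {
        return c >= 'a' && c <= 'z' || c >= 'A' && c <= 'Z' || c == '_'
    }

    func tokenize(buffer []byte) []Token {
        // Same preallocation heuristic as the profiled code above.
        tokens := make([]Token, 0, 8+len(buffer)/2)
        for i := 0; i < len(buffer); i++ {
            switch buffer[i] {
            case ' ', '\t', '\r':
                // skip whitespace
            case '(':
                tokens = append(tokens, Token{"GroupStart", i, 1})
            case ')':
                tokens = append(tokens, Token{"GroupEnd", i, 1})
            default:
                if isIdentifierStart(buffer[i]) {
                    start := i
                    for i+1 < len(buffer) && isIdentifierStart(buffer[i+1]) {
                        i++
                    }
                    tokens = append(tokens, Token{"Identifier", start, i - start + 1})
                }
            }
        }
        return tokens
    }

    func main() {
        fmt.Println(tokenize([]byte("foo(bar)")))
    }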

runtime.memmove

/usr/lib/go/src/runtime/memmove_arm64.s

  Total:       220ms      220ms (flat, cum)  0.46%
     25            .          .           // The destination pointer is 16-byte aligned to minimize unaligned accesses. 
     26            .          .           // The loop tail is handled by always copying 64 bytes from the end. 
     27            .          .            
     28            .          .           // func memmove(to, from unsafe.Pointer, n uintptr) 
     29            .          .           TEXT runtime·memmove<ABIInternal>(SB), NOSPLIT|NOFRAME, $0-24 
     30         20ms       20ms           	CBZ	R2, copy0 
     31            .          .            
     32            .          .           	// Small copies: 1..16 bytes 
     33            .          .           	CMP	$16, R2 
     34            .          .           	BLE	copy16 
     35            .          .            
     36            .          .           	// Large copies 
     37            .          .           	CMP	$128, R2 
     38            .          .           	BHI	copy_long 
     39            .          .           	CMP	$32, R2 
     40            .          .           	BHI	copy32_128 
     41            .          .            
     42            .          .           	// Small copies: 17..32 bytes. 
     43         10ms       10ms           	LDP	(R1), (R6, R7) 
     44            .          .           	ADD	R1, R2, R4          // R4 points just past the last source byte 
     45            .          .           	LDP	-16(R4), (R12, R13) 
     46            .          .           	STP	(R6, R7), (R0) 
     47         10ms       10ms           	ADD	R0, R2, R5          // R5 points just past the last destination byte 
     48            .          .           	STP	(R12, R13), -16(R5) 
     49            .          .           	RET 
     50            .          .            
     51            .          .           // Small copies: 1..16 bytes. 
     52            .          .           copy16: 
     53            .          .           	ADD	R1, R2, R4 // R4 points just past the last source byte 
     54            .          .           	ADD	R0, R2, R5 // R5 points just past the last destination byte 
     55         10ms       10ms           	CMP	$8, R2 
     56            .          .           	BLT	copy7 
     57            .          .           	MOVD	(R1), R6 
     58         60ms       60ms           	MOVD	-8(R4), R7 
     59         20ms       20ms           	MOVD	R6, (R0) 
     60         10ms       10ms           	MOVD	R7, -8(R5) 
     61            .          .           	RET 
     62            .          .            
     63            .          .           copy7: 
     64            .          .           	TBZ	$2, R2, copy3 
     65            .          .           	MOVWU	(R1), R6 
     66         50ms       50ms           	MOVWU	-4(R4), R7 
     67         10ms       10ms           	MOVW	R6, (R0) 
     68            .          .           	MOVW	R7, -4(R5) 
     69            .          .           	RET 
     70            .          .            
     71            .          .           copy3: 
     72            .          .           	TBZ	$1, R2, copy1 
     73            .          .           	MOVHU	(R1), R6 
     74         10ms       10ms           	MOVHU	-2(R4), R7 
     75            .          .           	MOVH	R6, (R0) 
     76         10ms       10ms           	MOVH	R7, -2(R5) 
     77            .          .           	RET 
     78            .          .            
     79            .          .           copy1: 
     80            .          .           	MOVBU	(R1), R6 
     81            .          .           	MOVB	R6, (R0) 
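
The copy16/copy7/copy3/copy1 ladder above avoids a byte loop by loading one
word from the start of the range and one from its end, letting the two stores
overlap in the middle. A hedged pure-Go illustration of the same trick for
8..16-byte copies (not the runtime code, which works on raw pointers):

package main

import (
	"encoding/binary"
	"fmt"
)

// copy8to16 copies n bytes (8 <= n <= 16) from src to dst with exactly two
// loads and two stores; the second pair is anchored at the end of the range
// and may overlap the first, which is harmless for a copy.
func copy8to16(dst, src []byte, n int) {
	head := binary.LittleEndian.Uint64(src)       // first 8 bytes
	tail := binary.LittleEndian.Uint64(src[n-8:]) // last 8 bytes, may overlap
	binary.LittleEndian.PutUint64(dst, head)
	binary.LittleEndian.PutUint64(dst[n-8:], tail)
}

func main() {
	src := []byte("0123456789abcdef")
	dst := make([]byte, 16)
	copy8to16(dst, src, 11)
	fmt.Printf("%q\n", dst[:11]) // "0123456789a"
}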

runtime.memmove

/usr/lib/go/src/runtime/memmove_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    144            .          .           	// the dstend pointer. 
    145            .          .            
    146            .          .           backward_check: 
    147            .          .           	// Use backward copy if there is an overlap. 
    148            .          .           	SUB	R1, R0, R14 
    149         10ms       10ms           	CBZ	R14, copy0 
    150            .          .           	CMP	R2, R14 
    151            .          .           	BCC	copy_long_backward 
    152            .          .            
    153            .          .           	// Copy 16 bytes and then align src (R1) or dst (R0) to 16-byte alignment. 
    154            .          .           	LDP	(R1), (R12, R13)     // Load  A 
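
backward_check computes dst-src and branches to the backward loop when that
unsigned distance is below n (the BCC), i.e. when the destination starts
inside the source range and a forward copy would clobber unread bytes. An
index-based sketch of the same direction choice:

package main

import "fmt"

// move copies n bytes within buf from index src to index dst, going
// backward iff uint(dst-src) < uint(n), exactly the backward_check test.
func move(buf []byte, dst, src, n int) {
	if dst == src || n == 0 {
		return
	}
	if uint(dst-src) < uint(n) { // dst is ahead of src and overlaps it
		for i := n - 1; i >= 0; i-- {
			buf[dst+i] = buf[src+i]
		}
		return
	}
	for i := 0; i < n; i++ {
		buf[dst+i] = buf[src+i]
	}
}

func main() {
	b := []byte("abcdefgh")
	move(b, 2, 0, 5)       // overlapping: shift "abcde" right by two
	fmt.Println(string(b)) // ababcdeh
}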

runtime.memmove

/usr/lib/go/src/runtime/memmove_arm64.s

  Total:       140ms      140ms (flat, cum)  0.29%
    165            .          .           	SUBS	$144, R2, R2 
    166            .          .           	BLS	copy64_from_end 
    167            .          .            
    168            .          .           loop64: 
    169            .          .           	STP	(R6, R7), 16(R3)     // Store  B 
    170         20ms       20ms           	LDP	16(R1), (R6, R7)     // Load   B (next iteration) 
    171         10ms       10ms           	STP	(R8, R9), 32(R3)     // Store   C 
    172         30ms       30ms           	LDP	32(R1), (R8, R9)     // Load    C 
    173         10ms       10ms           	STP	(R10, R11), 48(R3)   // Store    D 
    174            .          .           	LDP	48(R1), (R10, R11)   // Load     D 
    175         10ms       10ms           	STP.W	(R12, R13), 64(R3)   // Store     E 
    176         40ms       40ms           	LDP.W	64(R1), (R12, R13)   // Load      E 
    177         10ms       10ms           	SUBS	$64, R2, R2 
    178            .          .           	BHI	loop64 
    179            .          .            
    180            .          .           	// Write the last iteration and copy 64 bytes from the end. 
    181            .          .           copy64_from_end: 
    182            .          .           	LDP	-64(R4), (R14, R15)  // Load       F 
    183            .          .           	STP	(R6, R7), 16(R3)     // Store  B 
    184            .          .           	LDP	-48(R4), (R6, R7)    // Load        G 
    185            .          .           	STP	(R8, R9), 32(R3)     // Store   C 
    186            .          .           	LDP	-32(R4), (R8, R9)    // Load         H 
    187            .          .           	STP	(R10, R11), 48(R3)   // Store    D 
    188            .          .           	LDP	-16(R4), (R10, R11)  // Load          I 
    189            .          .           	STP	(R12, R13), 64(R3)   // Store     E 
    190            .          .           	STP	(R14, R15), -64(R5)  // Store      F 
    191            .          .           	STP	(R6, R7), -48(R5)    // Store       G 
    192         10ms       10ms           	STP	(R8, R9), -32(R5)    // Store        H 
    193            .          .           	STP	(R10, R11), -16(R5)  // Store         I 
    194            .          .           	RET 
    195            .          .            
    196            .          .           	// Large backward copy for overlapping copies. 
    197            .          .           	// Copy 16 bytes and then align srcend (R4) or dstend (R5) to 16-byte alignment. 

runtime.mapaccess1

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:       150ms      280ms (flat, cum)  0.59%
     35            .          .           // the key is not in the map. 
     36            .          .           // NOTE: The returned pointer may keep the whole map live, so don't 
     37            .          .           // hold onto it for very long. 
     38            .          .           // 
     39            .          .           //go:linkname runtime_mapaccess1 runtime.mapaccess1 
     40         20ms       20ms           func runtime_mapaccess1(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) unsafe.Pointer { 
     41            .          .           	if race.Enabled && m != nil { 
     42            .          .           		callerpc := sys.GetCallerPC() 
     43            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess1) 
     44            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
     45            .          .           		race.ReadObjectPC(typ.Key, key, callerpc, pc) 
     46            .          .           	} 
     47            .          .           	if msan.Enabled && m != nil { 
     48            .          .           		msan.Read(key, typ.Key.Size_) 
     49            .          .           	} 
     50            .          .           	if asan.Enabled && m != nil { 
     51            .          .           		asan.Read(key, typ.Key.Size_) 
     52            .          .           	} 
     53            .          .            
     54         20ms       20ms           	if m == nil || m.Used() == 0 {                                                       return m.used                                                        map.go:394
     55            .          .           		if err := mapKeyError(typ, key); err != nil { 
     56            .          .           			panic(err) // see issue 23734 
     57            .          .           		} 
     58            .          .           		return unsafe.Pointer(&zeroVal[0]) 
     59            .          .           	} 
     60            .          .            
     61         10ms       10ms           	if m.writing != 0 { 
     62            .          .           		fatal("concurrent map read and map write") 
     63            .          .           	} 
     64            .          .            
     65            .       30ms           	hash := typ.Hasher(key, m.seed) 
     66            .          .            
     67         10ms       10ms           	if m.dirLen <= 0 { 
     68         10ms       30ms           		_, elem, ok := m.getWithKeySmall(typ, hash, key) 
     69            .          .           		if !ok { 
     70            .          .           			return unsafe.Pointer(&zeroVal[0]) 
     71            .          .           		} 
     72            .          .           		return elem 
     73            .          .           	} 
     74            .          .            
     75            .          .           	// Select table. 
     76         10ms       10ms           	idx := m.directoryIndex(hash) 
     77            .          .           	t := m.directoryAt(idx) 
     78            .          .            
     79            .          .           	// Probe table. 
     80            .          .           	seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
     81            .          .           	for ; ; seq = seq.next() { 
     82            .          .           		g := t.groups.group(typ, seq.offset) 
     83            .          .            
     84         20ms       20ms           		match := g.ctrls().matchH2(h2(hash))                                                               return ctrlGroupMatchH2(g, h)                                group.go:154
                                                                  v := uint64(g) ^ (bitsetLSB * uint64(h))                 group.go:170
                                                                  return h & 0x7f                                          map.go:191

     85            .          .            
     86         20ms       20ms           		for match != 0 { 
     87         20ms       20ms           			i := match.first() 
     88            .          .            
     89            .          .           			slotKey := g.key(typ, i) 
     90            .          .           			slotKeyOrig := slotKey 
     91            .          .           			if typ.IndirectKey() { 
     92            .          .           				slotKey = *((*unsafe.Pointer)(slotKey)) 
     93            .          .           			} 
     94            .       80ms           			if typ.Key.Equal(key, slotKey) { 
     95            .          .           				slotElem := unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) 
     96            .          .           				if typ.IndirectElem() { 
     97            .          .           					slotElem = *((*unsafe.Pointer)(slotElem)) 
     98            .          .           				} 
     99         10ms       10ms           				return slotElem 
    100            .          .           			} 
    101            .          .           			match = match.removeFirst() 
    102            .          .           		} 
    103            .          .            
    104            .          .           		match = g.ctrls().matchEmpty() 
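
The lookup above hashes once, then walks a probe sequence of 8-slot groups:
h2 (the low 7 bits of the hash, per the inlined map.go:191 annotation)
filters control bytes, a full key comparison confirms candidates, and an
empty control byte terminates the probe. A toy fixed-size version of that
shape, with a byte loop standing in for the bitset match and an assumed FNV
hash standing in for typ.Hasher:

package main

import "fmt"

const groupSlots = 8

type group struct {
	ctrl [groupSlots]uint8 // 0x80 = empty, 0x00..0x7f = h2 of the occupant
	keys [groupSlots]string
	vals [groupSlots]int
}

func h2(h uint64) uint8 { return uint8(h & 0x7f) }

func insert(groups []group, h uint64, key string, val int) {
	mask := uint64(len(groups) - 1) // len(groups) must be a power of two
	for g := h >> 7 & mask; ; g = (g + 1) & mask {
		grp := &groups[g]
		for i := 0; i < groupSlots; i++ {
			if grp.ctrl[i] == 0x80 { // first empty slot in the sequence
				grp.ctrl[i] = h2(h)
				grp.keys[i], grp.vals[i] = key, val
				return
			}
		}
	}
}

func lookup(groups []group, h uint64, key string) (int, bool) {
	mask := uint64(len(groups) - 1)
	for g := h >> 7 & mask; ; g = (g + 1) & mask {
		grp := &groups[g]
		for i := 0; i < groupSlots; i++ {
			switch grp.ctrl[i] {
			case h2(h):
				if grp.keys[i] == key { // h2 hits may be false positives
					return grp.vals[i], true
				}
			case 0x80:
				return 0, false // empty slot ends the probe sequence
			}
		}
	}
}

func fnv(s string) uint64 { // stand-in for typ.Hasher
	h := uint64(14695981039346656037)
	for i := 0; i < len(s); i++ {
		h = (h ^ uint64(s[i])) * 1099511628211
	}
	return h
}

func main() {
	groups := make([]group, 4)
	for g := range groups {
		for i := range groups[g].ctrl {
			groups[g].ctrl[i] = 0x80
		}
	}
	insert(groups, fnv("answer"), "answer", 42)
	fmt.Println(lookup(groups, fnv("answer"), "answer"))   // 42 true
	fmt.Println(lookup(groups, fnv("missing"), "missing")) // 0 false
}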

runtime.mapaccess2

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:        10ms       10ms (flat, cum) 0.021%
    109            .          .           		} 
    110            .          .           	} 
    111            .          .           } 
    112            .          .            
    113            .          .           //go:linkname runtime_mapaccess2 runtime.mapaccess2 
    114         10ms       10ms           func runtime_mapaccess2(typ *abi.SwissMapType, m *Map, key unsafe.Pointer) (unsafe.Pointer, bool) { 
    115            .          .           	if race.Enabled && m != nil { 
    116            .          .           		callerpc := sys.GetCallerPC() 
    117            .          .           		pc := abi.FuncPCABIInternal(runtime_mapaccess1) 
    118            .          .           		race.ReadPC(unsafe.Pointer(m), callerpc, pc) 
    119            .          .           		race.ReadObjectPC(typ.Key, key, callerpc, pc) 

runtime.mapaccess2

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:        10ms       30ms (flat, cum) 0.063%
    130            .          .           			panic(err) // see issue 23734 
    131            .          .           		} 
    132            .          .           		return unsafe.Pointer(&zeroVal[0]), false 
    133            .          .           	} 
    134            .          .            
    135         10ms       10ms           	if m.writing != 0 { 
    136            .          .           		fatal("concurrent map read and map write") 
    137            .          .           	} 
    138            .          .            
    139            .       10ms           	hash := typ.Hasher(key, m.seed) 
    140            .          .            
    141            .          .           	if m.dirLen == 0 { 
    142            .       10ms           		_, elem, ok := m.getWithKeySmall(typ, hash, key) 
    143            .          .           		if !ok { 
    144            .          .           			return unsafe.Pointer(&zeroVal[0]), false 
    145            .          .           		} 
    146            .          .           		return elem, true 
    147            .          .           	} 

runtime.mapassign

/usr/lib/go/src/internal/runtime/maps/runtime_swiss.go

  Total:       180ms      660ms (flat, cum)  1.39%
    199            .          .           		msan.Read(key, typ.Key.Size_) 
    200            .          .           	} 
    201            .          .           	if asan.Enabled { 
    202            .          .           		asan.Read(key, typ.Key.Size_) 
    203            .          .           	} 
    204         10ms       10ms           	if m.writing != 0 { 
    205            .          .           		fatal("concurrent map writes") 
    206            .          .           	} 
    207            .          .            
    208         30ms      150ms           	hash := typ.Hasher(key, m.seed) 
    209            .          .            
    210            .          .           	// Set writing after calling Hasher, since Hasher may panic, in which 
    211            .          .           	// case we have not actually done a write. 
    212            .          .           	m.writing ^= 1 // toggle, see comment on writing 
    213            .          .            
    214         20ms       20ms           	if m.dirPtr == nil { 
    215            .      200ms           		m.growToSmall(typ) 
    216            .          .           	} 
    217            .          .            
    218            .          .           	if m.dirLen == 0 { 
    219            .          .           		if m.used < abi.SwissMapGroupSlots { 
    220            .       90ms           			elem := m.putSlotSmall(typ, hash, key) 
    221            .          .            
    222            .          .           			if m.writing == 0 { 
    223            .          .           				fatal("concurrent map writes") 
    224            .          .           			} 
    225            .          .           			m.writing ^= 1 
    226            .          .            
    227            .          .           			return elem 
    228            .          .           		} 
    229            .          .            
    230            .          .           		// Can't fit another entry, grow to full size map. 
    231            .       20ms           		m.growToTable(typ) 
    232            .          .           	} 
    233            .          .            
    234            .          .           	var slotElem unsafe.Pointer 
    235            .          .           outer: 
    236            .          .           	for { 
    237            .          .           		// Select table. 
    238            .          .           		idx := m.directoryIndex(hash) 
    239            .          .           		t := m.directoryAt(idx) 
    240            .          .            
    241            .          .           		seq := makeProbeSeq(h1(hash), t.groups.lengthMask) 
    242            .          .            
    243            .          .           		// As we look for a match, keep track of the first deleted slot 
    244            .          .           		// we find, which we'll use to insert the new entry if 
    245            .          .           		// necessary. 
    246            .          .           		var firstDeletedGroup groupReference 
    247            .          .           		var firstDeletedSlot uintptr 
    248            .          .            
    249            .          .           		for ; ; seq = seq.next() { 
    250         20ms       20ms           			g := t.groups.group(typ, seq.offset)                                                                       offset := uintptr(i) * typ.GroupSize                 group.go:325

    251         30ms       30ms           			match := g.ctrls().matchH2(h2(hash))                                                                       return ctrlGroupMatchH2(g, h)                        group.go:154
                                                                          v := uint64(g) ^ (bitsetLSB * uint64(h))         group.go:170

    252            .          .            
    253            .          .           			// Look for an existing slot containing this key. 
    254            .          .           			for match != 0 { 
    255            .          .           				i := match.first() 
    256            .          .            
    257            .          .           				slotKey := g.key(typ, i) 
    258            .          .           				slotKeyOrig := slotKey 
    259            .          .           				if typ.IndirectKey() { 
    260            .          .           					slotKey = *((*unsafe.Pointer)(slotKey)) 
    261            .          .           				} 
    262         10ms       10ms           				if typ.Key.Equal(key, slotKey) { 
    263            .          .           					if typ.NeedKeyUpdate() { 
    264            .          .           						typedmemmove(typ.Key, slotKey, key) 
    265            .          .           					} 
    266            .          .            
    267            .          .           					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) 
    268            .          .           					if typ.IndirectElem() { 
    269            .          .           						slotElem = *((*unsafe.Pointer)(slotElem)) 
    270            .          .           					} 
    271            .          .            
    272            .          .           					t.checkInvariants(typ, m) 
    273            .          .           					break outer 
    274            .          .           				} 
    275            .          .           				match = match.removeFirst() 
    276            .          .           			} 
    277            .          .            
    278            .          .           			// No existing slot for this key in this group. Is this the end 
    279            .          .           			// of the probe sequence? 
    280         20ms       20ms           			match = g.ctrls().matchEmpty()                                                                       return (*ctrlGroup)(g.data)                          group.go:280
                                                                          return ctrlGroupMatchEmpty(g)                        group.go:176
                                                                          return bitset((v &^ (v << 6)) & bitsetMSB)       group.go:191

    281            .          .           			if match != 0 { 
    282            .          .           				// Finding an empty slot means we've reached the end of 
    283            .          .           				// the probe sequence. 
    284            .          .            
    285            .          .           				var i uintptr 
    286            .          .            
    287            .          .           				// If we found a deleted slot along the way, we 
    288            .          .           				// can replace it without consuming growthLeft. 
    289            .          .           				if firstDeletedGroup.data != nil { 
    290            .          .           					g = firstDeletedGroup 
    291         20ms       20ms           					i = firstDeletedSlot 
    292            .          .           					t.growthLeft++ // will be decremented below to become a no-op. 
    293            .          .           				} else { 
    294            .          .           					// Otherwise, use the empty slot. 
    295         10ms       10ms           					i = match.first()                                                                                       return bitsetFirst(b)                group.go:50
                                                                                          return uintptr(sys.TrailingZeros64(uint64(b))) >> 3 group.go:58

    296            .          .           				} 
    297            .          .            
    298            .          .           				// If there is room left to grow, just insert the new entry. 
    299            .          .           				if t.growthLeft > 0 { 
    300            .          .           					slotKey := g.key(typ, i) 
    301            .          .           					slotKeyOrig := slotKey 
    302            .          .           					if typ.IndirectKey() { 
    303            .          .           						kmem := newobject(typ.Key) 
    304            .          .           						*(*unsafe.Pointer)(slotKey) = kmem 
    305            .          .           						slotKey = kmem 
    306            .          .           					} 
    307            .       40ms           					typedmemmove(typ.Key, slotKey, key) 
    308            .          .            
    309            .          .           					slotElem = unsafe.Pointer(uintptr(slotKeyOrig) + typ.ElemOff) 
    310         10ms       10ms           					if typ.IndirectElem() {                                                                                       return mt.Flags&SwissMapIndirectElem != 0 map_swiss.go:63

    311            .          .           						emem := newobject(typ.Elem) 
    312            .          .           						*(*unsafe.Pointer)(slotElem) = emem 
    313            .          .           						slotElem = emem 
    314            .          .           					} 
    315            .          .            
    316            .          .           					g.ctrls().set(i, ctrl(h2(hash))) 
    317            .          .           					t.growthLeft-- 
    318            .          .           					t.used++ 
    319            .          .           					m.used++ 
    320            .          .            
    321            .          .           					t.checkInvariants(typ, m) 
    322            .          .           					break outer 
    323            .          .           				} 
    324            .          .            
    325            .       10ms           				t.rehash(typ, m) 
    326            .          .           				continue outer 
    327            .          .           			} 
    328            .          .            
    329            .          .           			// No empty slots in this group. Check for a deleted 
    330            .          .           			// slot, which we'll use if we don't find a match later 
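
mapassign brackets the mutation with the one-bit writing flag: it is toggled
on only after Hasher has run (a panicking hasher means no write happened)
and checked again before toggling off, so a concurrent writer that flipped
it in between is detected. A toy sketch of that detection pattern; this is
diagnosis, not synchronization:

package main

import "fmt"

type toyMap struct {
	writing uint8
	data    map[string]int
}

func (m *toyMap) assign(k string, v int) {
	if m.writing != 0 {
		panic("concurrent map writes") // another writer is mid-flight
	}
	m.writing ^= 1 // toggle on, mirroring m.writing ^= 1 above
	m.data[k] = v
	if m.writing == 0 {
		panic("concurrent map writes") // someone toggled underneath us
	}
	m.writing ^= 1 // toggle off
}

func main() {
	m := &toyMap{data: map[string]int{}}
	m.assign("a", 1)
	fmt.Println(m.data["a"]) // 1
}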

internal/runtime/maps.bitset.first

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
     45            .          .           // first returns the relative index of the first control byte in the group that 
     46            .          .           // is in the set. 
     47            .          .           // 
     48            .          .           // Preconditions: b is not 0 (empty). 
     49            .          .           func (b bitset) first() uintptr { 
     50         30ms       30ms           	return bitsetFirst(b)                                                       return uintptr(sys.TrailingZeros64(uint64(b))) >> 3                  group.go:58

     51            .          .           } 
     52            .          .            
     53            .          .           // Portable implementation of first. 

internal/runtime/maps.bitsetFirst

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        30ms       30ms (flat, cum) 0.063%
     55            .          .           // On AMD64, this is replaced with an intrinsic that simply does 
     56            .          .           // TrailingZeros64. There is no need to shift as the bitset is packed. 
     57            .          .           func bitsetFirst(b bitset) uintptr { 
     58         30ms       30ms           	return uintptr(sys.TrailingZeros64(uint64(b))) >> 3 
     59            .          .           } 
     60            .          .            
     61            .          .           // removeFirst clears the first set bit (that is, resets the least significant 
     62            .          .           // set bit to 0). 
     63            .          .           func (b bitset) removeFirst() bitset { 
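
Because a match bitset sets (at most) the high bit of each matching byte,
the index of the first match is just the trailing-zero count divided by 8,
and removeFirst is the usual clear-lowest-set-bit step. A standalone check:

package main

import (
	"fmt"
	"math/bits"
)

func bitsetFirst(b uint64) uint { return uint(bits.TrailingZeros64(b)) >> 3 }

func main() {
	// High bit set in byte 2 and byte 5 (byte 0 is least significant).
	b := uint64(0x80)<<(8*2) | uint64(0x80)<<(8*5)
	fmt.Println(bitsetFirst(b)) // 2
	b &= b - 1                  // removeFirst: clear the lowest set bit
	fmt.Println(bitsetFirst(b)) // 5
}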

internal/runtime/maps.(*ctrlGroup).get

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        20ms       20ms (flat, cum) 0.042%
    129            .          .           // get returns the i-th control byte. 
    130            .          .           func (g *ctrlGroup) get(i uintptr) ctrl { 
    131            .          .           	if goarch.BigEndian { 
    132            .          .           		return *(*ctrl)(unsafe.Add(unsafe.Pointer(g), 7-i)) 
    133            .          .           	} 
    134         20ms       20ms           	return *(*ctrl)(unsafe.Add(unsafe.Pointer(g), i)) 
    135            .          .           } 
    136            .          .            
    137            .          .           // set sets the i-th control byte. 
    138            .          .           func (g *ctrlGroup) set(i uintptr, c ctrl) { 

internal/runtime/maps.(*ctrlGroup).set

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    139            .          .           	if goarch.BigEndian { 
    140            .          .           		*(*ctrl)(unsafe.Add(unsafe.Pointer(g), 7-i)) = c 
    141            .          .           		return 
    142            .          .           	} 
    143         10ms       10ms           	*(*ctrl)(unsafe.Add(unsafe.Pointer(g), i)) = c 
    144            .          .           } 
    145            .          .            

internal/runtime/maps.(*ctrlGroup).setEmpty

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    146            .          .           // setEmpty sets all the control bytes to empty. 
    147            .          .           func (g *ctrlGroup) setEmpty() { 
    148         10ms       10ms           	*g = ctrlGroup(bitsetEmpty) 
    149            .          .           } 
    150            .          .            

internal/runtime/maps.ctrlGroup.matchH2

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        60ms       60ms (flat, cum)  0.13%
    152            .          .           // matches the given value. May return false positives. 
    153            .          .           func (g ctrlGroup) matchH2(h uintptr) bitset { 
    154         60ms       60ms           	return ctrlGroupMatchH2(g, h)                                                       v := uint64(g) ^ (bitsetLSB * uint64(h))                             group.go:170

    155            .          .           } 
    156            .          .            
    157            .          .           // Portable implementation of matchH2. 
    158            .          .           // 
    159            .          .           // Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See 

internal/runtime/maps.ctrlGroupMatchH2

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        60ms       60ms (flat, cum)  0.13%
    165            .          .           	// subtract off 0x0101 the first 2 bytes will become 0xffff and both be 
    166            .          .           	// considered matches of h. The false positive matches are not a problem, 
    167            .          .           	// just a rare inefficiency. Note that they only occur if there is a real 
    168            .          .           	// match and never occur on ctrlEmpty, or ctrlDeleted. The subsequent key 
    169            .          .           	// comparisons ensure that there is no correctness issue. 
    170         60ms       60ms           	v := uint64(g) ^ (bitsetLSB * uint64(h)) 
    171            .          .           	return bitset(((v - bitsetLSB) &^ v) & bitsetMSB) 
    172            .          .           } 
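
The SWAR trick above works because XORing with bitsetLSB*h zeroes exactly
the bytes equal to h, and ((v - bitsetLSB) &^ v) & bitsetMSB raises the high
bit of exactly the zero bytes (modulo the benign false positives the comment
describes). A standalone check:

package main

import "fmt"

const (
	bitsetLSB = 0x0101010101010101
	bitsetMSB = 0x8080808080808080
)

func matchH2(g, h uint64) uint64 {
	v := g ^ (bitsetLSB * h) // bytes equal to h become zero
	return ((v - bitsetLSB) &^ v) & bitsetMSB
}

func main() {
	// Control word with h2 values 0x12, 0x34, 0x12, 0x7f (byte 0 first).
	g := uint64(0x12) | uint64(0x34)<<8 | uint64(0x12)<<16 | uint64(0x7f)<<24
	fmt.Printf("%#x\n", matchH2(g, 0x12)) // 0x800080: bytes 0 and 2 match
}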

internal/runtime/maps.ctrlGroup.matchEmpty

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    174            .          .           // matchEmpty returns the set of slots in the group that are empty. 
    175            .          .           func (g ctrlGroup) matchEmpty() bitset { 
    176         10ms       10ms           	return ctrlGroupMatchEmpty(g)                                                       return bitset((v &^ (v << 6)) & bitsetMSB)                           group.go:191

    177            .          .           } 
    178            .          .            
    179            .          .           // Portable implementation of matchEmpty. 
    180            .          .           // 
    181            .          .           // Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See 

internal/runtime/maps.ctrlGroupMatchEmpty

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    186            .          .           	// A full slot is     0??? ???? 
    187            .          .           	// 
    188            .          .           	// A slot is empty iff bit 7 is set and bit 1 is not. We could select any 
    189            .          .           	// of the other bits here (e.g. v << 1 would also work). 
    190            .          .           	v := uint64(g) 
    191         10ms       10ms           	return bitset((v &^ (v << 6)) & bitsetMSB) 
    192            .          .           } 
    193            .          .            
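
The empty test relies on the control-byte encoding: empty is 0b1000_0000,
deleted is 0b1111_1110, and full slots have bit 7 clear. Shifting v left by
6 lifts each byte's bit 1 into that byte's bit-7 position, so v &^ (v << 6)
keeps bit 7 set only for empty bytes. A quick check:

package main

import "fmt"

const (
	bitsetMSB   = 0x8080808080808080
	ctrlEmpty   = 0b1000_0000
	ctrlDeleted = 0b1111_1110
)

func matchEmpty(g uint64) uint64 {
	return (g &^ (g << 6)) & bitsetMSB
}

func main() {
	// byte 0: empty, byte 1: deleted, byte 2: full slot with h2 = 0x5a.
	g := uint64(ctrlEmpty) | uint64(ctrlDeleted)<<8 | uint64(0x5a)<<16
	fmt.Printf("%#x\n", matchEmpty(g)) // 0x80: only byte 0 is empty
}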

internal/runtime/maps.ctrlGroup.matchEmptyOrDeleted

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    195            .          .           // deleted. 
    196            .          .           func (g ctrlGroup) matchEmptyOrDeleted() bitset { 
    197         10ms       10ms           	return ctrlGroupMatchEmptyOrDeleted(g) 
    198            .          .           } 
    199            .          .            
    200            .          .           // Portable implementation of matchEmptyOrDeleted. 
    201            .          .           // 
    202            .          .           // Note: On AMD64, this is an intrinsic implemented with SIMD instructions. See 

internal/runtime/maps.alignUpPow2

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        10ms       10ms (flat, cum) 0.021%
    266            .          .           // Returns true if round up causes overflow. 
    267            .          .           func alignUpPow2(n uint64) (uint64, bool) { 
    268            .          .           	if n == 0 { 
    269            .          .           		return 0, false 
    270            .          .           	} 
    271         10ms       10ms           	v := (uint64(1) << sys.Len64(n-1)) 
    272            .          .           	if v == 0 { 
    273            .          .           		return 0, true 
    274            .          .           	} 
    275            .          .           	return v, false 
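
alignUpPow2 rounds n up to the next power of two by taking the bit length
of n-1; the shift produces zero for n above 1<<63, which the boolean
reports as overflow. The same few lines with math/bits:

package main

import (
	"fmt"
	"math/bits"
)

func alignUpPow2(n uint64) (uint64, bool) {
	if n == 0 {
		return 0, false
	}
	v := uint64(1) << bits.Len64(n-1) // smallest power of two >= n
	if v == 0 {
		return 0, true // round up overflowed uint64
	}
	return v, false
}

func main() {
	for _, n := range []uint64{1, 5, 8, 9} {
		v, _ := alignUpPow2(n)
		fmt.Println(n, "->", v) // 1 -> 1, 5 -> 8, 8 -> 8, 9 -> 16
	}
}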

internal/runtime/maps.(*groupReference).ctrls

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        50ms       50ms (flat, cum)   0.1%
    276            .          .           } 
    277            .          .            
    278            .          .           // ctrls returns the group control word. 
    279            .          .           func (g *groupReference) ctrls() *ctrlGroup { 
    280         50ms       50ms           	return (*ctrlGroup)(g.data) 
    281            .          .           } 
    282            .          .            

internal/runtime/maps.(*groupReference).key

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        40ms       40ms (flat, cum) 0.084%
    283            .          .           // key returns a pointer to the key at index i. 
    284            .          .           func (g *groupReference) key(typ *abi.SwissMapType, i uintptr) unsafe.Pointer { 
    285         40ms       40ms           	offset := groupSlotsOffset + i*typ.SlotSize 
    286            .          .            
    287            .          .           	return unsafe.Pointer(uintptr(g.data) + offset) 
    288            .          .           } 

internal/runtime/maps.(*groupReference).elem

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        50ms       50ms (flat, cum)   0.1%
    289            .          .            
    290            .          .           // elem returns a pointer to the element at index i. 
    291            .          .           func (g *groupReference) elem(typ *abi.SwissMapType, i uintptr) unsafe.Pointer { 
    292         50ms       50ms           	offset := groupSlotsOffset + i*typ.SlotSize + typ.ElemOff 
    293            .          .            
    294            .          .           	return unsafe.Pointer(uintptr(g.data) + offset) 
    295            .          .           } 
    296            .          .            
    297            .          .           // groupsReference is a wrapper type describing an array of groups stored at 

internal/runtime/maps.newGroups

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:           0      970ms (flat, cum)  2.04%
    311            .          .           // 
    312            .          .           // Length must be a power of two. 
    313            .          .           func newGroups(typ *abi.SwissMapType, length uint64) groupsReference { 
    314            .          .           	return groupsReference{ 
    315            .          .           		// TODO: make the length type the same throughout. 
    316            .      970ms           		data:       newarray(typ.Group, int(length)), 
    317            .          .           		lengthMask: length - 1, 
    318            .          .           	} 
    319            .          .           } 
    320            .          .            

internal/runtime/maps.(*groupsReference).group

/usr/lib/go/src/internal/runtime/maps/group.go

  Total:        40ms       40ms (flat, cum) 0.084%
    321            .          .           // group returns the group at index i. 
    322            .          .           func (g *groupsReference) group(typ *abi.SwissMapType, i uint64) groupReference { 
    323            .          .           	// TODO(prattmic): Do something here about truncation on cast to 
    324            .          .           	// uintptr on 32-bit systems? 
    325         20ms       20ms           	offset := uintptr(i) * typ.GroupSize 
    326            .          .            
    327            .          .           	return groupReference{ 
    328         20ms       20ms           		data: unsafe.Pointer(uintptr(g.data) + offset), 
    329            .          .           	} 
    330            .          .           } 
    331            .          .            
    332            .          .           func cloneGroup(typ *abi.SwissMapType, newGroup, oldGroup groupReference) { 
    333            .          .           	typedmemmove(typ.Group, newGroup.data, oldGroup.data) 

runtime.acquirem

/usr/lib/go/src/runtime/runtime1.go

  Total:       190ms      190ms (flat, cum)   0.4%
    625            .          .           // Helpers for Go. Must be NOSPLIT, must only call NOSPLIT functions, and must not block. 
    626            .          .            
    627            .          .           //go:nosplit 
    628            .          .           func acquirem() *m { 
    629            .          .           	gp := getg() 
    630        120ms      120ms           	gp.m.locks++ 
    631         70ms       70ms           	return gp.m 
    632            .          .           } 
    633            .          .            

runtime.releasem

/usr/lib/go/src/runtime/runtime1.go

  Total:       140ms      140ms (flat, cum)  0.29%
    635            .          .           func releasem(mp *m) { 
    636            .          .           	gp := getg() 
    637         60ms       60ms           	mp.locks-- 
    638         80ms       80ms           	if mp.locks == 0 && gp.preempt { 
    639            .          .           		// restore the preemption request in case we've cleared it in newstack 
    640            .          .           		gp.stackguard0 = stackPreempt 
    641            .          .           	} 
    642            .          .           } 
    643            .          .            

runtime.interhash

/usr/lib/go/src/runtime/alg.go

  Total:       150ms      170ms (flat, cum)  0.36%
    128            .          .           func c128hash(p unsafe.Pointer, h uintptr) uintptr { 
    129            .          .           	x := (*[2]float64)(p) 
    130            .          .           	return f64hash(unsafe.Pointer(&x[1]), f64hash(unsafe.Pointer(&x[0]), h)) 
    131            .          .           } 
    132            .          .            
    133         30ms       30ms           func interhash(p unsafe.Pointer, h uintptr) uintptr { 
    134            .          .           	a := (*iface)(p) 
    135            .          .           	tab := a.tab 
    136            .          .           	if tab == nil { 
    137            .          .           		return h 
    138            .          .           	} 
    139            .          .           	t := tab.Type 
    140        100ms      100ms           	if t.Equal == nil { 
    141            .          .           		// Check hashability here. We could do this check inside 
    142            .          .           		// typehash, but we want to report the topmost type in 
    143            .          .           		// the error text (e.g. in a struct with a field of slice type 
    144            .          .           		// we want to report the struct, not the slice). 
    145            .          .           		panic(errorString("hash of unhashable type " + toRType(t).string())) 
    146            .          .           	} 
    147         10ms       10ms           	if isDirectIface(t) {                                                       return t.Kind_&abi.KindDirectIface != 0                              typekind.go:11

    148         10ms       30ms           		return c1 * typehash(t, unsafe.Pointer(&a.data), h^c0) 
    149            .          .           	} else { 
    150            .          .           		return c1 * typehash(t, a.data, h^c0) 
    151            .          .           	} 
    152            .          .           } 
    153            .          .            
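
The t.Equal == nil check is what user code sees as the classic map panic:
an interface key whose dynamic type is uncomparable has no Equal function,
and interhash reports the topmost type. A runnable reproduction:

package main

import "fmt"

func main() {
	m := map[any]int{}
	defer func() {
		// recovered: runtime error: hash of unhashable type []int
		fmt.Println("recovered:", recover())
	}()
	m[[]int{1, 2}] = 3 // a slice inside an interface key is unhashable
}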

runtime.typehash

/usr/lib/go/src/runtime/alg.go

  Total:        20ms       20ms (flat, cum) 0.042%
    197            .          .           // 
    198            .          .           // Do not remove or change the type signature. 
    199            .          .           // See go.dev/issue/67401. 
    200            .          .           // 
    201            .          .           //go:linkname typehash 
    202         20ms       20ms           func typehash(t *_type, p unsafe.Pointer, h uintptr) uintptr { 
    203            .          .           	if t.TFlag&abi.TFlagRegularMemory != 0 { 
    204            .          .           		// Handle ptr sizes specially, see issue 37086. 
    205            .          .           		switch t.Size_ { 
    206            .          .           		case 4: 
    207            .          .           			return memhash32(p, h) 

runtime.interequal

/usr/lib/go/src/runtime/alg.go

  Total:       100ms      110ms (flat, cum)  0.23%
    286            .          .           	return *(*complex128)(p) == *(*complex128)(q) 
    287            .          .           } 
    288            .          .           func strequal(p, q unsafe.Pointer) bool { 
    289            .          .           	return *(*string)(p) == *(*string)(q) 
    290            .          .           } 
    291         30ms       30ms           func interequal(p, q unsafe.Pointer) bool { 
    292            .          .           	x := *(*iface)(p) 
    293            .          .           	y := *(*iface)(q) 
    294         70ms       80ms           	return x.tab == y.tab && ifaceeq(x.tab, x.data, y.data) 
    295            .          .           } 
    296            .          .           func nilinterequal(p, q unsafe.Pointer) bool { 
    297            .          .           	x := *(*eface)(p) 
    298            .          .           	y := *(*eface)(q) 
    299            .          .           	return x._type == y._type && efaceeq(x._type, x.data, y.data) 

runtime.ifaceeq

/usr/lib/go/src/runtime/alg.go

  Total:        50ms       50ms (flat, cum)   0.1%
    312            .          .           		// Ptrs, chans, and single-element items can be compared directly using ==. 
    313            .          .           		return x == y 
    314            .          .           	} 
    315            .          .           	return eq(x, y) 
    316            .          .           } 
    317         30ms       30ms           func ifaceeq(tab *itab, x, y unsafe.Pointer) bool { 
    318            .          .           	if tab == nil { 
    319            .          .           		return true 
    320            .          .           	} 
    321            .          .           	t := tab.Type 
    322            .          .           	eq := t.Equal 
    323         10ms       10ms           	if eq == nil { 
    324            .          .           		panic(errorString("comparing uncomparable type " + toRType(t).string())) 
    325            .          .           	} 
    326            .          .           	if isDirectIface(t) { 
    327            .          .           		// See comment in efaceeq. 
    328         10ms       10ms           		return x == y 
    329            .          .           	} 
    330            .          .           	return eq(x, y) 
    331            .          .           } 
    332            .          .            
    333            .          .           // Testing adapters for hash quality tests (see hash_test.go) 

runtime.(*spanSet).push

/usr/lib/go/src/runtime/mspanset.go

  Total:       140ms      660ms (flat, cum)  1.39%
     78            .          .           	spans [spanSetBlockEntries]atomicMSpanPointer 
     79            .          .           } 
     80            .          .            
     81            .          .           // push adds span s to buffer b. push is safe to call concurrently 
     82            .          .           // with other push and pop operations. 
     83            .       70ms           func (b *spanSet) push(s *mspan) { 
     84            .          .           	// Obtain our slot. 
     85         20ms      470ms           	cursor := uintptr(b.index.incTail().tail() - 1) 
     86        110ms      110ms           	top, bottom := cursor/spanSetBlockEntries, cursor%spanSetBlockEntries 
     87            .          .            
     88            .          .           	// Do we need to add a block? 
     89         10ms       10ms           	spineLen := b.spineLen.Load()                                                       return Loaduintptr(&u.value)                                         types.go:359

     90            .          .           	var block *spanSetBlock 
     91            .          .           retry: 
     92            .          .           	if top < spineLen { 
     93            .          .           		block = b.spine.Load().lookup(top).Load() 
     94            .          .           	} else { 

runtime.(*spanSet).push

/usr/lib/go/src/runtime/mspanset.go

  Total:        60ms       60ms (flat, cum)  0.13%
    140            .          .           		unlock(&b.spineLock) 
    141            .          .           	} 
    142            .          .            
    143            .          .           	// We have a block. Insert the span atomically, since there may be 
    144            .          .           	// concurrent readers via the block API. 
    145         60ms       60ms           	block.spans[bottom].StoreNoWB(s) 
    146            .          .           } 
    147            .          .            
    148            .          .           // pop removes and returns a span from buffer b, or nil if b is empty. 
    149            .          .           // pop is safe to call concurrently with other pop and push operations. 
    150            .          .           func (b *spanSet) pop() *mspan { 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        20ms       20ms (flat, cum) 0.042%
    172            .          .           		// Try to claim the current head by CASing in an updated head. 
    173            .          .           		// This may fail transiently due to a push which modifies the 
    174            .          .           		// tail, so keep trying while the head isn't changing. 
    175            .          .           		want := head 
    176            .          .           		for want == head { 
    177         20ms       20ms           			if b.index.cas(headtail, makeHeadTailIndex(want+1, tail)) {                                                                       return h.u.CompareAndSwap(uint64(old), uint64(new))  mspanset.go:371
                                                                          return Cas64(&u.value, old, new)                 types.go:325

    178            .          .           				break claimLoop 
    179            .          .           			} 
    180            .          .           			headtail = b.index.load() 
    181            .          .           			head, tail = headtail.split() 
    182            .          .           		} 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        10ms       10ms (flat, cum) 0.021%
    193            .          .            
    194            .          .           	// Given that the spine length is correct, we know we will never 
    195            .          .           	// see a nil block here, since the length is always updated after 
    196            .          .           	// the block is set. 
    197            .          .           	block := blockp.Load() 
    198         10ms       10ms           	s := block.spans[bottom].Load() 
    199            .          .           	for s == nil { 
    200            .          .           		// We raced with the span actually being set, but given that we 
    201            .          .           		// know a block for this span exists, the race window here is 
    202            .          .           		// extremely small. Try again. 
    203            .          .           		s = block.spans[bottom].Load() 

runtime.(*spanSet).pop

/usr/lib/go/src/runtime/mspanset.go

  Total:        60ms       60ms (flat, cum)  0.13%
    217            .          .           	// popping its corresponding mspan) by the time we get here. Because 
    218            .          .           	// we're the last popper, we also don't have to worry about concurrent 
    219            .          .           	// pushers (there can't be any). Note that we may not be the popper 
    220            .          .           	// which claimed the last slot in the block, we're just the last one 
    221            .          .           	// to finish popping. 
    222         60ms       60ms           	if block.popped.Add(1) == spanSetBlockEntries {                                                       return Xadd(&u.value, delta)                                         types.go:291

    223            .          .           		// Clear the block's pointer. 
    224            .          .           		blockp.StoreNoWB(nil) 
    225            .          .            
    226            .          .           		// Return the block to the block pool. 
    227            .          .           		spanSetBlockPool.free(block) 

runtime.(*atomicHeadTailIndex).cas

/usr/lib/go/src/runtime/mspanset.go

  Total:        20ms       20ms (flat, cum) 0.042%
    366            .          .           	return headTailIndex(h.u.Load()) 
    367            .          .           } 
    368            .          .            
    369            .          .           // cas atomically compares-and-swaps a headTailIndex value. 
    370            .          .           func (h *atomicHeadTailIndex) cas(old, new headTailIndex) bool { 
    371         20ms       20ms           	return h.u.CompareAndSwap(uint64(old), uint64(new))                                                       return Cas64(&u.value, old, new)                                     types.go:325

    372            .          .           } 
    373            .          .            
    374            .          .           // incHead atomically increments the head of a headTailIndex. 
    375            .          .           func (h *atomicHeadTailIndex) incHead() headTailIndex { 
    376            .          .           	return headTailIndex(h.u.Add(1 << 32)) 

runtime.(*atomicHeadTailIndex).incTail

/usr/lib/go/src/runtime/mspanset.go

  Total:        30ms      450ms (flat, cum)  0.94%
    380            .          .           func (h *atomicHeadTailIndex) decHead() headTailIndex { 
    381            .          .           	return headTailIndex(h.u.Add(-(1 << 32))) 
    382            .          .           } 
    383            .          .            
    384            .          .           // incTail atomically increments the tail of a headTailIndex. 
    385            .      420ms           func (h *atomicHeadTailIndex) incTail() headTailIndex { 
    386         30ms       30ms           	ht := headTailIndex(h.u.Add(1))                                                       return Xadd64(&u.value, delta)                                       types.go:344

    387            .          .           	// Check for overflow. 
    388            .          .           	if ht.tail() == 0 { 
    389            .          .           		print("runtime: head = ", ht.head(), ", tail = ", ht.tail(), "\n") 
    390            .          .           		throw("headTailIndex overflow") 
    391            .          .           	} 
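
incTail and incHead work because head and tail share one uint64 (head in
the high 32 bits, tail in the low 32), so a single atomic Add bumps either
end and a single Load observes a consistent pair. A minimal sketch of that
packing, reusing names from the listing:

package main

import (
	"fmt"
	"sync/atomic"
)

type headTailIndex uint64

func (h headTailIndex) head() uint32 { return uint32(h >> 32) }
func (h headTailIndex) tail() uint32 { return uint32(h) }

type atomicHeadTailIndex struct{ u atomic.Uint64 }

// incTail mirrors h.u.Add(1) above; incHead mirrors h.u.Add(1 << 32).
func (a *atomicHeadTailIndex) incTail() headTailIndex {
	return headTailIndex(a.u.Add(1))
}
func (a *atomicHeadTailIndex) incHead() headTailIndex {
	return headTailIndex(a.u.Add(1 << 32))
}

func main() {
	var idx atomicHeadTailIndex
	idx.incTail() // push reserves a slot at the tail
	idx.incTail()
	ht := idx.incHead()               // pop claims a slot from the head
	fmt.Println(ht.head(), ht.tail()) // 1 2
}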

runtime.gogo

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    186            .          .           // void gogo(Gobuf*) 
    187            .          .           // restore state from Gobuf; longjmp 
    188            .          .           TEXT runtime·gogo(SB), NOSPLIT|NOFRAME, $0-8 
    189            .          .           	MOVD	buf+0(FP), R5 
    190            .          .           	MOVD	gobuf_g(R5), R6 
    191         10ms       10ms           	MOVD	0(R6), R4	// make sure g != nil 
    192            .          .           	B	gogo<>(SB) 
    193            .          .            

gogo

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       30ms (flat, cum) 0.063%
    194            .          .           TEXT gogo<>(SB), NOSPLIT|NOFRAME, $0 
    195            .          .           	MOVD	R6, g 
    196            .       20ms           	BL	runtime·save_g(SB) 
    197            .          .            
    198            .          .           	MOVD	gobuf_sp(R5), R0 
    199            .          .           	MOVD	R0, RSP 
    200            .          .           	MOVD	gobuf_bp(R5), R29 
    201         10ms       10ms           	MOVD	gobuf_lr(R5), LR 
    202            .          .           	MOVD	gobuf_ctxt(R5), R26 
    203            .          .           	MOVD	$0, gobuf_sp(R5) 
    204            .          .           	MOVD	$0, gobuf_bp(R5) 
    205            .          .           	MOVD	$0, gobuf_lr(R5) 
    206            .          .           	MOVD	$0, gobuf_ctxt(R5) 

runtime.mcall

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        40ms      7.12s (flat, cum) 14.95%
    230            .          .           	CMP	g, R3 
    231            .          .           	BNE	2(PC) 
    232            .          .           	B	runtime·badmcall(SB) 
    233            .          .            
    234            .          .           	MOVD	(g_sched+gobuf_sp)(g), R0 
    235         10ms       10ms           	MOVD	R0, RSP	// sp = m->g0->sched.sp 
    236            .          .           	MOVD	$0, R29				// clear frame pointer, as caller may execute on another M 
    237            .          .           	MOVD	R3, R0				// arg = g 
    238         10ms       10ms           	MOVD	$0, -16(RSP)			// dummy LR 
    239         20ms       20ms           	SUB	$16, RSP 
    240            .          .           	MOVD	0(R26), R4			// code pointer 
    241            .      7.08s           	BL	(R4) 
    242            .          .           	B	runtime·badmcall2(SB) 
    243            .          .            
    244            .          .           // systemstack_switch is a dummy routine that systemstack leaves at the bottom 
    245            .          .           // of the G stack. We need to distinguish the routine that 
    246            .          .           // lives at the bottom of the G stack from the one that lives 

runtime.systemstack

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        60ms      3.41s (flat, cum)  7.16%
    252            .          .           	RET 
    253            .          .            
    254            .          .           // func systemstack(fn func()) 
    255            .          .           TEXT runtime·systemstack(SB), NOSPLIT, $0-8 
    256            .          .           	MOVD	fn+0(FP), R3	// R3 = fn 
    257         20ms       20ms           	MOVD	R3, R26		// context 
    258            .          .           	MOVD	g_m(g), R4	// R4 = m 
    259            .          .            
    260            .          .           	MOVD	m_gsignal(R4), R5	// R5 = gsignal 
    261            .          .           	CMP	g, R5 
    262            .          .           	BEQ	noswitch 
    263            .          .            
    264         10ms       10ms           	MOVD	m_g0(R4), R5	// R5 = g0 
    265            .          .           	CMP	g, R5 
    266            .          .           	BEQ	noswitch 
    267            .          .            
    268         10ms       10ms           	MOVD	m_curg(R4), R6 
    269            .          .           	CMP	g, R6 
    270            .          .           	BEQ	switch 
    271            .          .            
    272            .          .           	// Bad: g is not gsignal, not g0, not curg. What is it? 
    273            .          .           	// Hide call from linker nosplit analysis. 
    274            .          .           	MOVD	$runtime·badsystemstack(SB), R3 
    275            .          .           	BL	(R3) 
    276            .          .           	B	runtime·abort(SB) 
    277            .          .            
    278            .          .           switch: 
    279            .          .           	// Switch stacks. 
    280            .          .           	// The original frame pointer is stored in R29, 
    281            .          .           	// which is useful for stack unwinding. 
    282            .          .           	// Save our state in g->sched. Pretend to 
    283            .          .           	// be systemstack_switch if the G stack is scanned. 
    284            .          .           	BL	gosave_systemstack_switch<>(SB) 
    285            .          .            
    286            .          .           	// switch to g0 
    287            .          .           	MOVD	R5, g 
    288         10ms       10ms           	BL	runtime·save_g(SB) 
    289            .          .           	MOVD	(g_sched+gobuf_sp)(g), R3 
    290            .          .           	MOVD	R3, RSP 
    291            .          .            
    292            .          .           	// call target function 
    293            .          .           	MOVD	0(R26), R3	// code pointer 
    294            .      3.35s           	BL	(R3) 
    295            .          .            
    296            .          .           	// switch back to g 
    297            .          .           	MOVD	g_m(g), R3 
    298            .          .           	MOVD	m_curg(R3), g 
    299         10ms       10ms           	BL	runtime·save_g(SB) 
    300            .          .           	MOVD	(g_sched+gobuf_sp)(g), R0 
    301            .          .           	MOVD	R0, RSP 
    302            .          .           	MOVD	(g_sched+gobuf_bp)(g), R29 
    303            .          .           	MOVD	$0, (g_sched+gobuf_sp)(g) 
    304            .          .           	MOVD	$0, (g_sched+gobuf_bp)(g) 

runtime.morestack

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    367            .          .           	BNE	3(PC) 
    368            .          .           	BL	runtime·badmorestackg0(SB) 
    369            .          .           	B	runtime·abort(SB) 
    370            .          .            
    371            .          .           	// Cannot grow signal stack (m->gsignal). 
    372         10ms       10ms           	MOVD	m_gsignal(R8), R4 
    373            .          .           	CMP	g, R4 
    374            .          .           	BNE	3(PC) 
    375            .          .           	BL	runtime·badmorestackgsignal(SB) 
    376            .          .           	B	runtime·abort(SB) 
    377            .          .            

runtime.memhash64

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    605            .          .           TEXT runtime·memhash64<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24 
    606            .          .           	MOVB	runtime·useAeshash(SB), R10 
    607            .          .           	CBZ	R10, noaes 
    608            .          .           	MOVD	$runtime·aeskeysched+0(SB), R3 
    609            .          .            
    610         10ms       10ms           	VEOR	V0.B16, V0.B16, V0.B16 
    611            .          .           	VLD1	(R3), [V2.B16] 
    612            .          .           	VLD1	(R0), V0.D[1] 
    613            .          .           	VMOV	R1, V0.D[0] 
    614            .          .            
    615            .          .           	AESE	V2.B16, V0.B16 

runtime.strhash

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        20ms       20ms (flat, cum) 0.042%
    631            .          .           noaes: 
    632            .          .           	B	runtime·memhashFallback<ABIInternal>(SB) 
    633            .          .            
    634            .          .           // func strhash(p unsafe.Pointer, h uintptr) uintptr 
    635            .          .           TEXT runtime·strhash<ABIInternal>(SB),NOSPLIT|NOFRAME,$0-24 
    636         20ms       20ms           	MOVB	runtime·useAeshash(SB), R10 
    637            .          .           	CBZ	R10, noaes 
    638            .          .           	LDP	(R0), (R0, R2)	// string data / length 
    639            .          .           	B	aeshashbody<>(SB) 
    640            .          .           noaes: 
    641            .          .           	B	runtime·strhashFallback<ABIInternal>(SB) 

aeshashbody

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        50ms       50ms (flat, cum)   0.1%
    643            .          .           // R0: data 
    644            .          .           // R1: seed data 
    645            .          .           // R2: length 
    646            .          .           // At return, R0 = return value 
    647            .          .           TEXT aeshashbody<>(SB),NOSPLIT|NOFRAME,$0 
    648         50ms       50ms           	VEOR	V30.B16, V30.B16, V30.B16 
    649            .          .           	VMOV	R1, V30.D[0] 
    650            .          .           	VMOV	R2, V30.D[1] // load length into seed 
    651            .          .            
    652            .          .           	MOVD	$runtime·aeskeysched+0(SB), R4 
    653            .          .           	VLD1.P	16(R4), [V0.B16] 

aeshashbody

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        90ms       90ms (flat, cum)  0.19%
    666            .          .            
    667            .          .           aes0to15: 
    668            .          .           	CBZ	R2, aes0 
    669            .          .           	VEOR	V2.B16, V2.B16, V2.B16 
    670            .          .           	TBZ	$3, R2, less_than_8 
    671         10ms       10ms           	VLD1.P	8(R0), V2.D[0] 
    672            .          .            
    673            .          .           less_than_8: 
    674         20ms       20ms           	TBZ	$2, R2, less_than_4 
    675            .          .           	VLD1.P	4(R0), V2.S[2] 
    676            .          .            
    677            .          .           less_than_4: 
    678         60ms       60ms           	TBZ	$1, R2, less_than_2 
    679            .          .           	VLD1.P	2(R0), V2.H[6] 
    680            .          .            
    681            .          .           less_than_2: 
    682            .          .           	TBZ	$0, R2, done 
    683            .          .           	VLD1	(R0), V2.B[14] 
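
The TBZ ladder above assembles a 0-15 byte tail without a loop: each set bit of the remaining length enables exactly one load (8, 4, 2, then 1 bytes) into disjoint lanes of V2. The same decomposition in Go, as a hypothetical helper rather than runtime code:

	// loadTail gathers len(p) bytes (0..15) into a 16-byte buffer by
	// testing the length's bits, mirroring the TBZ $3/$2/$1/$0 chain.
	func loadTail(p []byte) (v [16]byte) {
		n, i := len(p), 0
		if n&8 != 0 { // TBZ $3: a doubleword, like V2.D[0]
			copy(v[0:8], p[i:i+8])
			i += 8
		}
		if n&4 != 0 { // TBZ $2: a word, like V2.S[2]
			copy(v[8:12], p[i:i+4])
			i += 4
		}
		if n&2 != 0 { // TBZ $1: a halfword, like V2.H[6]
			copy(v[12:14], p[i:i+2])
			i += 2
		}
		if n&1 != 0 { // TBZ $0: the last byte, like V2.B[14]
			v[14] = p[i]
		}
		return v
	}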

runtime.procyield

/usr/lib/go/src/runtime/asm_arm64.s

  Total:        10ms       10ms (flat, cum) 0.021%
    965            .          .           	RET 
    966            .          .            
    967            .          .           TEXT runtime·procyield(SB),NOSPLIT,$0-0 
    968            .          .           	MOVWU	cycles+0(FP), R0 
    969            .          .           again: 
    970         10ms       10ms           	YIELD 
    971            .          .           	SUBW	$1, R0 
    972            .          .           	CBNZ	R0, again 
    973            .          .           	RET 
    974            .          .            
    975            .          .           // Save state of caller into g->sched, 

runtime.(*mheap).nextSpanForSweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0       90ms (flat, cum)  0.19%
     98            .          .           	for sc := sweep.centralIndex.load(); sc < numSweepClasses; sc++ { 
     99            .          .           		spc, full := sc.split() 
    100            .          .           		c := &h.central[spc].mcentral 
    101            .          .           		var s *mspan 
    102            .          .           		if full { 
    103            .       90ms           			s = c.fullUnswept(sg).pop() 
    104            .          .           		} else { 
    105            .          .           			s = c.partialUnswept(sg).pop() 
    106            .          .           		} 
    107            .          .           		if s != nil { 
    108            .          .           			// Write down that we found something so future sweepers 

runtime.(*activeSweep).begin

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        50ms       50ms (flat, cum)   0.1%
    145            .          .           // this does not indicate that all sweeping has completed. 
    146            .          .           // 
    147            .          .           // Even if the sweepLocker is invalid, its sweepGen is always valid. 
    148            .          .           func (a *activeSweep) begin() sweepLocker { 
    149            .          .           	for { 
    150         10ms       10ms           		state := a.state.Load() 
    151         10ms       10ms           		if state&sweepDrainedMask != 0 { 
    152            .          .           			return sweepLocker{mheap_.sweepgen, false} 
    153            .          .           		} 
    154         30ms       30ms           		if a.state.CompareAndSwap(state, state+1) { 
                                               return Cas(&u.value, old, new)                       types.go:236
    155            .          .           			return sweepLocker{mheap_.sweepgen, true} 
    156            .          .           		} 
    157            .          .           	} 
    158            .          .           } 
    159            .          .            
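
begin above (and end below) pack two facts into one atomic word: a count of active sweepers in the low bits and a "drained" flag in the top bit (sweepDrainedMask), so registering or unregistering a sweeper is a single CAS with no lock. The pattern in isolation, with illustrative names:

	package sweepstate

	import "sync/atomic"

	const drainedMask = 1 << 31 // top bit: no more work will appear

	// tryBegin registers a worker unless the work source is drained,
	// retrying the CAS until it observes a consistent state.
	func tryBegin(state *atomic.Uint32) bool {
		for {
			s := state.Load()
			if s&drainedMask != 0 {
				return false
			}
			if state.CompareAndSwap(s, s+1) {
				return true // low bits now count us as active
			}
		}
	}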

runtime.(*activeSweep).end

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        30ms       30ms (flat, cum) 0.063%
    162            .          .           func (a *activeSweep) end(sl sweepLocker) { 
    163            .          .           	if sl.sweepGen != mheap_.sweepgen { 
    164            .          .           		throw("sweeper left outstanding across sweep generations") 
    165            .          .           	} 
    166            .          .           	for { 
    167         10ms       10ms           		state := a.state.Load() 
                                               return Load(&u.value)                                types.go:194
    168            .          .           		if (state&^sweepDrainedMask)-1 >= sweepDrainedMask { 
    169            .          .           			throw("mismatched begin/end of activeSweep") 
    170            .          .           		} 
    171         20ms       20ms           		if a.state.CompareAndSwap(state, state-1) { 
                                               return Cas(&u.value, old, new)                       types.go:236
    172            .          .           			if state-1 != sweepDrainedMask { 
    173            .          .           				return 
    174            .          .           			} 
    175            .          .           			// We're the last sweeper, and there's nothing left to sweep. 
    176            .          .           			if debug.gcpacertrace > 0 { 

runtime.bgsweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0      500ms (flat, cum)  1.05%
    295            .          .           		// isn't spare idle time available on other cores. If there's available idle 
    296            .          .           		// time, helping to sweep can reduce allocation latencies by getting ahead of 
    297            .          .           		// the proportional sweeper and having spans ready to go for allocation. 
    298            .          .           		const sweepBatchSize = 10 
    299            .          .           		nSwept := 0 
    300            .      500ms           		for sweepone() != ^uintptr(0) { 
    301            .          .           			nSwept++ 
    302            .          .           			if nSwept%sweepBatchSize == 0 { 
    303            .          .           				goschedIfBusy() 
    304            .          .           			} 
    305            .          .           		} 
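
The loop above amortizes scheduler interaction: sweep ten spans per goschedIfBusy check, so a busy machine can preempt the background sweeper without paying for a yield on every span. The same batching idiom at user level, with runtime.Gosched standing in for the runtime-internal goschedIfBusy:

	package batch

	import "runtime"

	// drain processes work items in fixed-size batches, yielding
	// between batches so other goroutines can run.
	func drain(doOne func() bool) {
		const batchSize = 10 // mirrors sweepBatchSize above
		n := 0
		for doOne() {
			n++
			if n%batchSize == 0 {
				runtime.Gosched()
			}
		}
	}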

runtime.(*sweepLocker).tryAcquire

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    341            .          .           func (l *sweepLocker) tryAcquire(s *mspan) (sweepLocked, bool) { 
    342            .          .           	if !l.valid { 
    343            .          .           		throw("use of invalid sweepLocker") 
    344            .          .           	} 
    345            .          .           	// Check before attempting to CAS. 
    346         10ms       10ms           	if atomic.Load(&s.sweepgen) != l.sweepGen-2 { 
    347            .          .           		return sweepLocked{}, false 
    348            .          .           	} 
    349            .          .           	// Attempt to acquire sweep ownership of s. 
    350            .          .           	if !atomic.Cas(&s.sweepgen, l.sweepGen-2, l.sweepGen-1) { 
    351            .          .           		return sweepLocked{}, false 

runtime.sweepone

/usr/lib/go/src/runtime/mgcsweep.go

  Total:       100ms      780ms (flat, cum)  1.64%
    362            .          .           	// in the middle of sweep thus leaving the span in an inconsistent state for next GC 
    363            .          .           	gp.m.locks++ 
    364            .          .            
    365            .          .           	// TODO(austin): sweepone is almost always called in a loop; 
    366            .          .           	// lift the sweepLocker into its callers. 
    367         40ms       40ms           	sl := sweep.active.begin() 
                                               if state&sweepDrainedMask != 0 {                     mgcsweep.go:151
                                               if a.state.CompareAndSwap(state, state+1) {          mgcsweep.go:154
                                                   return Cas(&u.value, old, new)                   types.go:236
    368            .          .           	if !sl.valid { 
    369            .          .           		gp.m.locks-- 
    370            .          .           		return ^uintptr(0) 
    371            .          .           	} 
    372            .          .            
    373            .          .           	// Find a span to sweep. 
    374            .          .           	npages := ^uintptr(0) 
    375            .          .           	var noMoreWork bool 
    376            .          .           	for { 
    377            .       90ms           		s := mheap_.nextSpanForSweep() 
    378            .          .           		if s == nil { 
    379            .          .           			noMoreWork = sweep.active.markDrained() 
    380            .          .           			break 
    381            .          .           		} 
    382         30ms       30ms           		if state := s.state.get(); state != mSpanInUse { 
    383            .          .           			// This can happen if direct sweeping already 
    384            .          .           			// swept this span, but in that case the sweep 
    385            .          .           			// generation should always be up-to-date. 
    386            .          .           			if !(s.sweepgen == sl.sweepGen || s.sweepgen == sl.sweepGen+3) { 
    387            .          .           				print("runtime: bad span s.state=", state, " s.sweepgen=", s.sweepgen, " sweepgen=", sl.sweepGen, "\n") 
    388            .          .           				throw("non in-use span in unswept list") 
    389            .          .           			} 
    390            .          .           			continue 
    391            .          .           		} 
    392            .       10ms           		if s, ok := sl.tryAcquire(s); ok { 
    393            .          .           			// Sweep the span we found. 
    394            .          .           			npages = s.npages 
    395            .      550ms           			if s.sweep(false) { 
    396            .          .           				// Whole span was freed. Count it toward the 
    397            .          .           				// page reclaimer credit since these pages can 
    398            .          .           				// now be used for span allocation. 
    399         30ms       30ms           				mheap_.reclaimCredit.Add(npages) 
                                               return Xadduintptr(&u.value, delta)                  types.go:420
    400            .          .           			} else { 
    401            .          .           				// Span is still in-use, so this returned no 
    402            .          .           				// pages to the heap and the span needs to 
    403            .          .           				// move to the swept in-use list. 
    404            .          .           				npages = 0 
    405            .          .           			} 
    406            .          .           			break 
    407            .          .           		} 
    408            .          .           	} 
    409            .       30ms           	sweep.active.end(sl) 
    410            .          .            
    411            .          .           	if noMoreWork { 
    412            .          .           		// The sweep list is empty. There may still be 
    413            .          .           		// concurrent sweeps running, but we're at least very 
    414            .          .           		// close to done sweeping. 

runtime.(*mspan).ensureSwept

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    464            .          .           func (s *mspan) ensureSwept() { 
    465            .          .           	// Caller must disable preemption. 
    466            .          .           	// Otherwise when this function returns the span can become unswept again 
    467            .          .           	// (if GC is triggered on another goroutine). 
    468            .          .           	gp := getg() 
    469         10ms       10ms           	if gp.m.locks == 0 && gp.m.mallocing == 0 && gp != gp.m.g0 { 
    470            .          .           		throw("mspan.ensureSwept: m is not locked") 
    471            .          .           	} 
    472            .          .            
    473            .          .           	// If this operation fails, then that means that there are 
    474            .          .           	// no more spans to be swept. In this case, either s has already 

runtime.(*mspan).ensureSwept

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    486            .          .            
    487            .          .           	// Unfortunately we can't sweep the span ourselves. Somebody else 
    488            .          .           	// got to it first. We don't have efficient means to wait, but that's 
    489            .          .           	// OK, it will be swept fairly soon. 
    490            .          .           	for { 
    491         10ms       10ms           		spangen := atomic.Load(&s.sweepgen) 
    492            .          .           		if spangen == sl.sweepGen || spangen == sl.sweepGen+3 { 
    493            .          .           			break 
    494            .          .           		} 
    495            .          .           		osyield() 
    496            .          .           	} 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        30ms       30ms (flat, cum) 0.063%
    526            .          .           	if trace.ok() { 
    527            .          .           		trace.GCSweepSpan(s.npages * pageSize) 
    528            .          .           		traceRelease(trace) 
    529            .          .           	} 
    530            .          .            
    531         30ms       30ms           	mheap_.pagesSwept.Add(int64(s.npages)) 
                                               return Xadd64(&u.value, delta)                       types.go:344
    532            .          .            
    533            .          .           	spc := s.spanclass 
    534            .          .           	size := s.elemsize 
    535            .          .            
    536            .          .           	// The allocBits indicate which unmarked objects don't need to be 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        10ms       10ms (flat, cum) 0.021%
    547            .          .           	//    In such case we need to queue finalizer for execution, 
    548            .          .           	//    mark the object as live and preserve the profile special. 
    549            .          .           	// 2. A tiny object can have several finalizers setup for different offsets. 
    550            .          .           	//    If such object is not marked, we need to queue all finalizers at once. 
    551            .          .           	// Both 1 and 2 are possible at the same time. 
    552         10ms       10ms           	hadSpecials := s.specials != nil 
    553            .          .           	siter := newSpecialsIter(s) 
    554            .          .           	for siter.valid() { 
    555            .          .           		// A finalizer can be set for an inner byte of an object, find object beginning. 
    556            .          .           		objIndex := uintptr(siter.s.offset) / size 
    557            .          .           		p := s.base() + objIndex*size 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0       10ms (flat, cum) 0.021%
    593            .          .           					// Find the exact byte for which the special was setup 
    594            .          .           					// (as opposed to object beginning). 
    595            .          .           					special := siter.s 
    596            .          .           					p := s.base() + uintptr(special.offset) 
    597            .          .           					siter.unlinkAndNext() 
    598            .       10ms           					freeSpecial(special, unsafe.Pointer(p), size) 
    599            .          .           				} 
    600            .          .           			} 
    601            .          .           		} else { 
    602            .          .           			// object is still live 
    603            .          .           			if siter.s.kind == _KindSpecialReachable { 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:           0       90ms (flat, cum)  0.19%
    650            .          .           		} 
    651            .          .           	} 
    652            .          .            
    653            .          .           	// Copy over and clear the inline mark bits if necessary. 
    654            .          .           	if gcUsesSpanInlineMarkBits(s.elemsize) { 
    655            .       90ms           		s.moveInlineMarks(s.gcmarkBits) 
    656            .          .           	} 
    657            .          .            
    658            .          .           	// Check for zombie objects. 
    659            .          .           	if s.freeindex < s.nelems { 
    660            .          .           		// Everything < freeindex is allocated and hence 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms       60ms (flat, cum)  0.13%
    673            .          .           			} 
    674            .          .           		} 
    675            .          .           	} 
    676            .          .            
    677            .          .           	// Count the number of free objects in this span. 
    678         10ms       10ms           	nalloc := uint16(s.countAlloc()) 
                                               mrkBits := *(*uint64)(unsafe.Pointer(s.gcmarkBits.bytep(i)))  mbitmap.go:1465
    679            .          .           	nfreed := s.allocCount - nalloc 
    680         10ms       10ms           	if nalloc > s.allocCount { 
    681            .          .           		// The zombie check above should have caught this in 
    682            .          .           		// more detail. 
    683            .          .           		print("runtime: nelems=", s.nelems, " nalloc=", nalloc, " previous allocCount=", s.allocCount, " nfreed=", nfreed, "\n") 
    684            .          .           		throw("sweep increased allocation count") 
    685            .          .           	} 
    686            .          .            
    687            .          .           	s.allocCount = nalloc 
    688            .          .           	s.freeindex = 0 // reset allocation index to start of span. 
    689            .          .           	s.freeIndexForScan = 0 
    690            .          .           	if traceEnabled() { 
    691            .          .           		getg().m.p.ptr().trace.reclaimed += uintptr(nfreed) * s.elemsize 
    692            .          .           	} 
    693            .          .            
    694            .          .           	// gcmarkBits becomes the allocBits. 
    695            .          .           	// get a fresh cleared gcmarkBits in preparation for next GC 
    696            .          .           	s.allocBits = s.gcmarkBits 
    697            .       40ms           	s.gcmarkBits = newMarkBits(uintptr(s.nelems)) 
    698            .          .            
    699            .          .           	// refresh pinnerBits if they exist 
    700            .          .           	if s.pinnerBits != nil { 
    701            .          .           		s.refreshPinnerBits() 
    702            .          .           	} 

runtime.(*sweepLocked).sweep

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        20ms      350ms (flat, cum)  0.73%
    772            .          .           			stats := memstats.heapStats.acquire() 
    773            .          .           			atomic.Xadd64(&stats.smallFreeCount[spc.sizeclass()], int64(nfreed)) 
    774            .          .           			memstats.heapStats.release() 
    775            .          .            
    776            .          .           			// Count the frees in the inconsistent, internal stats. 
    777         20ms       20ms           			gcController.totalFree.Add(int64(nfreed) * int64(s.elemsize)) 
                                               return Xadd64(&u.value, delta)                       types.go:344
    778            .          .           		} 
    779            .          .           		if !preserve { 
    780            .          .           			// The caller may not have removed this span from whatever 
    781            .          .           			// unswept set it's on but taken ownership of the span for 
    782            .          .           			// sweeping by updating sweepgen. If this span still is in 
    783            .          .           			// an unswept set, then the mcentral will pop it off the 
    784            .          .           			// set, check its sweepgen, and ignore it. 
    785            .          .           			if nalloc == 0 { 
    786            .          .           				// Free totally free span directly back to the heap. 
    787            .      330ms           				mheap_.freeSpan(s) 
                                               systemstack(func() {                                 mheap.go:1633
    788            .          .           				return true 
    789            .          .           			} 
    790            .          .           			// Return span back to the right mcentral list. 
    791            .          .           			if nalloc == s.nelems { 
    792            .          .           				mheap_.central[spc].mcentral.fullSwept(sweepgen).push(s) 

runtime.deductSweepCredit

/usr/lib/go/src/runtime/mgcsweep.go

  Total:        40ms       50ms (flat, cum)   0.1%
    907            .          .           // It uses statistics gathered by the garbage collector to perform 
    908            .          .           // enough sweeping so that all pages are swept during the concurrent 
    909            .          .           // sweep phase between GC cycles. 
    910            .          .           // 
    911            .          .           // mheap_ must NOT be locked. 
    912            .       10ms           func deductSweepCredit(spanBytes uintptr, callerSweepPages uintptr) { 
    913         20ms       20ms           	if mheap_.sweepPagesPerByte == 0 { 
    914            .          .           		// Proportional sweep is done or disabled. 
    915            .          .           		return 
    916            .          .           	} 
    917            .          .            
    918            .          .           	trace := traceAcquire() 
    919            .          .           	if trace.ok() { 
    920            .          .           		trace.GCSweepStart() 
    921            .          .           		traceRelease(trace) 
    922            .          .           	} 
    923            .          .            
    924            .          .           	// Fix debt if necessary. 
    925            .          .           retry: 
    926            .          .           	sweptBasis := mheap_.pagesSweptBasis.Load() 
    927            .          .           	live := gcController.heapLive.Load() 
    928         10ms       10ms           	liveBasis := mheap_.sweepHeapLiveBasis 
    929            .          .           	newHeapLive := spanBytes 
    930            .          .           	if liveBasis < live { 
    931            .          .           		// Only do this subtraction when we don't overflow. Otherwise, pagesTarget 
    932            .          .           		// might be computed as something really huge, causing us to get stuck 
    933            .          .           		// sweeping here until the next mark phase. 
    934            .          .           		// 
    935            .          .           		// Overflow can happen here if gcPaceSweeper is called concurrently with 
    936            .          .           		// sweeping (i.e. not during a STW, like it usually is) because this code 
    937            .          .           		// is intentionally racy. A concurrent call to gcPaceSweeper can happen 
    938            .          .           		// if a GC tuning parameter is modified and we read an older value of 
    939            .          .           		// heapLive than what was used to set the basis. 
    940            .          .           		// 
    941            .          .           		// This state should be transient, so it's fine to just let newHeapLive 
    942            .          .           		// be a relatively small number. We'll probably just skip this attempt to 
    943            .          .           		// sweep. 
    944            .          .           		// 
    945            .          .           		// See issue #57523. 
    946            .          .           		newHeapLive += uintptr(live - liveBasis) 
    947            .          .           	} 
    948         10ms       10ms           	pagesTarget := int64(mheap_.sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages) 
    949            .          .           	for pagesTarget > int64(mheap_.pagesSwept.Load()-sweptBasis) { 
    950            .          .           		if sweepone() == ^uintptr(0) { 
    951            .          .           			mheap_.sweepPagesPerByte = 0 
    952            .          .           			break 
    953            .          .           		} 
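
The pacing math: a goroutine allocating spanBytes owes roughly sweepPagesPerByte * (heapLive - liveBasis + spanBytes) pages, minus the callerSweepPages it already swept, and it calls sweepone until pagesSwept catches up to that target. A worked instance with made-up numbers:

	// pagesOwed evaluates the same expression as line 948 above.
	func pagesOwed(sweepPagesPerByte float64, newHeapLive, callerSweepPages uintptr) int64 {
		return int64(sweepPagesPerByte*float64(newHeapLive)) - int64(callerSweepPages)
	}

	// With sweepPagesPerByte = 0.001, a 1 MiB live delta plus a
	// 64 KiB span, and 8 pages of prior credit:
	// pagesOwed(0.001, 1<<20+64<<10, 8) == 1106 pages to sweep.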

git.urbach.dev/cli/q/src/token.identifier

/home/user/q/src/token/identifier.go

  Total:       250ms      250ms (flat, cum)  0.52%
      1            .          .           package token 
      2            .          .            
      3            .          .           // identifier handles all tokens that qualify as an identifier. 
      4         20ms       20ms           func identifier(tokens List, buffer []byte, i Position) (List, Position) { 
      5            .          .           	position := i 
      6            .          .           	i++ 
      7            .          .            
      8         80ms       80ms           	for i < Position(len(buffer)) && isIdentifier(buffer[i]) { 
                                               return isLetter(c) || isDigit(c) || c == '_'             identifier.go:55
                                                   return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')  identifier.go:68
      9         30ms       30ms           		i++ 
     10            .          .           	} 
     11            .          .            
     12         10ms       10ms           	identifier := buffer[position:i] 
     13            .          .           	kind := Identifier 
     14            .          .            
     15         20ms       20ms           	switch string(identifier) { 
     16            .          .           	case "as": 
     17            .          .           		kind = Cast 
     18            .          .           	case "assert": 
     19            .          .           		kind = Assert 
     20         20ms       20ms           	case "const": 
     21            .          .           		kind = Const 
     22            .          .           	case "delete": 
     23            .          .           		kind = Delete 
     24            .          .           	case "if": 
     25            .          .           		kind = If 
     26            .          .           	case "else": 
     27            .          .           		kind = Else 
     28            .          .           	case "extern": 
     29            .          .           		kind = Extern 
     30            .          .           	case "global": 
     31            .          .           		kind = Global 
     32            .          .           	case "go": 
     33            .          .           		kind = Go 
     34            .          .           	case "import": 
     35            .          .           		kind = Import 
     36            .          .           	case "loop": 
     37            .          .           		kind = Loop 
     38         10ms       10ms           	case "new": 
     39            .          .           		kind = New 
     40            .          .           	case "return": 
     41            .          .           		kind = Return 
     42         10ms       10ms           	case "syscall": 
     43         10ms       10ms           		kind = Syscall 
     44            .          .           	case "switch": 
     45            .          .           		kind = Switch 
     46            .          .           	} 
     47            .          .            
     48         30ms       30ms           	tokens = append(tokens, Token{Kind: kind, Position: position, Length: Length(len(identifier))}) 
     49         10ms       10ms           	return tokens, i 
     50            .          .           } 
     51            .          .            
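
The switch on string(identifier) looks like it allocates a string per token, but the Go compiler elides the []byte-to-string copy when the conversion is used directly in a switch, a comparison, or a map index, so keyword matching stays allocation-free. A map-based equivalent relying on the same optimization (a sketch with stand-in names, not the q source):

	type Kind int

	const (
		Identifier Kind = iota
		Cast
		Assert
		Const
		If
		// ... remaining keyword kinds
	)

	var keywords = map[string]Kind{
		"as": Cast, "assert": Assert, "const": Const, "if": If,
	}

	func kindOf(identifier []byte) Kind {
		if k, ok := keywords[string(identifier)]; ok { // no copy here
			return k
		}
		return Identifier
	}

At this keyword count the switch is a reasonable choice anyway: a handful of length-and-byte comparisons tends to beat hashing every identifier.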

git.urbach.dev/cli/q/src/token.isIdentifier

/home/user/q/src/token/identifier.go

  Total:        40ms       40ms (flat, cum) 0.084%
     53            .          .           // a digit or an underscore. 
     54            .          .           func isIdentifier(c byte) bool { 
     55         40ms       40ms           	return isLetter(c) || isDigit(c) || c == '_' 
                                               return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')  identifier.go:68
     56            .          .           } 
     57            .          .            
     58            .          .           // isIdentifierStart returns true if the character is the 

git.urbach.dev/cli/q/src/token.isIdentifierStart

/home/user/q/src/token/identifier.go

  Total:        30ms       30ms (flat, cum) 0.063%
     59            .          .           // start of an identifier which is either a letter or an 
     60            .          .           // underscore. 
     61            .          .           func isIdentifierStart(c byte) bool { 
     62         30ms       30ms           	return isLetter(c) || c == '_' 
                                               return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z')  identifier.go:68
     63            .          .           } 
     64            .          .            

git.urbach.dev/cli/q/src/token.isLetter

/home/user/q/src/token/identifier.go

  Total:        50ms       50ms (flat, cum)   0.1%
     66            .          .           // or uppercase letter in the English alphabet. 
     67            .          .           func isLetter(c byte) bool { 
     68         50ms       50ms           	return (c >= 'a' && c <= 'z') || (c >= 'A' && c <= 'Z') 
     69            .          .           } 

git.urbach.dev/cli/q/src/codegen..markAlive.Backward[go.shape.[]*git.urbach.dev/cli/q/src/codegen.Step,go.shape.*uint8].func1

/usr/lib/go/src/slices/iter.go

  Total:        20ms       20ms (flat, cum) 0.042%
     23            .          .            
     24            .          .           // Backward returns an iterator over index-value pairs in the slice, 
     25            .          .           // traversing it backward with descending indices. 
     26            .          .           func Backward[Slice ~[]E, E any](s Slice) iter.Seq2[int, E] { 
     27            .          .           	return func(yield func(int, E) bool) { 
     28         20ms       20ms           		for i := len(s) - 1; i >= 0; i-- { 

git.urbach.dev/cli/q/src/ssa..FindExisting.Backward[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }].func1

/usr/lib/go/src/slices/iter.go

  Total:       260ms      1.99s (flat, cum)  4.18%
     29        260ms      1.99s           			if !yield(i, s[i]) { 
                                           inlined and expanded call sites (deduplicated):
                                               if existing.IsPure() && instr.Equals(existing) {     Block.go:191
                                               switch existing.(type) {                             Block.go:197
                                               case *Call, *CallExtern:                             Block.go:198
                                               for _, current := range slices.Backward(steps) {     markAlive.go:31
                                               if slices.Contains(current.Live, live) {             markAlive.go:32
                                               current.Live = append(current.Live, live)            markAlive.go:36
                                               if live.Value == current.Value {                     markAlive.go:38
                                               for i := range s {                                   slices.go:97
                                               if v == s[i] {                                       slices.go:98
                                               return Index(s, v) >= 0                              slices.go:118
                                               f.hintABI(step)                                      CompileToAssembly.go:22
                                               f.createLiveRanges(step)                             CompileToAssembly.go:23
                                               if step.Register == -1 && f.needsRegister(step) {    CompileToAssembly.go:27
                                               f.assignFreeRegister(step)                           CompileToAssembly.go:28
                                               f.hintDestination(step)                              CompileToAssembly.go:31
                                               step.Register = f.findFreeRegister(step)             assignFreeRegister.go:6
                                               if f.ValueToStep[arg].Register == f.CPU.ExternCall.In[i] {  executeCallExtern.go:21
     30            .          .           				return 
     31            .          .           			} 
     32            .          .           		} 
     33            .          .           	} 
     34            .          .           } 
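
slices.Backward (Go 1.23+) is the stdlib reverse iterator these generic instantiations come from; the heavy cumulative time belongs to the yield bodies in Block.go and markAlive.go, not to the loop shell itself. Basic usage, assuming "fmt" and "slices" are imported:

	// Prints 2 c, 1 b, 0 a: descending indices paired with values.
	for i, v := range slices.Backward([]string{"a", "b", "c"}) {
		fmt.Println(i, v)
	}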

git.urbach.dev/cli/q/src/codegen.(*Function).findFreeRegister

/home/user/q/src/codegen/findFreeRegister.go

  Total:        10ms       10ms (flat, cum) 0.021%
      8            .          .           	"git.urbach.dev/cli/q/src/ssa" 
      9            .          .           	"git.urbach.dev/cli/q/src/token" 
     10            .          .           ) 
     11            .          .            
     12            .          .           // findFreeRegister finds a free register for the given value. 
     13         10ms       10ms           func (f *Function) findFreeRegister(step *Step) cpu.Register { 
     14            .          .           	usedRegisters := 0 
     15            .          .           	binaryOp, isBinaryOp := step.Value.(*ssa.BinaryOp) 
     16            .          .            
     17            .          .           	if isBinaryOp && !binaryOp.Op.IsComparison() { 
     18            .          .           		switch f.build.Arch { 

git.urbach.dev/cli/q/src/codegen.(*Function).findFreeRegister

/home/user/q/src/codegen/findFreeRegister.go

  Total:        90ms       90ms (flat, cum)  0.19%
     36            .          .           				usedRegisters |= (1 << right.Register) 
     37            .          .           			} 
     38            .          .           		} 
     39            .          .           	} 
     40            .          .            
     41         10ms       10ms           	for _, current := range f.Steps { 
     42            .          .           		// These checks need to happen regardless of whether the value is alive after execution. 
     43            .          .           		// If it is used as an operand, the operand restrictions of the architecture apply. 
     44         60ms       60ms           		binaryOp, isBinaryOp := current.Value.(*ssa.BinaryOp) 
     45            .          .            
     46            .          .           		if isBinaryOp && !binaryOp.Op.IsComparison() { 
     47         10ms       10ms           			switch f.build.Arch { 
     48            .          .           			case config.ARM: 
     49            .          .           				if current.Register != -1 && binaryOp.Op == token.Mod { 
     50         10ms       10ms           					if binaryOp.Left == step.Value { 
     51            .          .           						usedRegisters |= (1 << current.Register) 
     52            .          .           					} 
     53            .          .            
     54            .          .           					if binaryOp.Right == step.Value { 
     55            .          .           						usedRegisters |= (1 << current.Register) 

git.urbach.dev/cli/q/src/codegen.(*Function).findFreeRegister

/home/user/q/src/codegen/findFreeRegister.go

  Total:       170ms      170ms (flat, cum)  0.36%
     77            .          .           				} 
     78            .          .           			} 
     79            .          .           		} 
     80            .          .            
     81            .          .           		// If it's not alive in this step, ignore it. 
     82         20ms       20ms           		if !slices.Contains(current.Live, step) { 
                                               return Index(s, v) >= 0                              slices.go:118
                                                   if v == s[i] {                                   slices.go:98
     83            .          .           			continue 
     84            .          .           		} 
     85            .          .            
     86            .          .           		// Mark all the neighbor registers that are alive 
     87            .          .           		// at the same time as used. 
     88         10ms       10ms           		for _, live := range current.Live { 
     89         20ms       20ms           			if live.Register == -1 { 
     90            .          .           				continue 
     91            .          .           			} 
     92            .          .            
     93         20ms       20ms           			usedRegisters |= (1 << live.Register) 
     94            .          .           		} 
     95            .          .            
     96            .          .           		// Ignore the definition itself. 
     97            .          .           		if current == step { 
     98            .          .           			continue 
     99            .          .           		} 
    100            .          .            
    101            .          .           		// Find all the registers that this instruction 
    102            .          .           		// would clobber and mark them as used. 
    103            .          .           		var clobbered []cpu.Register 
    104            .          .            
    105            .          .           		switch instr := current.Value.(type) { 
    106            .          .           		case *ssa.BinaryOp: 
    107            .          .           			switch instr.Op { 
    108         10ms       10ms           			case token.Div, token.Mod: 
    109            .          .           				clobbered = f.CPU.DivisionClobbered 
    110         10ms       10ms           			case token.Shl, token.Shr: 
    111            .          .           				clobbered = f.CPU.ShiftClobbered 
    112            .          .           			} 
    113            .          .           		case *ssa.Call: 
    114            .          .           			clobbered = f.CPU.Call.Clobbered 
    115            .          .           		case *ssa.CallExtern: 
    116            .          .           			clobbered = f.CPU.ExternCall.Clobbered 
    117            .          .           		case *ssa.FromTuple: 
    118            .          .           			usedRegisters |= (1 << f.CPU.Call.Out[instr.Index]) 
    119            .          .           		case *ssa.Parameter: 
    120            .          .           			usedRegisters |= (1 << f.CPU.Call.In[instr.Index]) 
    121            .          .           		case *ssa.Syscall: 
    122            .          .           			clobbered = f.CPU.Syscall.Clobbered 
    123            .          .           		} 
    124            .          .            
    125         10ms       10ms           		for _, reg := range clobbered { 
    126         10ms       10ms           			usedRegisters |= (1 << reg) 
    127            .          .           		} 
    128            .          .           	} 
    129            .          .            
    130            .          .           	// Pick one of the register hints if possible. 
    131            .          .           	for _, reg := range step.Hints { 
    132            .          .           		if usedRegisters&(1<<reg) == 0 { 
    133            .          .           			return reg 
    134            .          .           		} 
    135            .          .           	} 
    136            .          .            
    137            .          .           	// Pick a general purpose register that's not used yet. 
    138         10ms       10ms           	for _, reg := range f.CPU.General { 
    139         40ms       40ms           		if usedRegisters&(1<<reg) == 0 { 
    140         10ms       10ms           			return reg 
    141            .          .           		} 
    142            .          .           	} 
    143            .          .            
    144            .          .           	panic("no free registers") 
    145            .          .           } 
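
findFreeRegister treats the register file as a bitset in a single integer: liveness, operand restrictions, and clobber sets OR bits into usedRegisters, then the first clear bit among the hints and the general-purpose list wins. The selection step in miniature (illustrative, not the q source):

	// pickFree returns the first candidate register whose bit is
	// clear in the used-register mask.
	func pickFree(used uint64, candidates []uint8) (uint8, bool) {
		for _, r := range candidates {
			if used&(1<<r) == 0 {
				return r, true
			}
		}
		return 0, false // caller decides how to handle exhaustion
	}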

git.urbach.dev/cli/q/src/expression.(*Expression).AddChild

/home/user/q/src/expression/Expression.go

  Total:        40ms      190ms (flat, cum)   0.4%
     15            .          .           	precedence int8 
     16            .          .           } 
     17            .          .            
     18            .          .           // AddChild adds a child to the expression. 
     19            .          .           func (expr *Expression) AddChild(child *Expression) { 
     20         10ms       10ms           	if expr.Children == nil { 
     21            .      100ms           		expr.Children = make([]*Expression, 0, 2) 
     22            .          .           	} 
     23            .          .            
     24         30ms       80ms           	expr.Children = append(expr.Children, child) 
     25            .          .           	child.Parent = expr 
     26            .          .           } 

git.urbach.dev/cli/q/src/expression.(*Expression).EachLeaf

/home/user/q/src/expression/Expression.go

  Total:       130ms      300ms (flat, cum)  0.63%
     27            .          .            
     28            .          .           // EachLeaf iterates through all leaves in the tree. 
     29         30ms       30ms           func (expr *Expression) EachLeaf(yield func(*Expression) bool) bool { 
     30         20ms       20ms           	if expr.IsLeaf() { 
                                               return len(expr.Children) == 0                       Expression.go:68
     31         40ms       80ms           		return yield(expr) 
     32            .          .           	} 
     33            .          .            
     34         20ms       20ms           	for _, child := range expr.Children { 
     35         10ms      140ms           		if !child.EachLeaf(yield) { 
     36            .          .           			return false 
     37            .          .           		} 
     38            .          .           	} 
     39            .          .            
     40         10ms       10ms           	return true 
     41            .          .           } 
     42            .          .            
     43            .          .           // Index returns the position of the child or `-1` if it's not a child of this expression. 
     44            .          .           func (expr *Expression) Index(child *Expression) int { 
     45            .          .           	for i, c := range expr.Children { 

git.urbach.dev/cli/q/src/expression.(*Expression).InsertAbove

/home/user/q/src/expression/Expression.go

  Total:        10ms       60ms (flat, cum)  0.13%
     58            .          .           	if expr.Parent != nil { 
     59            .          .           		expr.Parent.Children[len(expr.Parent.Children)-1] = tree 
     60            .          .           		tree.Parent = expr.Parent 
     61            .          .           	} 
     62            .          .            
     63         10ms       60ms           	tree.AddChild(expr) 
                                               if expr.Children == nil {                            Expression.go:20
                                               expr.Children = make([]*Expression, 0, 2)            Expression.go:21
     64            .          .           } 
     65            .          .            

git.urbach.dev/cli/q/src/expression.(*Expression).IsLeaf

/home/user/q/src/expression/Expression.go

  Total:        30ms       30ms (flat, cum) 0.063%
     66            .          .           // IsLeaf returns true if the expression has no children. 
     67            .          .           func (expr *Expression) IsLeaf() bool { 
     68         30ms       30ms           	return len(expr.Children) == 0 
     69            .          .           } 
     70            .          .            
     71            .          .           // LastChild returns the last child. 
     72            .          .           func (expr *Expression) LastChild() *Expression { 
     73            .          .           	return expr.Children[len(expr.Children)-1] 

git.urbach.dev/cli/q/src/expression.(*Expression).Source.(*Expression).Leaves.func1

/home/user/q/src/expression/Expression.go

  Total:           0      170ms (flat, cum)  0.36%
     74            .          .           } 
     75            .          .            
     76            .          .           // Leaves iterates through all leaves in the tree. 
     77            .          .           func (expr *Expression) Leaves() iter.Seq[*Expression] { 
     78            .          .           	return func(yield func(*Expression) bool) { 
     79            .      170ms           		expr.EachLeaf(yield) 
     80            .          .           	} 
     81            .          .           } 
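
Leaves adapts EachLeaf's explicit-yield recursion into an iter.Seq, so callers can range over leaves directly; EachLeaf's bool return is what lets a break in the caller unwind the recursion early. A self-contained version of the same shape:

	package tree

	import "iter"

	type node struct{ children []*node }

	// leaves yields every childless node in depth-first order and
	// stops as soon as yield reports false.
	func leaves(root *node) iter.Seq[*node] {
		return func(yield func(*node) bool) {
			var walk func(*node) bool
			walk = func(n *node) bool {
				if len(n.children) == 0 {
					return yield(n)
				}
				for _, c := range n.children {
					if !walk(c) {
						return false
					}
				}
				return true
			}
			walk(root)
		}
	}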
     82            .          .            
     83            .          .           // RemoveChild removes a child from the expression. 
     84            .          .           func (expr *Expression) RemoveChild(child *Expression) { 

git.urbach.dev/cli/q/src/expression.(*Expression).Source

/home/user/q/src/expression/Expression.go

  Total:        30ms      200ms (flat, cum)  0.42%
    102            .          .           	expr.Token.Reset() 
    103            .          .           	expr.precedence = 0 
    104            .          .           } 
    105            .          .            
    106            .          .           // Source returns the start and end positions in the source file. 
    107         10ms       10ms           func (expr *Expression) Source() token.Source { 
    108            .          .           	start := expr.Token.Position 
    109            .          .           	end := expr.Token.End() 
    110            .          .            
    111         20ms      190ms           	for leaf := range expr.Leaves() { 
                                               expr.EachLeaf(yield)                                 Expression.go:79

git.urbach.dev/cli/q/src/expression.(*Expression).Source-range1

/home/user/q/src/expression/Expression.go

  Total:        40ms       40ms (flat, cum) 0.084%
    112         20ms       20ms           		if leaf.Token.Position < start { 
    113         10ms       10ms           			start = leaf.Token.Position 
    114         10ms       10ms           		} else if leaf.Token.End() > end { 
    115            .          .           			end = leaf.Token.End() 
    116            .          .           		} 

git.urbach.dev/cli/q/src/expression.(*Expression).Source

/home/user/q/src/expression/Expression.go

  Total:        10ms       10ms (flat, cum) 0.021%
    117            .          .           	} 
    118            .          .            
    119         10ms       10ms           	return token.NewSource(start, end) 
    120            .          .           } 
    121            .          .            
    122            .          .           // SourceString returns the string that was parsed in this expression. 
    123            .          .           func (expr *Expression) SourceString(source []byte) string { 
    124            .          .           	region := expr.Source() 

git.urbach.dev/cli/q/src/core.(*Environment).AddPackage

/home/user/q/src/core/Environment.go

  Total:           0      220ms (flat, cum)  0.46%
     22            .          .           	typeCache 
     23            .          .           } 
     24            .          .            
     25            .          .           // AddPackage returns an existing package with the given name or creates a new one. 
     26            .          .           func (env *Environment) AddPackage(name string, isExtern bool) *Package { 
     27            .       60ms           	pkg, exists := env.Packages[name] 
     28            .          .            
     29            .          .           	if !exists { 
     30            .       40ms           		pkg = &Package{ 
     31            .          .           			Name:      name, 
     32            .       10ms           			Constants: make(map[string]*Constant), 
     33            .       30ms           			Functions: make(map[string]*Function, 8), 
     34            .       10ms           			Structs:   make(map[string]*types.Struct), 
     35            .       10ms           			Globals:   make(map[string]*Global), 
     36            .          .           			IsExtern:  isExtern, 
     37            .          .           		} 
     38            .          .            
     39            .       60ms           		env.Packages[name] = pkg 
     40            .          .           	} 
     41            .          .            
     42            .          .           	return pkg 
     43            .          .           } 
     44            .          .            
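
Editor's note: AddPackage is the classic get-or-create map pattern; the 60ms on the lookup and the 60ms on the insert above are two separate hash-and-probe operations on the same key. A standalone sketch with hypothetical types:

    package main

    import "fmt"

    type pkg struct{ name string }

    type env struct{ packages map[string]*pkg }

    // addPackage returns the existing package with the given name or
    // creates and registers a new one. The miss path hashes the key
    // twice: once for the lookup, once for the insert.
    func (e *env) addPackage(name string) *pkg {
        p, exists := e.packages[name]
        if !exists {
            p = &pkg{name: name}
            e.packages[name] = p
        }
        return p
    }

    func main() {
        e := &env{packages: map[string]*pkg{}}
        fmt.Println(e.addPackage("run") == e.addPackage("run")) // true
    }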

git.urbach.dev/cli/q/src/core.(*Environment).Function

/home/user/q/src/core/Environment.go

  Total:        10ms       40ms (flat, cum) 0.084%
     48            .          .            
     49            .          .           	if !exists { 
     50            .          .           		return nil 
     51            .          .           	} 
     52            .          .            
     53         10ms       40ms           	fn, exists := pkg.Functions[name] 
     54            .          .            
     55            .          .           	if !exists { 
     56            .          .           		return nil 
     57            .          .           	} 
     58            .          .            

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:           0       10ms (flat, cum) 0.021%
     60            .          .           } 
     61            .          .            
     62            .          .           // Functions returns an iterator over all functions. 
     63            .          .           func (env *Environment) Functions() iter.Seq[*Function] { 
     64            .          .           	return func(yield func(*Function) bool) { 
     65            .       10ms           		for _, pkg := range env.Packages { 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:           0       40ms (flat, cum) 0.084%
     66            .       40ms           			for _, fn := range pkg.Functions { 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Functions.func3

/home/user/q/src/core/Environment.go

  Total:       230ms      1.09s (flat, cum)  2.29%
     67        230ms      1.09s           				for variant := range fn.Variants { 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).Functions.func3-range1

/home/user/q/src/core/Environment.go

  Total:       190ms      1.05s (flat, cum)  2.20%
     68        190ms      1.05s           					if !yield(variant) { 

     69            .          .           						return 
     70            .          .           					} 
     71            .          .           				} 
     72            .          .           			} 
     73            .          .           		} 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Globals.func2

/home/user/q/src/core/Environment.go

  Total:           0       20ms (flat, cum) 0.042%
     76            .          .            
     77            .          .           // Globals returns an iterator over all globals. 
     78            .          .           func (env *Environment) Globals() iter.Seq[*Global] { 
     79            .          .           	return func(yield func(*Global) bool) { 
     80            .          .           		for _, pkg := range env.Packages { 
     81            .       20ms           			for _, global := range pkg.Globals { 
     82            .          .           				if !yield(global) { 
     83            .          .           					return 
     84            .          .           				} 
     85            .          .           			} 
     86            .          .           		} 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).LiveFunctions.func5

/home/user/q/src/core/Environment.go

  Total:           0      430ms (flat, cum)   0.9%
     90            .          .           // LiveFunctions returns an iterator over functions that are alive, 
     91            .          .           // starting with `run.init` and all of its dependencies. 
     92            .          .           func (env *Environment) LiveFunctions() iter.Seq[*Function] { 
     93            .          .           	return func(yield func(*Function) bool) { 
     94            .          .           		running := true 
     95            .       50ms           		traversed := make(map[*Function]bool, env.NumFunctions) 
     96            .          .            
     97            .      380ms           		env.Init.EachDependency(traversed, func(f *Function) { 
     98            .          .           			if !running { 
     99            .          .           				return 

git.urbach.dev/cli/q/src/compiler.Compile.(*Environment).LiveFunctions.func5.1

/home/user/q/src/core/Environment.go

  Total:           0      340ms (flat, cum)  0.71%
    100            .          .           			} 
    101            .          .            
    102            .      340ms           			running = yield(f) 
    103            .          .           		}) 
    104            .          .           	} 
    105            .          .           } 
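
Editor's note: LiveFunctions bridges a callback-style traversal (EachDependency) into an iter.Seq. Because the callback API has no way to abort the walk, the closure latches yield's result in `running` and turns the remaining visits into no-ops. A compact sketch of that bridge, using a hypothetical walk function in place of EachDependency:

    package main

    import (
        "fmt"
        "iter"
    )

    // walk is a stand-in for a callback-only traversal that cannot
    // be stopped early.
    func walk(visit func(int)) {
        for i := range 5 {
            visit(i)
        }
    }

    // live adapts walk into an iterator. Once yield returns false,
    // running stays false and later callbacks return immediately.
    func live() iter.Seq[int] {
        return func(yield func(int) bool) {
            running := true
            walk(func(n int) {
                if !running {
                    return
                }
                running = yield(n)
            })
        }
    }

    func main() {
        for n := range live() {
            if n == 2 {
                break // walk keeps running, but visits become no-ops
            }
            fmt.Println(n)
        }
    }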

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes

/home/user/q/src/core/Environment.go

  Total:       220ms      630ms (flat, cum)  1.32%
    106            .          .            
    107            .          .           // ResolveTypes resolves all the type tokens in structs, globals and function parameters. 
    108            .          .           func (env *Environment) ResolveTypes() error { 
    109         20ms       90ms           	err := env.parseStructs(env.Structs()) 

    110            .          .            
    111            .          .           	if err != nil { 
    112            .          .           		return err 
    113            .          .           	} 
    114            .          .            
    115            .       20ms           	err = env.parseGlobals(env.Globals()) 

    116            .          .            
    117            .          .           	if err != nil { 
    118            .          .           		return err 
    119            .          .           	} 
    120            .          .            
    121        200ms      520ms           	return env.parseParameters(env.Functions()) 

    122            .          .           } 
    123            .          .            
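
Editor's note: ResolveTypes runs its three phases in a fixed order, structs first, then globals, then function parameters, stopping at the first error (presumably because globals and parameters may name struct types). The shape is plain sequential error propagation:

    package main

    import "fmt"

    // phase is a hypothetical stand-in for parseStructs and friends.
    func phase(name string) error { fmt.Println(name); return nil }

    // resolveTypes mirrors the phase ordering in the listing above.
    func resolveTypes() error {
        if err := phase("structs"); err != nil {
            return err
        }
        if err := phase("globals"); err != nil {
            return err
        }
        return phase("parameters")
    }

    func main() { _ = resolveTypes() }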

git.urbach.dev/cli/q/src/core.typeByName.(*Environment).Structs.func1

/home/user/q/src/core/Environment.go

  Total:           0       10ms (flat, cum) 0.021%
    125            .          .           func (env *Environment) Structs() iter.Seq[*types.Struct] { 
    126            .          .           	return func(yield func(*types.Struct) bool) { 
    127            .       10ms           		for _, pkg := range env.Packages { 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).Structs.func1

/home/user/q/src/core/Environment.go

  Total:           0       90ms (flat, cum)  0.19%
    128            .       40ms           			for _, structure := range pkg.Structs { 
    129            .       50ms           				if !yield(structure) { 

    130            .          .           					return 
    131            .          .           				} 
    132            .          .           			} 
    133            .          .           		} 
    134            .          .           	} 

slices.Index[go.shape.[]git.urbach.dev/cli/q/src/cpu.Register,go.shape.int8]

/usr/lib/go/src/slices/slices.go

  Total:        60ms       60ms (flat, cum)  0.13%
     92            .          .           } 
     93            .          .            
     94            .          .           // Index returns the index of the first occurrence of v in s, 
     95            .          .           // or -1 if not present. 
     96            .          .           func Index[S ~[]E, E comparable](s S, v E) int { 
     97         60ms       60ms           	for i := range s { 

slices.Index[go.shape.[]*git.urbach.dev/cli/q/src/codegen.Step,go.shape.*git.urbach.dev/cli/q/src/codegen.Step]

/usr/lib/go/src/slices/slices.go

  Total:        60ms       60ms (flat, cum)  0.13%
     98         60ms       60ms           		if v == s[i] { 
     99            .          .           			return i 
    100            .          .           		} 
    101            .          .           	} 
    102            .          .           	return -1 

slices.IndexFunc[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:        50ms       60ms (flat, cum)  0.13%
    104            .          .            
    105            .          .           // IndexFunc returns the first index i satisfying f(s[i]), 
    106            .          .           // or -1 if none do. 
    107            .          .           func IndexFunc[S ~[]E, E any](s S, f func(E) bool) int { 
    108         30ms       30ms           	for i := range s { 
    109         20ms       30ms           		if f(s[i]) { 
    110            .          .           			return i 
    111            .          .           		} 
    112            .          .           	} 
    113            .          .           	return -1 

slices.Contains[go.shape.[]*git.urbach.dev/cli/q/src/codegen.Step,go.shape.*git.urbach.dev/cli/q/src/codegen.Step]

/usr/lib/go/src/slices/slices.go

  Total:       130ms      130ms (flat, cum)  0.27%
    114            .          .           } 
    115            .          .            
    116            .          .           // Contains reports whether v is present in s. 
    117            .          .           func Contains[S ~[]E, E comparable](s S, v E) bool { 
    118        130ms      130ms           	return Index(s, v) >= 0 

    119            .          .           } 
    120            .          .            
    121            .          .           // ContainsFunc reports whether at least one 
    122            .          .           // element e of s satisfies f(e). 
    123            .          .           func ContainsFunc[S ~[]E, E any](s S, f func(E) bool) bool { 
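
Editor's note: Contains is a thin wrapper over Index, so the 130ms above is really the linear scan in Index. For repeated membership tests on the same data, a set-shaped map is usually the better structure:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        steps := []string{"a", "b", "c"}

        // O(n) scan per query: fine for short slices.
        fmt.Println(slices.Contains(steps, "b")) // true

        // For many queries over the same data, build a set once.
        set := make(map[string]struct{}, len(steps))
        for _, s := range steps {
            set[s] = struct{}{}
        }
        _, ok := set["b"]
        fmt.Println(ok) // true
    }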

slices.Insert[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:        20ms       20ms (flat, cum) 0.042%
    151            .          .           		s2 := append(s[:i], make(S, n+m-i)...) 
    152            .          .           		copy(s2[i:], v) 
    153            .          .           		copy(s2[i+m:], s[i:]) 
    154            .          .           		return s2 
    155            .          .           	} 
    156         10ms       10ms           	s = s[:n+m] 
    157            .          .            
    158            .          .           	// before: 
    159            .          .           	// s: aaaaaaaabbbbccccccccdddd 
    160            .          .           	//            ^   ^       ^   ^ 
    161            .          .           	//            i  i+m      n  n+m 
    162            .          .           	// after: 
    163            .          .           	// s: aaaaaaaavvvvbbbbcccccccc 
    164            .          .           	//            ^   ^       ^   ^ 
    165            .          .           	//            i  i+m      n  n+m 
    166            .          .           	// 
    167            .          .           	// a are the values that don't move in s. 
    168            .          .           	// v are the values copied in from v. 
    169            .          .           	// b and c are the values from s that are shifted up in index. 
    170            .          .           	// d are the values that get overwritten, never to be seen again. 
    171            .          .            
    172         10ms       10ms           	if !overlaps(v, s[i+m:]) { 
    173            .          .           		// Easy case - v does not overlap either the c or d regions. 
    174            .          .           		// (It might be in some of a or b, or elsewhere entirely.) 
    175            .          .           		// The data we copy up doesn't write to v at all, so just do it. 
    176            .          .            
    177            .          .           		copy(s[i+m:], s[i:]) 

slices.DeleteFunc[go.shape.[]git.urbach.dev/cli/q/src/ssa.Value,go.shape.interface { AddUser; Equals bool; Inputs []git.urbach.dev/cli/q/src/ssa.Value; IsPure bool; RemoveUser; Replace; String string; Type git.urbach.dev/cli/q/src/types.Type; Users []git.urbach.dev/cli/q/src/ssa.Value }]

/usr/lib/go/src/slices/slices.go

  Total:       110ms      140ms (flat, cum)  0.29%
    234            .          .            
    235            .          .           // DeleteFunc removes any elements from s for which del returns true, 
    236            .          .           // returning the modified slice. 
    237            .          .           // DeleteFunc zeroes the elements between the new length and the original length. 
    238            .          .           // If the result is empty, it has the same nilness as s. 
    239         20ms       20ms           func DeleteFunc[S ~[]E, E any](s S, del func(E) bool) S { 
    240         50ms       60ms           	i := IndexFunc(s, del) 

    241            .          .           	if i == -1 { 
    242         20ms       20ms           		return s 
    243            .          .           	} 
    244            .          .           	// Don't start copying elements until we find one to delete. 
    245            .          .           	for j := i + 1; j < len(s); j++ { 
    246         10ms       10ms           		if v := s[j]; !del(v) { 
    247         10ms       10ms           			s[i] = v 
    248            .          .           			i++ 
    249            .          .           		} 
    250            .          .           	} 
    251            .       20ms           	clear(s[i:]) // zero/nil out the obsolete elements, for GC 
    252            .          .           	return s[:i] 
    253            .          .           } 
    254            .          .            
    255            .          .           // Replace replaces the elements s[i:j] by the given v, and returns the 
    256            .          .           // modified slice. 
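
Editor's note: DeleteFunc filters in place; it only starts moving elements after the first hit (the IndexFunc call above) and zeroes the tail so the GC can reclaim what the shortened slice no longer references. Usage is straightforward:

    package main

    import (
        "fmt"
        "slices"
    )

    func main() {
        vals := []int{1, 2, 3, 4, 5, 6}

        // Remove even values in place; the backing array is reused
        // and the vacated tail is zeroed for the garbage collector.
        vals = slices.DeleteFunc(vals, func(v int) bool {
            return v%2 == 0
        })

        fmt.Println(vals) // [1 3 5]
    }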

git.urbach.dev/cli/q/src/core.(*Function).AddInput

/home/user/q/src/core/Function.go

  Total:        30ms      260ms (flat, cum)  0.55%
     29            .          .           	codegen.Function 
     30            .          .           } 
     31            .          .            
     32            .          .           // AddInput adds an input parameter. 
     33            .          .           func (f *Function) AddInput(tokens token.List, source token.Source) { 
     34         20ms      250ms           	f.Input = append(f.Input, &ssa.Parameter{ 
     35         10ms       10ms           		Tokens: tokens, 
     36            .          .           		Source: source, 
     37            .          .           	}) 
     38            .          .           } 

git.urbach.dev/cli/q/src/core.(*Function).AddOutput

/home/user/q/src/core/Function.go

  Total:        10ms       70ms (flat, cum)  0.15%
     39            .          .            
     40            .          .           // AddOutput adds an output parameter. 
     41            .          .           func (f *Function) AddOutput(tokens token.List, source token.Source) { 
     42         10ms       70ms           	f.Output = append(f.Output, &ssa.Parameter{ 
     43            .          .           		Tokens: tokens, 
     44            .          .           		Source: source, 
     45            .          .           	}) 

git.urbach.dev/cli/q/src/core.(*Function).AddSuffix

/home/user/q/src/core/Function.go

  Total:           0       50ms (flat, cum)   0.1%
     47            .          .            
     48            .          .           // AddSuffix adds a suffix to the name and is used for generic functions. 
     49            .          .           func (f *Function) AddSuffix(suffix string) { 
     50            .       20ms           	f.name += suffix 
     51            .       30ms           	f.FullName += suffix 
     52            .          .           } 
     53            .          .            

git.urbach.dev/cli/q/src/core.(*Function).Body

/home/user/q/src/core/Function.go

  Total:        10ms       10ms (flat, cum) 0.021%
     54            .          .           // Body returns the function body. 
     55            .          .           func (f *Function) Body() token.List { 
     56         10ms       10ms           	return f.File.Tokens[f.body.Start():f.body.End()] 
     57            .          .           } 
     58            .          .            
     59            .          .           // IsExtern returns true if the function has no body. 
     60            .          .           func (f *Function) IsExtern() bool { 
     61            .          .           	return f.body.End() == 0 

git.urbach.dev/cli/q/src/core.(*Function).Package

/home/user/q/src/core/Function.go

  Total:        10ms       10ms (flat, cum) 0.021%
     71            .          .           	return f.name 
     72            .          .           } 
     73            .          .            
     74            .          .           // Package returns the package name. 
     75            .          .           func (f *Function) Package() string { 
     76         10ms       10ms           	return f.pkg 
     77            .          .           } 
     78            .          .            
     79            .          .           // SetBody sets the token range for the function body. 
     80            .          .           func (f *Function) SetBody(start int, end int) { 
     81            .          .           	f.body = token.NewSource(token.Position(start), token.Position(end)) 

git.urbach.dev/cli/q/src/core.(*Function).Variants

/home/user/q/src/core/Function.go

  Total:       200ms      1.06s (flat, cum)  2.23%
     87            .          .           } 
     88            .          .            
     89            .          .           // Variants returns all function overloads. 
     90            .          .           func (f *Function) Variants(yield func(*Function) bool) { 
     91            .          .           	for { 
     92        200ms      1.06s           		if !yield(f) { 

     93            .          .           			return 
     94            .          .           		} 
     95            .          .            
     96            .          .           		f = f.Next 
     97            .          .            
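
Editor's note: Variants walks a linked list of overloads and pushes each one into yield; its signature already matches the range-over-func form, so callers can range over the method value directly, which is why `fn.Variants` appears as a frame above. A minimal sketch with a hypothetical overload type:

    package main

    import "fmt"

    type fn struct {
        name string
        next *fn
    }

    // variants pushes f and every overload linked after it. The
    // signature matches iter.Seq[*fn], so it can be ranged directly.
    func (f *fn) variants(yield func(*fn) bool) {
        for f != nil {
            if !yield(f) {
                return
            }
            f = f.next
        }
    }

    func main() {
        f := &fn{name: "print", next: &fn{name: "print:int"}}
        for v := range f.variants {
            fmt.Println(v.name)
        }
    }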

internal/runtime/maps.h2

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        30ms       30ms (flat, cum) 0.063%
    186            .          .            
    187            .          .           // Extracts the H2 portion of a hash: the 7 bits not used for h1. 
    188            .          .           // 
    189            .          .           // These are used as an occupied control byte. 
    190            .          .           func h2(h uintptr) uintptr { 
    191         30ms       30ms           	return h & 0x7f 
    192            .          .           } 
    193            .          .            
    194            .          .           // Note: changes here must be reflected in cmd/compile/internal/reflectdata/map_swiss.go:SwissMapType. 
    195            .          .           type Map struct { 
    196            .          .           	// The number of filled slots (i.e. the number of elements in all 
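
Editor's note: Swiss-table maps split each hash in two: h2 is the low 7 bits stored as an occupied control byte, and the remaining bits (h1) select the group, so a whole group of candidate slots can be filtered against h2 at once. A worked example of the split (the runtime's exact h1 derivation differs in detail):

    package main

    import "fmt"

    func main() {
        h := uintptr(0xDEADBEEF)

        h2 := h & 0x7f // low 7 bits -> control byte, here 0x6f
        h1 := h >> 7   // remaining bits select the group

        fmt.Printf("h2=%#x h1=%#x\n", h2, h1)
    }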

internal/runtime/maps.NewMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        20ms      190ms (flat, cum)   0.4%
    255            .          .           // If m is non-nil, it should be used rather than allocating. 
    256            .          .           // 
    257            .          .           // maxAlloc should be runtime.maxAlloc. 
    258            .          .           // 
    259            .          .           // TODO(prattmic): Put maxAlloc somewhere accessible. 
    260         10ms       10ms           func NewMap(mt *abi.SwissMapType, hint uintptr, m *Map, maxAlloc uintptr) *Map { 
    261         10ms       10ms           	if m == nil { 
    262            .      160ms           		m = new(Map) 
    263            .          .           	} 
    264            .          .            
    265            .       10ms           	m.seed = uintptr(rand()) 
    266            .          .            
    267            .          .           	if hint <= abi.SwissMapGroupSlots { 
    268            .          .           		// A small map can fill all 8 slots, so no need to increase 
    269            .          .           		// target capacity. 
    270            .          .           		// 

internal/runtime/maps.NewMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        20ms      310ms (flat, cum)  0.65%
    289            .          .           	targetCapacity := (hint * abi.SwissMapGroupSlots) / maxAvgGroupLoad 
    290            .          .           	if targetCapacity < hint { // overflow 
    291            .          .           		return m // return an empty map. 
    292            .          .           	} 
    293            .          .            
    294         10ms       10ms           	dirSize := (uint64(targetCapacity) + maxTableCapacity - 1) / maxTableCapacity 
    295         10ms       10ms           	dirSize, overflow := alignUpPow2(dirSize) 

    296            .          .           	if overflow || dirSize > uint64(math.MaxUintptr) { 
    297            .          .           		return m // return an empty map. 
    298            .          .           	} 
    299            .          .            
    300            .          .           	// Reject hints that are obviously too large. 
    301            .          .           	groups, overflow := math.MulUintptr(uintptr(dirSize), maxTableCapacity) 
    302            .          .           	if overflow { 
    303            .          .           		return m // return an empty map. 
    304            .          .           	} else { 
    305            .          .           		mem, overflow := math.MulUintptr(groups, mt.GroupSize) 
    306            .          .           		if overflow || mem > maxAlloc { 
    307            .          .           			return m // return an empty map. 
    308            .          .           		} 
    309            .          .           	} 
    310            .          .            
    311            .          .           	m.globalDepth = uint8(sys.TrailingZeros64(dirSize)) 
    312            .          .           	m.globalShift = depthToShift(m.globalDepth) 
    313            .          .            
    314            .       40ms           	directory := make([]*table, dirSize) 
    315            .          .            
    316            .          .           	for i := range directory { 
    317            .          .           		// TODO: Think more about initial table capacity. 
    318            .      250ms           		directory[i] = newTable(mt, uint64(targetCapacity)/dirSize, i, m.globalDepth) 
    319            .          .           	} 
    320            .          .            
    321            .          .           	m.dirPtr = unsafe.Pointer(&directory[0]) 
    322            .          .           	m.dirLen = len(directory) 
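
Editor's note: the directory sizing above is ceil(targetCapacity / maxTableCapacity), rounded up to a power of two so that directoryIndex can be computed with shifts (globalDepth/globalShift). A sketch of the arithmetic; the constant here is illustrative and the overflow handling of the real helper is omitted:

    package main

    import (
        "fmt"
        "math/bits"
    )

    // alignUpPow2 rounds n up to the next power of two, the same
    // shape as the runtime helper inlined in the listing above.
    func alignUpPow2(n uint64) uint64 {
        if n == 0 {
            return 0
        }
        return uint64(1) << bits.Len64(n-1)
    }

    func main() {
        const maxTableCapacity = 1024 // illustrative value
        targetCapacity := uint64(3000)

        dirSize := (targetCapacity + maxTableCapacity - 1) / maxTableCapacity
        fmt.Println(dirSize, alignUpPow2(dirSize)) // 3 4
    }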

internal/runtime/maps.NewEmptyMap

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0      170ms (flat, cum)  0.36%
    324            .          .           	return m 
    325            .          .           } 
    326            .          .            
    327            .          .           func NewEmptyMap() *Map { 
    328            .      160ms           	m := new(Map) 
    329            .       10ms           	m.seed = uintptr(rand()) 
    330            .          .           	// See comment in NewMap. No need to eager allocate a group. 
    331            .          .           	return m 
    332            .          .           } 
    333            .          .            
    334            .          .           func (m *Map) directoryIndex(hash uintptr) uintptr { 

internal/runtime/maps.(*Map).Used

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        20ms       20ms (flat, cum) 0.042%
    389            .          .           	right.index = left.index + entries 
    390            .          .           	m.replaceTable(right) 
    391            .          .           } 
    392            .          .            
    393            .          .           func (m *Map) Used() uint64 { 
    394         20ms       20ms           	return m.used 
    395            .          .           } 
    396            .          .            
    397            .          .           // Get performs a lookup of the key that key points to. It returns a pointer to 
    398            .          .           // the element, or false if the key doesn't exist. 
    399            .          .           func (m *Map) Get(typ *abi.SwissMapType, key unsafe.Pointer) (unsafe.Pointer, bool) { 

internal/runtime/maps.(*Map).getWithKeySmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0       30ms (flat, cum) 0.063%
    452            .          .           		slotKey := g.key(typ, i) 
    453            .          .           		if typ.IndirectKey() { 
    454            .          .           			slotKey = *((*unsafe.Pointer)(slotKey)) 
    455            .          .           		} 
    456            .          .            
    457            .       30ms           		if typ.Key.Equal(key, slotKey) { 
    458            .          .           			slotElem := g.elem(typ, i) 
    459            .          .           			if typ.IndirectElem() { 
    460            .          .           				slotElem = *((*unsafe.Pointer)(slotElem)) 
    461            .          .           			} 
    462            .          .           			return slotKey, slotElem, true 

internal/runtime/maps.(*Map).putSlotSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        60ms       60ms (flat, cum)  0.13%
    527            .          .            
    528            .          .           		return elem 
    529            .          .           	} 
    530            .          .           } 
    531            .          .            
    532         20ms       20ms           func (m *Map) putSlotSmall(typ *abi.SwissMapType, hash uintptr, key unsafe.Pointer) unsafe.Pointer { 
    533            .          .           	g := groupReference{ 
    534            .          .           		data: m.dirPtr, 
    535            .          .           	} 
    536            .          .            
    537         30ms       30ms           	match := g.ctrls().matchH2(h2(hash)) 
    538            .          .            
    539            .          .           	// Look for an existing slot containing this key. 
    540         10ms       10ms           	for match != 0 { 
    541            .          .           		i := match.first() 
    542            .          .            
    543            .          .           		slotKey := g.key(typ, i) 
    544            .          .           		if typ.IndirectKey() { 
    545            .          .           			slotKey = *((*unsafe.Pointer)(slotKey)) 

internal/runtime/maps.(*Map).putSlotSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        30ms       30ms (flat, cum) 0.063%
    560            .          .           	} 
    561            .          .            
    562            .          .           	// There can't be deleted slots, small maps can't have them 
    563            .          .           	// (see deleteSmall). Use matchEmptyOrDeleted as it is a bit 
    564            .          .           	// more efficient than matchEmpty. 
    565         20ms       20ms           	match = g.ctrls().matchEmptyOrDeleted() 
    566            .          .           	if match == 0 { 
    567            .          .           		fatal("small map with no empty slot (concurrent map writes?)") 
    568            .          .           		return nil 
    569            .          .           	} 
    570            .          .            
    571            .          .           	i := match.first() 
    572            .          .            
    573            .          .           	slotKey := g.key(typ, i) 
    574         10ms       10ms           	if typ.IndirectKey() { 
    575            .          .           		kmem := newobject(typ.Key) 
    576            .          .           		*(*unsafe.Pointer)(slotKey) = kmem 
    577            .          .           		slotKey = kmem 
    578            .          .           	} 
    579            .          .           	typedmemmove(typ.Key, slotKey, key) 
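
Editor's note: matchH2 above probes all 8 control bytes of a group in one go. A SWAR sketch of the same idea (not the runtime's code): xor the control word with the broadcast h2 byte, then detect zero bytes. Like the runtime's version, this can report false positives on bytes after a true match due to borrow propagation, which is harmless because the caller compares full keys anyway:

    package main

    import "fmt"

    // matchByte returns a bitset with the high bit of byte i set for
    // every byte of group that equals b.
    func matchByte(group uint64, b uint8) uint64 {
        const lo = 0x0101010101010101
        const hi = 0x8080808080808080
        x := group ^ (lo * uint64(b))
        return (x - lo) &^ x & hi
    }

    func main() {
        group := uint64(0x2f1a2f0000002f05) // 8 control bytes
        fmt.Printf("%#x\n", matchByte(group, 0x2f))
        // prints 0x8000800000008000: bytes 1, 5 and 7 match
    }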

internal/runtime/maps.(*Map).growToSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        50ms      820ms (flat, cum)  1.72%
    590            .          .            
    591            .          .           	return slotElem 
    592            .          .           } 
    593            .          .            
    594            .          .           func (m *Map) growToSmall(typ *abi.SwissMapType) { 
    595            .      770ms           	grp := newGroups(typ, 1) 

    596         10ms       10ms           	m.dirPtr = grp.data 
    597            .          .            
    598            .          .           	g := groupReference{ 
    599            .          .           		data: m.dirPtr, 
    600            .          .           	} 
    601         20ms       20ms           	g.ctrls().setEmpty() 
    602         20ms       20ms           } 
    603            .          .            

internal/runtime/maps.(*Map).growToTable

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0       30ms (flat, cum) 0.063%
    604            .          .           func (m *Map) growToTable(typ *abi.SwissMapType) { 
    605            .       30ms           	tab := newTable(typ, 2*abi.SwissMapGroupSlots, 0, 0) 
    606            .          .            
    607            .          .           	g := groupReference{ 
    608            .          .           		data: m.dirPtr, 
    609            .          .           	} 
    610            .          .            

internal/runtime/maps.(*Map).growToTable

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms       40ms (flat, cum) 0.084%
    622            .          .           		elem := g.elem(typ, i) 
    623            .          .           		if typ.IndirectElem() { 
    624            .          .           			elem = *((*unsafe.Pointer)(elem)) 
    625            .          .           		} 
    626            .          .            
    627         10ms       10ms           		hash := typ.Hasher(key, m.seed) 
    628            .          .            
    629            .       20ms           		tab.uncheckedPutSlot(typ, hash, key, elem) 
    630            .          .           	} 
    631            .          .            
    632            .       10ms           	directory := make([]*table, 1) 
    633            .          .            
    634            .          .           	directory[0] = tab 
    635            .          .            
    636            .          .           	m.dirPtr = unsafe.Pointer(&directory[0]) 
    637            .          .           	m.dirLen = len(directory) 

internal/runtime/maps.(*Map).Delete

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:        10ms       30ms (flat, cum) 0.063%
    654            .          .            
    655            .          .           	hash := typ.Hasher(key, m.seed) 
    656            .          .            
    657            .          .           	// Set writing after calling Hasher, since Hasher may panic, in which 
    658            .          .           	// case we have not actually done a write. 
    659         10ms       10ms           	m.writing ^= 1 // toggle, see comment on writing 
    660            .          .            
    661            .          .           	if m.dirLen == 0 { 
    662            .       10ms           		m.deleteSmall(typ, hash, key) 
    663            .          .           	} else { 
    664            .          .           		idx := m.directoryIndex(hash) 
    665            .       10ms           		if m.directoryAt(idx).Delete(typ, m, hash, key) { 
    666            .          .           			m.tombstonePossible = true 
    667            .          .           		} 
    668            .          .           	} 
    669            .          .            
    670            .          .           	if m.used == 0 { 

internal/runtime/maps.(*Map).deleteSmall

/usr/lib/go/src/internal/runtime/maps/map.go

  Total:           0       10ms (flat, cum) 0.021%
    700            .          .           			if typ.IndirectKey() { 
    701            .          .           				// Clearing the pointer is sufficient. 
    702            .          .           				*(*unsafe.Pointer)(origSlotKey) = nil 
    703            .          .           			} else if typ.Key.Pointers() { 
    704            .          .           				// Only bother clearing if there are pointers. 
    705            .       10ms           				typedmemclr(typ.Key, slotKey) 
    706            .          .           			} 
    707            .          .            
    708            .          .           			slotElem := g.elem(typ, i) 
    709            .          .           			if typ.IndirectElem() { 
    710            .          .           				// Clearing the pointer is sufficient. 

runtime.rand

/usr/lib/go/src/runtime/rand.go

  Total:        50ms       80ms (flat, cum)  0.17%
    161            .          .           	// Note: We avoid acquirem here so that in the fast path 
    162            .          .           	// there is just a getg, an inlined c.Next, and a return. 
    163            .          .           	// The performance difference on a 16-core AMD is 
    164            .          .           	// 3.7ns/call this way versus 4.3ns/call with acquirem (+16%). 
    165            .          .           	mp := getg().m 
    166         20ms       20ms           	c := &mp.chacha8 
    167            .          .           	for { 
    168            .          .           		// Note: c.Next is marked nosplit, 
    169            .          .           		// so we don't need to use mp.locks 
    170            .          .           		// on the fast path, which is that the 
    171            .          .           		// first attempt succeeds. 
    172         20ms       20ms           		x, ok := c.Next() 

    173            .          .           		if ok { 
    174         10ms       10ms           			return x 
    175            .          .           		} 
    176            .          .           		mp.locks++ // hold m even though c.Refill may do stack split checks 
    177            .       30ms           		c.Refill() 
    178            .          .           		mp.locks-- 
    179            .          .           	} 

internal/runtime/maps.rand

/usr/lib/go/src/runtime/rand.go

  Total:        40ms      110ms (flat, cum)  0.23%
    181            .          .            
    182            .          .           //go:linkname maps_rand internal/runtime/maps.rand 
    183         30ms       30ms           func maps_rand() uint64 { 
    184         10ms       80ms           	return rand() 
    185            .          .           } 
    186            .          .            
    187            .          .           // mrandinit initializes the random state of an m. 
    188            .          .           func mrandinit(mp *m) { 
    189            .          .           	var seed [4]uint64 

runtime.cheaprand

/usr/lib/go/src/runtime/rand.go

  Total:       140ms      140ms (flat, cum)  0.29%
    223            .          .           // See go.dev/issue/67401. 
    224            .          .           // 
    225            .          .           //go:linkname cheaprand 
    226            .          .           //go:nosplit 
    227            .          .           func cheaprand() uint32 { 
    228         10ms       10ms           	mp := getg().m 
    229            .          .           	// Implement wyrand: https://github.com/wangyi-fudan/wyhash 
    230            .          .           	// Only the platform that math.Mul64 can be lowered 
    231            .          .           	// by the compiler should be in this list. 
    232            .          .           	if goarch.IsAmd64|goarch.IsArm64|goarch.IsPpc64| 
    233            .          .           		goarch.IsPpc64le|goarch.IsMips64|goarch.IsMips64le| 
    234            .          .           		goarch.IsS390x|goarch.IsRiscv64|goarch.IsLoong64 == 1 { 
    235         60ms       60ms           		mp.cheaprand += 0xa0761d6478bd642f 
    236         30ms       30ms           		hi, lo := math.Mul64(mp.cheaprand, mp.cheaprand^0xe7037ed1a0b428db) 
    237         40ms       40ms           		return uint32(hi ^ lo) 
    238            .          .           	} 
    239            .          .            
    240            .          .           	// Implement xorshift64+: 2 32-bit xorshift sequences added together. 
    241            .          .           	// Shift triplet [17,7,16] was calculated as indicated in Marsaglia's 
    242            .          .           	// Xorshift paper: https://www.jstatsoft.org/article/view/v008i14/xorshift.pdf 
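
Editor's note: the fast path above is wyrand: bump a per-m state by an odd constant, multiply the state with a xored copy of itself in 128 bits, and fold the halves together. A standalone sketch of the same recipe; this illustrates the math only and is no substitute for the runtime's per-m state handling:

    package main

    import (
        "fmt"
        "math/bits"
    )

    type wyrand32 struct{ state uint64 }

    // next applies the wyrand step: additive constant, 64x64->128
    // multiply, then xor-fold of the two product halves.
    func (w *wyrand32) next() uint32 {
        w.state += 0xa0761d6478bd642f
        hi, lo := bits.Mul64(w.state, w.state^0xe7037ed1a0b428db)
        return uint32(hi ^ lo)
    }

    func main() {
        var w wyrand32
        fmt.Println(w.next(), w.next(), w.next())
    }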

runtime.cheaprandn

/usr/lib/go/src/runtime/rand.go

  Total:       110ms      110ms (flat, cum)  0.23%
    288            .          .           // 
    289            .          .           //go:linkname cheaprandn 
    290            .          .           //go:nosplit 
    291            .          .           func cheaprandn(n uint32) uint32 { 
    292            .          .           	// See https://lemire.me/blog/2016/06/27/a-fast-alternative-to-the-modulo-reduction/ 
    293        110ms      110ms           	return uint32((uint64(cheaprand()) * uint64(n)) >> 32) 
    294            .          .           } 
    295            .          .            
    296            .          .           // Too much legacy code has go:linkname references 
    297            .          .           // to runtime.fastrand and friends, so keep these around for now. 
    298            .          .           // Code should migrate to math/rand/v2.Uint64, 
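
Editor's note: the 110ms line above is the multiply-shift reduction from the cited Lemire post. Instead of cheaprand() % n, the 32-bit sample is scaled by n and the high 32 bits are kept, avoiding a division. A standalone sketch, with math/rand/v2 standing in for cheaprand:

    package main

    import (
    	"fmt"
    	"math/rand/v2"
    )

    // randn maps a uniform 32-bit value into [0, n) without a modulo:
    // (x * n) >> 32 partitions the 32-bit input range into n nearly
    // equal bins, the same expression as rand.go:293 above.
    func randn(n uint32) uint32 {
    	return uint32((uint64(rand.Uint32()) * uint64(n)) >> 32)
    }

    func main() {
    	for range 5 {
    		fmt.Println(randn(10)) // always in [0, 10)
    	}
    }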

runtime.makechan

/usr/lib/go/src/runtime/chan.go

  Total:        20ms       90ms (flat, cum)  0.19%
     74            .          .            
     75            .          .           func makechan(t *chantype, size int) *hchan { 
     76            .          .           	elem := t.Elem 
     77            .          .            
     78            .          .           	// compiler checks this but be safe. 
     79         20ms       20ms           	if elem.Size_ >= 1<<16 { 
     80            .          .           		throw("makechan: invalid channel element type") 
     81            .          .           	} 
     82            .          .           	if hchanSize%maxAlign != 0 || elem.Align_ > maxAlign { 
     83            .          .           		throw("makechan: bad alignment") 
     84            .          .           	} 
     85            .          .            
     86            .          .           	mem, overflow := math.MulUintptr(elem.Size_, uintptr(size)) 
     87            .          .           	if overflow || mem > maxAlloc-hchanSize || size < 0 { 
     88            .          .           		panic(plainError("makechan: size out of range")) 
     89            .          .           	} 
     90            .          .            
     91            .          .           	// Hchan does not contain pointers interesting for GC when elements stored in buf do not contain pointers. 
     92            .          .           	// buf points into the same allocation, elemtype is persistent. 
     93            .          .           	// SudoG's are referenced from their owning thread so they can't be collected. 
     94            .          .           	// TODO(dvyukov,rlh): Rethink when collector can move allocated objects. 
     95            .          .           	var c *hchan 
     96            .          .           	switch { 
     97            .          .           	case mem == 0: 
     98            .          .           		// Queue or element size is zero. 
     99            .       10ms           		c = (*hchan)(mallocgc(hchanSize, nil, true)) 
    100            .          .           		// Race detector uses this location for synchronization. 
    101            .          .           		c.buf = c.raceaddr() 
    102            .          .           	case !elem.Pointers(): 
    103            .          .           		// Elements do not contain pointers. 
    104            .          .           		// Allocate hchan and buf in one call. 
    105            .          .           		c = (*hchan)(mallocgc(hchanSize+mem, nil, true)) 
    106            .          .           		c.buf = add(unsafe.Pointer(c), hchanSize) 
    107            .          .           	default: 
    108            .          .           		// Elements contain pointers. 
    109            .       40ms           		c = new(hchan) 
    110            .       20ms           		c.buf = mallocgc(mem, elem, true) 
    111            .          .           	} 
    112            .          .            
    113            .          .           	c.elemsize = uint16(elem.Size_) 
    114            .          .           	c.elemtype = elem 
    115            .          .           	c.dataqsiz = uint(size) 
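
Editor's note: the switch above picks one of three layouts: zero-size buffers get only the hchan header, pointer-free elements share one allocation with the header, and pointer-containing elements get a separate, GC-scannable buffer. A small sketch that makes the difference observable via testing.AllocsPerRun; allocation counts are an implementation detail, so treat the expected values as typical rather than guaranteed.

    package main

    import (
    	"fmt"
    	"testing"
    )

    func main() {
    	// Pointer-free element type: header and buffer in one allocation.
    	ints := testing.AllocsPerRun(100, func() { _ = make(chan int, 64) })
    	// Pointer element type: header and buffer allocated separately.
    	ptrs := testing.AllocsPerRun(100, func() { _ = make(chan *int, 64) })
    	fmt.Println(ints, ptrs) // typically 1 and 2
    }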

runtime.chanbuf

/usr/lib/go/src/runtime/chan.go

  Total:        10ms       10ms (flat, cum) 0.021%
    134            .          .           // Do not remove or change the type signature. 
    135            .          .           // See go.dev/issue/67401. 
    136            .          .           // 
    137            .          .           //go:linkname chanbuf 
    138            .          .           func chanbuf(c *hchan, i uint) unsafe.Pointer { 
    139         10ms       10ms           	return add(c.buf, uintptr(i)*uintptr(c.elemsize)) 
    140            .          .           } 
    141            .          .            
    142            .          .           // full reports whether a send on c would block (that is, the channel is full). 
    143            .          .           // It uses a single word-sized read of mutable state, so although 
    144            .          .           // the answer is instantaneously true, the correct answer may have changed 

runtime.chansend1

/usr/lib/go/src/runtime/chan.go

  Total:           0      850ms (flat, cum)  1.78%
    156            .          .            
    157            .          .           // entry point for c <- x from compiled code. 
    158            .          .           // 
    159            .          .           //go:nosplit 
    160            .          .           func chansend1(c *hchan, elem unsafe.Pointer) { 
    161            .      850ms           	chansend(c, elem, true, sys.GetCallerPC()) 
    162            .          .           } 
    163            .          .            
    164            .          .           /* 
    165            .          .            * generic single channel send/recv 
    166            .          .            * If block is not nil, 

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:        50ms       50ms (flat, cum)   0.1%
    188            .          .            
    189            .          .           	if raceenabled { 
    190            .          .           		racereadpc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(chansend)) 
    191            .          .           	} 
    192            .          .            
    193         50ms       50ms           	if c.bubble != nil && getg().bubble != c.bubble { 
    194            .          .           		fatal("send on synctest channel from outside bubble") 
    195            .          .           	} 
    196            .          .            
    197            .          .           	// Fast path: check for failed non-blocking operation without acquiring the lock. 
    198            .          .           	// 

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:       100ms      450ms (flat, cum)  0.94%
    217            .          .           	var t0 int64 
    218            .          .           	if blockprofilerate > 0 { 
    219            .          .           		t0 = cputicks() 
    220            .          .           	} 
    221            .          .            
    222         10ms      360ms           	lock(&c.lock) 
                                                           lockWithRank(l, getLockRank(l))                                      lock_spinbit.go:152
                                                           lock2(l)                                                             lockrank_off.go:24
 
    223            .          .            
    224         40ms       40ms           	if c.closed != 0 { 
    225            .          .           		unlock(&c.lock) 
    226            .          .           		panic(plainError("send on closed channel")) 
    227            .          .           	} 
    228            .          .            
    229         50ms       50ms           	if sg := c.recvq.dequeue(); sg != nil { 
                                                           if sgp == nil {                                                      chan.go:889
                                                           if y == nil {                                                        chan.go:893
                                                           if !sgp.g.selectDone.CompareAndSwap(0, 1) {                          chan.go:911
                                                               return Cas(&u.value, old, new)                                   types.go:236
 
    230            .          .           		// Found a waiting receiver. We pass the value we want to send 

runtime.chansend.func1

/usr/lib/go/src/runtime/chan.go

  Total:           0      290ms (flat, cum)  0.61%
    231            .          .           		// directly to the receiver, bypassing the channel buffer (if any). 
    232            .      290ms           		send(c, sg, ep, func() { unlock(&c.lock) }, 3) 
                                                           unlockWithRank(l)                                                    lock_spinbit.go:261
                                                           unlock2(l)                                                           lockrank_off.go:35
    233            .          .           		return true 
    234            .          .           	} 
    235            .          .            
    236            .          .           	if c.qcount < c.dataqsiz { 

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:           0       70ms (flat, cum)  0.15%
    238            .          .           		qp := chanbuf(c, c.sendx) 
    239            .          .           		if raceenabled { 
    240            .          .           			racenotify(c, c.sendx, nil) 
    241            .          .           		} 
    242            .       10ms           		typedmemmove(c.elemtype, qp, ep) 
    243            .          .           		c.sendx++ 
    244            .          .           		if c.sendx == c.dataqsiz { 
    245            .          .           			c.sendx = 0 
    246            .          .           		} 
    247            .          .           		c.qcount++ 
    248            .       60ms           		unlock(&c.lock) 
                                                           unlockWithRank(l)                                                    lock_spinbit.go:261
                                                           unlock2(l)                                                           lockrank_off.go:35
 
    249            .          .           		return true 
    250            .          .           	} 
    251            .          .            
    252            .          .           	if !block { 
    253            .          .           		unlock(&c.lock) 
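
Editor's note: lines 238-249 above are a plain ring-buffer enqueue under the channel lock: copy into the slot at sendx, wrap at capacity, bump qcount. A generic sketch of the same ring discipline, with hypothetical types and no locking:

    package chanring

    // ring is a fixed-capacity FIFO mirroring hchan's buf/sendx/qcount.
    type ring[T any] struct {
    	buf   []T
    	sendx uint
    	count uint
    }

    // push copies v into the slot at sendx and wraps the index, the same
    // wraparound pattern as chansend above; a full ring reports false
    // where the runtime would park the sender instead.
    func (r *ring[T]) push(v T) bool {
    	if r.count == uint(len(r.buf)) {
    		return false
    	}
    	r.buf[r.sendx] = v
    	r.sendx++
    	if r.sendx == uint(len(r.buf)) {
    		r.sendx = 0
    	}
    	r.count++
    	return true
    }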

runtime.chansend

/usr/lib/go/src/runtime/chan.go

  Total:        10ms       20ms (flat, cum) 0.042%
    278            .          .           	gp.parkingOnChan.Store(true) 
    279            .          .           	reason := waitReasonChanSend 
    280            .          .           	if c.bubble != nil { 
    281            .          .           		reason = waitReasonSynctestChanSend 
    282            .          .           	} 
    283            .       10ms           	gopark(chanparkcommit, unsafe.Pointer(&c.lock), reason, traceBlockChanSend, 2) 
    284            .          .           	// Ensure the value being sent is kept alive until the 
    285            .          .           	// receiver copies it out. The sudog has a pointer to the 
    286            .          .           	// stack object, but sudogs aren't considered as roots of the 
    287            .          .           	// stack tracer. 
    288            .          .           	KeepAlive(ep) 
    289            .          .            
    290            .          .           	// someone woke us up. 
    291            .          .           	if mysg != gp.waiting { 
    292            .          .           		throw("G waiting list is corrupted") 
    293            .          .           	} 
    294            .          .           	gp.waiting = nil 
    295            .          .           	gp.activeStackChans = false 
    296            .          .           	closed := !mysg.success 
    297         10ms       10ms           	gp.param = nil 
    298            .          .           	if mysg.releasetime > 0 { 
    299            .          .           		blockevent(mysg.releasetime-t0, 2) 
    300            .          .           	} 
    301            .          .           	mysg.c = nil 
    302            .          .           	releaseSudog(mysg) 
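
Editor's note: the KeepAlive(ep) call above pins the sent value until the receiver has copied it out, because sudog pointers into the stack are invisible to the stack tracer. The same tool matters in user code whenever liveness must outlast the last syntactic use; a variation on the example in the runtime.KeepAlive documentation:

    package main

    import (
    	"os"
    	"runtime"
    )

    func main() {
    	f, err := os.Open("/dev/null")
    	if err != nil {
    		return
    	}
    	fd := f.Fd()
    	_ = fd // work with the raw descriptor...
    	// Without this, f could be cleaned up (closing fd) before the
    	// work above finishes, since f is otherwise dead here.
    	runtime.KeepAlive(f)
    }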

runtime.send

/usr/lib/go/src/runtime/chan.go

  Total:           0      260ms (flat, cum)  0.55%
    339            .          .           	if sg.elem != nil { 
    340            .          .           		sendDirect(c.elemtype, sg, ep) 
    341            .          .           		sg.elem = nil 
    342            .          .           	} 
    343            .          .           	gp := sg.g 
    344            .       30ms           	unlockf() 
    345            .          .           	gp.param = unsafe.Pointer(sg) 
    346            .          .           	sg.success = true 
    347            .          .           	if sg.releasetime != 0 { 
    348            .          .           		sg.releasetime = cputicks() 
    349            .          .           	} 
    350            .      230ms           	goready(gp, skip+1) 
                                                           systemstack(func() {                                                 proc.go:480
 
    351            .          .           } 
    352            .          .            
    353            .          .           // timerchandrain removes all elements in channel c's buffer. 
    354            .          .           // It reports whether any elements were removed. 
    355            .          .           // Because it is only intended for timers, it does not 

runtime.closechan

/usr/lib/go/src/runtime/chan.go

  Total:        30ms       80ms (flat, cum)  0.17%
    413            .          .            
    414            .          .           func closechan(c *hchan) { 
    415            .          .           	if c == nil { 
    416            .          .           		panic(plainError("close of nil channel")) 
    417            .          .           	} 
    418         10ms       10ms           	if c.bubble != nil && getg().bubble != c.bubble { 
    419            .          .           		fatal("close of synctest channel from outside bubble") 
    420            .          .           	} 
    421            .          .            
    422         10ms       60ms           	lock(&c.lock) 
                                                           lockWithRank(l, getLockRank(l))                                      lock_spinbit.go:152
                                                           lock2(l)                                                             lockrank_off.go:24
    423            .          .           	if c.closed != 0 { 
    424            .          .           		unlock(&c.lock) 
    425            .          .           		panic(plainError("close of closed channel")) 
    426            .          .           	} 
    427            .          .            
    428            .          .           	if raceenabled { 
    429            .          .           		callerpc := sys.GetCallerPC() 
    430            .          .           		racewritepc(c.raceaddr(), callerpc, abi.FuncPCABIInternal(closechan)) 
    431            .          .           		racerelease(c.raceaddr()) 
    432            .          .           	} 
    433            .          .            
    434            .          .           	c.closed = 1 
    435            .          .            
    436            .          .           	var glist gList 
    437            .          .            
    438            .          .           	// release all readers 
    439            .          .           	for { 
    440         10ms       10ms           		sg := c.recvq.dequeue() 
                                                           if y == nil {                                                        chan.go:893
 
    441            .          .           		if sg == nil { 
    442            .          .           			break 
    443            .          .           		} 
    444            .          .           		if sg.elem != nil { 
    445            .          .           			typedmemclr(c.elemtype, sg.elem) 

runtime.closechan

/usr/lib/go/src/runtime/chan.go

  Total:           0       20ms (flat, cum) 0.042%
    473            .          .           		if raceenabled { 
    474            .          .           			raceacquireg(gp, c.raceaddr()) 
    475            .          .           		} 
    476            .          .           		glist.push(gp) 
    477            .          .           	} 
    478            .       20ms           	unlock(&c.lock) 
                                                           unlockWithRank(l)                                                    lock_spinbit.go:261
                                                           unlock2(l)                                                           lockrank_off.go:35
 
    479            .          .            
    480            .          .           	// Ready all Gs now that we've dropped the channel lock. 
    481            .          .           	for !glist.empty() { 
    482            .          .           		gp := glist.pop() 
    483            .          .           		gp.schedlink = 0 
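
Editor's note: closechan collects every waiter into a local gList while holding the lock and only readies them after unlock, so no goroutine is woken while the channel lock is still held. A sketch of the same pattern with hypothetical user-level types:

    package main

    import "sync"

    // drainAndWake empties the waiter list under the lock, then wakes
    // everyone outside it, mirroring closechan's glist handling above.
    func drainAndWake(mu *sync.Mutex, waiters *[]chan struct{}) {
    	mu.Lock()
    	local := *waiters
    	*waiters = nil
    	mu.Unlock()
    	for _, w := range local {
    		close(w) // wake outside the lock
    	}
    }

    func main() {
    	var mu sync.Mutex
    	w := make(chan struct{})
    	waiters := []chan struct{}{w}
    	go drainAndWake(&mu, &waiters)
    	<-w // returns once drainAndWake closes the channel
    }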

runtime.recv

/usr/lib/go/src/runtime/chan.go

  Total:           0       20ms (flat, cum) 0.042%
    734            .          .           		} 
    735            .          .           		c.sendx = c.recvx // c.sendx = (c.sendx+1) % c.dataqsiz 
    736            .          .           	} 
    737            .          .           	sg.elem = nil 
    738            .          .           	gp := sg.g 
    739            .       10ms           	unlockf() 
    740            .          .           	gp.param = unsafe.Pointer(sg) 
    741            .          .           	sg.success = true 
    742            .          .           	if sg.releasetime != 0 { 
    743            .          .           		sg.releasetime = cputicks() 
    744            .          .           	} 
    745            .       10ms           	goready(gp, skip+1) 
                                                           systemstack(func() {                                                 proc.go:480
 
    746            .          .           } 
    747            .          .            
    748            .          .           func chanparkcommit(gp *g, chanLock unsafe.Pointer) bool { 
    749            .          .           	// There are unlocked sudogs that point into gp's stack. Stack 
    750            .          .           	// copying must lock the channels of those sudogs. 

runtime.chanparkcommit

/usr/lib/go/src/runtime/chan.go

  Total:           0       10ms (flat, cum) 0.021%
    759            .          .           	// Make sure we unlock after setting activeStackChans and 
    760            .          .           	// unsetting parkingOnChan. The moment we unlock chanLock 
    761            .          .           	// we risk gp getting readied by a channel operation and 
    762            .          .           	// so gp could continue running before everything before 
    763            .          .           	// the unlock is visible (even to gp itself). 
    764            .       10ms           	unlock((*mutex)(chanLock)) 
                                                           unlockWithRank(l)                                                    lock_spinbit.go:261
                                                           unlock2(l)                                                           lockrank_off.go:35
 
    765            .          .           	return true 
    766            .          .           } 
    767            .          .            
    768            .          .           // compiler implements 
    769            .          .           // 

runtime.(*waitq).enqueue

/usr/lib/go/src/runtime/chan.go

  Total:        10ms       10ms (flat, cum) 0.021%
    871            .          .            
    872            .          .           func (q *waitq) enqueue(sgp *sudog) { 
    873            .          .           	sgp.next = nil 
    874            .          .           	x := q.last 
    875            .          .           	if x == nil { 
    876         10ms       10ms           		sgp.prev = nil 
    877            .          .           		q.first = sgp 
    878            .          .           		q.last = sgp 
    879            .          .           		return 
    880            .          .           	} 
    881            .          .           	sgp.prev = x 

runtime.(*waitq).dequeue

/usr/lib/go/src/runtime/chan.go

  Total:        70ms       70ms (flat, cum)  0.15%
    882            .          .           	x.next = sgp 
    883            .          .           	q.last = sgp 
    884            .          .           } 
    885            .          .            
    886            .          .           func (q *waitq) dequeue() *sudog { 
    887         10ms       10ms           	for { 
    888            .          .           		sgp := q.first 
    889         10ms       10ms           		if sgp == nil { 
    890            .          .           			return nil 
    891            .          .           		} 
    892            .          .           		y := sgp.next 
    893         30ms       30ms           		if y == nil { 
    894            .          .           			q.first = nil 
    895            .          .           			q.last = nil 
    896            .          .           		} else { 
    897            .          .           			y.prev = nil 
    898            .          .           			q.first = y 
    899            .          .           			sgp.next = nil // mark as removed (see dequeueSudoG) 
    900            .          .           		} 
    901            .          .            
    902            .          .           		// if a goroutine was put on this queue because of a 
    903            .          .           		// select, there is a small window between the goroutine 
    904            .          .           		// being woken up by a different case and it grabbing the 
    905            .          .           		// channel locks. Once it has the lock 
    906            .          .           		// it removes itself from the queue, so we won't see it after that. 
    907            .          .           		// We use a flag in the G struct to tell us when someone 
    908            .          .           		// else has won the race to signal this goroutine but the goroutine 
    909            .          .           		// hasn't removed itself from the queue yet. 
    910            .          .           		if sgp.isSelect { 
    911         20ms       20ms           			if !sgp.g.selectDone.CompareAndSwap(0, 1) { 
                                                           return Cas(&u.value, old, new)                                       types.go:236
 
    912            .          .           				// We lost the race to wake this goroutine. 
    913            .          .           				continue 
    914            .          .           			} 
    915            .          .           		} 
    916            .          .            
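
Editor's note: the CompareAndSwap above resolves a race: a goroutine blocked in select sits on several wait queues at once, and two channel operations may try to wake it simultaneously. Only the CAS winner may proceed. A standalone sketch of the claim protocol:

    package main

    import (
    	"fmt"
    	"sync"
    	"sync/atomic"
    )

    func main() {
    	// Two wakers race to claim the same select-blocked goroutine;
    	// CompareAndSwap(0, 1) guarantees exactly one winner, mirroring
    	// sgp.g.selectDone above.
    	var done atomic.Uint32
    	var wg sync.WaitGroup
    	wins := make(chan string, 2)
    	for _, name := range []string{"caseA", "caseB"} {
    		wg.Add(1)
    		go func() {
    			defer wg.Done()
    			if done.CompareAndSwap(0, 1) {
    				wins <- name
    			}
    		}()
    	}
    	wg.Wait()
    	close(wins)
    	for winner := range wins {
    		fmt.Println("woken by", winner) // printed exactly once
    	}
    }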

git.urbach.dev/cli/q/src/codegen.(*Function).hintABI

/home/user/q/src/codegen/hintABI.go

  Total:       140ms      190ms (flat, cum)   0.4%
      4            .          .           	"git.urbach.dev/cli/q/src/ssa" 
      5            .          .           ) 
      6            .          .            
      7            .          .           // hintABI recommends ABI registers that a value must reside in later on. 
      8            .          .           // These register hints have the highest priority. 
      9         20ms       20ms           func (f *Function) hintABI(step *Step) { 
     10         50ms       50ms           	switch instr := step.Value.(type) { 
     11         10ms       10ms           	case *ssa.Call: 
     12         10ms       10ms           		for paramIndex, param := range instr.Arguments { 
     13         50ms      100ms           			f.ValueToStep[param].hint(f.CPU.Call.In[paramIndex]) 
                                                           if len(s.Hints) == 0 {                                               Step.go:22
                                                           s.Hints = append(s.Hints, reg)                                       Step.go:26
 
     14            .          .           		} 
     15            .          .           	case *ssa.CallExtern: 
     16            .          .           		for r, param := range instr.Arguments { 
     17            .          .           			if r >= len(f.CPU.ExternCall.In) { 
     18            .          .           				// Temporary hack to allow arguments 5 and 6 to be hinted as r10 and r11, then pushed later. 

git.urbach.dev/cli/q/src/codegen.(*Function).hintABI

/home/user/q/src/codegen/hintABI.go

  Total:        90ms      200ms (flat, cum)  0.42%
     29            .          .           		if step.Register == -1 { 
     30            .          .           			step.Register = f.CPU.Call.Out[instr.Index] 
     31            .          .           		} 
     32            .          .           	case *ssa.Parameter: 
     33            .          .           		if step.Register == -1 { 
     34         10ms       10ms           			step.Register = f.CPU.Call.In[instr.Index] 
     35            .          .           		} 
     36            .          .            
     37            .       10ms           		for _, user := range step.Value.Users() { 
     38         20ms       20ms           			switch user := user.(type) { 
     39            .          .           			case *ssa.BinaryOp: 
     40            .          .           				if !user.Op.IsComparison() && user.Left == step.Value { 
     41            .          .           					f.ValueToStep[user].hint(step.Register) 
     42            .          .           				} 
     43            .          .           			case *ssa.Phi: 
     44            .          .           				f.ValueToStep[user].hint(step.Register) 
     45         10ms       10ms           			case *ssa.UnaryOp: 
     46            .          .           				f.ValueToStep[user].hint(step.Register) 
     47            .          .           			} 
     48            .          .           		} 
     49            .          .           	case *ssa.Register: 
     50            .          .           		if step.Register == -1 { 
     51            .          .           			step.Register = instr.Register 
     52            .          .           		} 
     53         10ms       10ms           	case *ssa.Return: 
     54            .          .           		for r, param := range instr.Arguments { 
     55         20ms       80ms           			f.ValueToStep[param].hint(f.CPU.Call.Out[r]) 
                                                           if len(s.Hints) == 0 {                                               Step.go:22
                                                           s.Hints = append(s.Hints, reg)                                       Step.go:26
     56            .          .           		} 
     57            .          .           	case *ssa.Syscall: 
     58         10ms       10ms           		for r, param := range instr.Arguments { 
     59         10ms       50ms           			f.ValueToStep[param].hint(f.CPU.Syscall.In[r]) 
                                                           s.Hints = append(s.Hints, reg)                                       Step.go:26
 
     60            .          .           		} 
     61            .          .           	} 
     62            .          .           } 
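
Editor's note: per the Step.go:22/26 frames inlined above, a hint is only recorded when the step has none yet, so the first (highest-priority) hint wins. A compilable simplification of that first-wins rule with hypothetical types, not the q compiler's actual API:

    package codegen

    // Reg is a hypothetical register index.
    type Reg int8

    // Step carries the register wishes for one SSA value.
    type Step struct {
    	Hints []Reg
    }

    // hint records reg only if no earlier (higher-priority) hint exists,
    // the same first-wins behavior as the inlined Step.go lines above.
    func (s *Step) hint(reg Reg) {
    	if len(s.Hints) == 0 {
    		s.Hints = append(s.Hints, reg)
    	}
    }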

git.urbach.dev/cli/q/src/codegen.(*Function).markAlive

/home/user/q/src/codegen/markAlive.go

  Total:       210ms      640ms (flat, cum)  1.34%
      6            .          .           	"git.urbach.dev/cli/q/src/ssa" 
      7            .          .           ) 
      8            .          .            
      9            .          .           // markAlive marks the `live` value in the `block` as alive and recursively 
     10            .          .           // proceeds in the predecessors of `block` if they can reach the definition. 
     11            .      300ms           func (f *Function) markAlive(live *Step, block *ssa.Block, use *Step, first bool) { 
     12            .          .           	if use.Block == block { 
     13         10ms       10ms           		phi, isPhi := use.Value.(*ssa.Phi) 
     14            .          .            
     15            .          .           		if isPhi { 
     16            .          .           			index := phi.Arguments.Index(live.Value) 
     17            .          .           			pre := block.Predecessors[index] 
     18            .       70ms           			f.markAlive(live, pre, use, false) 
     19            .          .           			return 
     20            .          .           		} 
     21            .          .           	} 
     22            .          .            
     23            .       40ms           	region := f.BlockToRegion[block] 
     24            .          .            
     25            .          .           	if first && use.Block == block && (block.Loop == nil || live.Block.Loop != nil) { 
     26            .          .           		region.End = uint32(use.Index) 
     27            .          .           	} 
     28            .          .            
     29         20ms       20ms           	steps := f.Steps[region.Start:region.End] 
     30            .          .            
     31        180ms      200ms           	for _, current := range slices.Backward(steps) { 
                                                           for i := len(s) - 1; i >= 0; i-- {                                   iter.go:28
                                                           if !yield(i, s[i]) {                                                 iter.go:29
                                                           if slices.Contains(current.Live, live) {                             markAlive.go:32
                                                               return Index(s, v) >= 0                                          slices.go:118
                                                                   for i := range s {                                           slices.go:97
                                                                   if v == s[i] {                                               slices.go:98
                                                           current.Live = append(current.Live, live)                            markAlive.go:36
                                                           if live.Value == current.Value {                                     markAlive.go:38
 
git.urbach.dev/cli/q/src/codegen.(*Function).markAlive-range1

/home/user/q/src/codegen/markAlive.go

  Total:        80ms      100ms (flat, cum)  0.21%
     32         40ms       40ms           		if slices.Contains(current.Live, live) { 
                                                           return Index(s, v) >= 0                                              slices.go:118
                                                               for i := range s {                                               slices.go:97
                                                               if v == s[i] {                                                   slices.go:98
     33            .          .           			return 
     34            .          .           		} 
     35            .          .            
     36         20ms       20ms           		current.Live = append(current.Live, live) 
     37            .          .            
     38         20ms       40ms           		if live.Value == current.Value { 
     39            .          .           			_, isParam := current.Value.(*ssa.Parameter) 
     40            .          .           			_, isPhi := current.Value.(*ssa.Phi) 
     41            .          .            

git.urbach.dev/cli/q/src/codegen.(*Function).markAlive

/home/user/q/src/codegen/markAlive.go

  Total:        50ms      370ms (flat, cum)  0.78%
     43            .          .           				return 
     44            .          .           			} 
     45            .          .           		} 
     46         10ms       10ms           	} 
     47            .          .            
     48         10ms       10ms           	for _, pre := range block.Predecessors { 
     49         10ms       10ms           		if pre == block { 
     50            .          .           			continue 
     51            .          .           		} 
     52            .          .            
     53         10ms       40ms           		if !pre.CanReachPredecessor(live.Block) { 
                                                           return b.canReachPredecessor(other, make(map[*Block]bool))           Block.go:155
 
     54            .          .           			continue 
     55            .          .           		} 
     56            .          .            
     57            .      290ms           		f.markAlive(live, pre, use, false) 
     58            .          .           	} 
     59         10ms       10ms           } 
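
Editor's note: markAlive extends a value's live range backward from each use: it walks the block's steps in reverse, recording liveness until it reaches the definition, and recurses into predecessors that can reach the defining block. A simplified, compilable sketch of the in-block part only, with hypothetical types:

    package liveness

    import "slices"

    type step struct {
    	live []*step // values live at this step
    }

    // markAliveIn walks steps backward, adding live to each step's live
    // set; it stops early if live was already recorded (a previous walk
    // covered the rest) or once it reaches the defining step itself.
    func markAliveIn(steps []*step, live *step) {
    	for i := len(steps) - 1; i >= 0; i-- {
    		current := steps[i]
    		if slices.Contains(current.live, live) {
    			return
    		}
    		current.live = append(current.live, live)
    		if current == live {
    			return
    		}
    	}
    }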

internal/filepathlite.(*lazybuf).append

/usr/lib/go/src/internal/filepathlite/path.go

  Total:        60ms       60ms (flat, cum)  0.13%
     36            .          .           	return b.path[i] 
     37            .          .           } 
     38            .          .            
     39            .          .           func (b *lazybuf) append(c byte) { 
     40            .          .           	if b.buf == nil { 
     41         40ms       40ms           		if b.w < len(b.path) && b.path[b.w] == c { 
     42         20ms       20ms           			b.w++ 
     43            .          .           			return 
     44            .          .           		} 
     45            .          .           		b.buf = make([]byte, len(b.path)) 
     46            .          .           		copy(b.buf, b.path[:b.w]) 
     47            .          .           	} 
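
Editor's note: lazybuf is a copy-on-write output buffer. While every appended byte matches the input in place, it only advances an index; it allocates a real buffer on the first divergence, so cleaning an already-clean path allocates nothing. The listing above is truncated; a complete minimal version of the idea, with volume-name handling omitted:

    package pathclean

    type lazybuf struct {
    	path string // the input being rewritten
    	buf  []byte // nil until output first differs from input
    	w    int    // write position
    }

    func (b *lazybuf) append(c byte) {
    	if b.buf == nil {
    		if b.w < len(b.path) && b.path[b.w] == c {
    			b.w++ // output still equals input: just advance
    			return
    		}
    		b.buf = make([]byte, len(b.path))
    		copy(b.buf, b.path[:b.w]) // first divergence: materialize
    	}
    	b.buf[b.w] = c
    	b.w++
    }

    func (b *lazybuf) string() string {
    	if b.buf == nil {
    		return b.path[:b.w]
    	}
    	return string(b.buf[:b.w])
    }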

internal/filepathlite.(*lazybuf).string

/usr/lib/go/src/internal/filepathlite/path.go

  Total:        10ms       10ms (flat, cum) 0.021%
     54            .          .           	b.w += len(prefix) 
     55            .          .           } 
     56            .          .            
     57            .          .           func (b *lazybuf) string() string { 
     58            .          .           	if b.buf == nil { 
     59         10ms       10ms           		return b.volAndPath[:b.volLen+b.w] 
     60            .          .           	} 
     61            .          .           	return b.volAndPath[:b.volLen] + string(b.buf[:b.w]) 

internal/filepathlite.Clean

/usr/lib/go/src/internal/filepathlite/path.go

  Total:        70ms       70ms (flat, cum)  0.15%
     63            .          .            
     64            .          .           // Clean is filepath.Clean. 
     65         10ms       10ms           func Clean(path string) string { 
     66            .          .           	originalPath := path 
     67            .          .           	volLen := volumeNameLen(path) 
     68            .          .           	path = path[volLen:] 
     69            .          .           	if path == "" { 
     70            .          .           		if volLen > 1 && IsPathSeparator(originalPath[0]) && IsPathSeparator(originalPath[1]) { 
     71            .          .           			// should be UNC 
     72            .          .           			return FromSlash(originalPath) 
     73            .          .           		} 
     74            .          .           		return originalPath + "." 
     75            .          .           	} 
     76         20ms       20ms           	rooted := IsPathSeparator(path[0]) 
                                                           return Separator == c                                                path_unix.go:20
 
     77            .          .            
     78            .          .           	// Invariants: 
     79            .          .           	//	reading from path; r is index of next byte to process. 
     80            .          .           	//	writing to buf; w is index of next byte to write. 
     81            .          .           	//	dotdot is index in buf where .. must stop, either because 
     82            .          .           	//		it is the leading slash or it is a leading ../../.. prefix. 
     83            .          .           	n := len(path) 
     84         10ms       10ms           	out := lazybuf{path: path, volAndPath: originalPath, volLen: volLen} 
     85            .          .           	r, dotdot := 0, 0 
     86            .          .           	if rooted { 
     87            .          .           		out.append(Separator) 
     88            .          .           		r, dotdot = 1, 1 
     89            .          .           	} 
     90            .          .            
     91         10ms       10ms           	for r < n { 
     92            .          .           		switch { 
     93         20ms       20ms           		case IsPathSeparator(path[r]): 
     94            .          .           			// empty path element 
     95            .          .           			r++ 
     96            .          .           		case path[r] == '.' && (r+1 == n || IsPathSeparator(path[r+1])): 
     97            .          .           			// . element 
     98            .          .           			r++ 

internal/filepathlite.Clean

/usr/lib/go/src/internal/filepathlite/path.go

  Total:       120ms      120ms (flat, cum)  0.25%
    117            .          .           			} 
    118            .          .           		default: 
    119            .          .           			// real path element. 
    120            .          .           			// add slash if needed 
    121            .          .           			if rooted && out.w != 1 || !rooted && out.w != 0 { 
    122         20ms       20ms           				out.append(Separator) 
                                                           if b.w < len(b.path) && b.path[b.w] == c {                           path.go:41
                                                               b.w++                                                            path.go:42
 
    123            .          .           			} 
    124            .          .           			// copy element 
    125         40ms       40ms           			for ; r < n && !IsPathSeparator(path[r]); r++ { 
                                                           return Separator == c                                                path_unix.go:20
 
    126         40ms       40ms           				out.append(path[r]) 
                                                           if b.w < len(b.path) && b.path[b.w] == c {                           path.go:41
                                                               b.w++                                                            path.go:42
 
    127            .          .           			} 
    128            .          .           		} 
    129            .          .           	} 
    130            .          .            
    131            .          .           	// Turn empty string into "." 
    132         10ms       10ms           	if out.w == 0 { 
    133            .          .           		out.append('.') 
    134            .          .           	} 
    135            .          .            
    136            .          .           	postClean(&out) // avoid creating absolute paths on Windows 
    137         10ms       10ms           	return FromSlash(out.string()) 
                                                           return b.volAndPath[:b.volLen+b.w]                                   path.go:59
 
    138            .          .           } 
    139            .          .            
    140            .          .           // IsLocal is filepath.IsLocal. 
    141            .          .           func IsLocal(path string) bool { 
    142            .          .           	return isLocal(path) 
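
Editor's note: path/filepath.Clean is backed by this function, so its documented rewrites are what the loop above implements. A few sample inputs and outputs (Unix separators):

    package main

    import (
    	"fmt"
    	"path/filepath"
    )

    func main() {
    	fmt.Println(filepath.Clean("a//b/./c")) // "a/b/c"
    	fmt.Println(filepath.Clean("a/b/../c")) // "a/c"
    	fmt.Println(filepath.Clean("/../x"))    // "/x"
    	fmt.Println(filepath.Clean(""))         // "."
    }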

internal/filepathlite.Base

/usr/lib/go/src/internal/filepathlite/path.go

  Total:        30ms       30ms (flat, cum) 0.063%
    220            .          .           	} 
    221            .          .           	return "" 
    222            .          .           } 
    223            .          .            
    224            .          .           // Base is filepath.Base. 
    225         10ms       10ms           func Base(path string) string { 
    226            .          .           	if path == "" { 
    227            .          .           		return "." 
    228            .          .           	} 
    229            .          .           	// Strip trailing slashes. 
    230            .          .           	for len(path) > 0 && IsPathSeparator(path[len(path)-1]) { 
    231            .          .           		path = path[0 : len(path)-1] 
    232            .          .           	} 
    233            .          .           	// Throw away volume name 
    234            .          .           	path = path[len(VolumeName(path)):] 
    235            .          .           	// Find the last element 
    236            .          .           	i := len(path) - 1 
    237            .          .           	for i >= 0 && !IsPathSeparator(path[i]) { 
    238         10ms       10ms           		i-- 
    239            .          .           	} 
    240            .          .           	if i >= 0 { 
    241            .          .           		path = path[i+1:] 
    242            .          .           	} 
    243            .          .           	// If empty now, it had only slashes. 
    244            .          .           	if path == "" { 
    245            .          .           		return string(Separator) 
    246            .          .           	} 
    247         10ms       10ms           	return path 
    248            .          .           } 
    249            .          .            
    250            .          .           // Dir is filepath.Dir. 
    251            .          .           func Dir(path string) string { 
    252            .          .           	vol := VolumeName(path) 

git.urbach.dev/cli/q/src/asm.(*Assembler).Append

/home/user/q/src/asm/Assembler.go

  Total:        50ms      360ms (flat, cum)  0.76%
     16            .          .           	Libraries    dll.List 
     17            .          .           } 
     18            .          .            
     19            .          .           // Append adds another instruction. 
     20            .          .           func (a *Assembler) Append(instr Instruction) { 
     21            .      180ms           	if a.Skip(instr) { 
     22            .          .           		return 
     23            .          .           	} 
     24            .          .            
     25         50ms      180ms           	a.Instructions = append(a.Instructions, instr) 
     26            .          .           } 
     27            .          .            

git.urbach.dev/cli/q/src/asm.(*Assembler).Last

/home/user/q/src/asm/Assembler.go

  Total:        10ms       10ms (flat, cum) 0.021%
     28            .          .           // Last returns the last instruction. 
     29            .          .           func (a *Assembler) Last() Instruction { 
     30         10ms       10ms           	return a.Instructions[len(a.Instructions)-1] 
     31            .          .           } 
     32            .          .            
     33            .          .           // Compile compiles the instructions to machine code. 
     34            .          .           func (a *Assembler) Compile(build *config.Build) (code []byte, data []byte, libs dll.List) { 
     35            .          .           	data, dataLabels := a.Data.Finalize() 

git.urbach.dev/cli/q/src/asm.(*Assembler).Skip

/home/user/q/src/asm/Assembler.go

  Total:       150ms      170ms (flat, cum)  0.36%
    102            .          .           func (a *Assembler) SetLast(instr Instruction) { 
    103            .          .           	a.Instructions[len(a.Instructions)-1] = instr 
    104            .          .           } 
    105            .          .            
    106            .          .           // Skip returns true if appending the instruction can be skipped. 
    107         30ms       30ms           func (a *Assembler) Skip(instr Instruction) bool { 
    108         30ms       30ms           	if len(a.Instructions) == 0 { 
    109            .          .           		return false 
    110            .          .           	} 
    111            .          .            
    112            .          .           	// Call to os.exit + anything is skipped if it's not a label 
    113         10ms       10ms           	call, isCall := a.Last().(*Call) 
                                                           return a.Instructions[len(a.Instructions)-1]                         Assembler.go:30
 
    114            .          .            
    115         10ms       10ms           	if isCall && call.Label == "run.exit" { 
    116            .          .           		switch instr.(type) { 
    117            .          .           		case *Label: 
    118            .          .           		default: 
    119            .          .           			return true 
    120            .          .           		} 
    121            .          .           	} 
    122            .          .            
    123         60ms       60ms           	switch instr := instr.(type) { 
    124            .          .           	case *Label: 
    125            .          .           		// Jump + Label can be replaced by just the Label if both addresses are equal 
    126         10ms       10ms           		jump, isJump := a.Last().(*Jump) 
    127            .          .            
    128            .       20ms           		if isJump && jump.Label == instr.Name { 
    129            .          .           			a.SetLast(instr) 
    130            .          .           			return true 
    131            .          .           		} 
    132            .          .            
    133            .          .           	case *Move: 

git.urbach.dev/cli/q/src/asm.(*Assembler).Skip

/home/user/q/src/asm/Assembler.go

  Total:        10ms       10ms (flat, cum) 0.021%
    168            .          .           		if isJump && jump.Condition == token.Invalid { 
    169            .          .           			return true 
    170            .          .           		} 
    171            .          .            
    172            .          .           		// Return + Return is unnecessary 
    173         10ms       10ms           		_, isReturn := a.Last().(*Return) 
    174            .          .            
    175            .          .           		if isReturn { 
    176            .          .           			return true 
    177            .          .           		} 
    178            .          .           	} 
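
Editor's note: Skip is a one-instruction peephole: it looks only at the most recently appended instruction to drop dead code (anything after a call to run.exit, a Return after a Return) or fuse a Jump with the Label it targets. A sketch of the same shape over a hypothetical mini-IR; the real method also rewrites Moves, and replaces the Jump via SetLast rather than skipping the Label:

    package asm

    type instr interface{ isInstr() }

    type ret struct{}
    type jump struct{ label string }
    type label struct{ name string }

    func (ret) isInstr()   {}
    func (jump) isInstr()  {}
    func (label) isInstr() {}

    // skip reports whether appending next is pointless given last.
    func skip(last, next instr) bool {
    	switch n := next.(type) {
    	case ret:
    		_, isRet := last.(ret)
    		return isRet // return directly after return is unreachable
    	case label:
    		if j, ok := last.(jump); ok && j.label == n.name {
    			return true // jump to the immediately following label
    		}
    	}
    	return false
    }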

git.urbach.dev/cli/q/src/codegen.createSteps

/home/user/q/src/codegen/createSteps.go

  Total:       200ms      2.10s (flat, cum)  4.41%
      1            .          .           package codegen 
      2            .          .            
      3            .          .           import "git.urbach.dev/cli/q/src/ssa" 
      4            .          .            
      5            .          .           // createSteps builds a series of instructions from the SSA values in the IR. 
      6         20ms       20ms           func createSteps(ir ssa.IR) IR { 
      7         50ms       50ms           	count := ir.CountValues() + len(ir.Blocks) - 1 
                                                           for _, block := range ir.Blocks {                                    IR.go:45
                                                           count += len(block.Instructions)                                    IR.go:46
      8            .      600ms           	storage := make([]Step, count) 
      9            .      160ms           	steps := make([]*Step, count) 
     10         20ms      360ms           	valueToStep := make(map[ssa.Value]*Step, count) 
     11            .       30ms           	blockToRegion := make(map[*ssa.Block]region, len(ir.Blocks)) 
     12            .          .           	i := 0 
     13            .          .            
     14         10ms       10ms           	for _, block := range ir.Blocks { 
     15         10ms       10ms           		if block != ir.Blocks[0] { 
     16            .          .           			step := &storage[i] 
     17            .          .           			step.Index = i 
     18         30ms       60ms           			step.Value = &Label{Name: block.Label} 
     19            .          .           			step.Block = block 
     20            .          .           			step.Register = -1 
     21            .          .           			steps[i] = step 
     22            .          .           			i++ 
     23            .          .           		} 
     24            .          .            
     25            .      210ms           		blockToRegion[block] = region{ 
     26         10ms       10ms           			Start: uint32(i), 
     27            .          .           			End:   uint32(i + len(block.Instructions)), 
     28            .          .           		} 
     29            .          .            
     30            .          .           		for _, instr := range block.Instructions { 
     31         20ms       20ms           			step := &storage[i] 
     32            .          .           			step.Index = i 
     33            .          .           			step.Value = instr 
     34            .          .           			step.Block = block 
     35         10ms       10ms           			step.Register = -1 
     36         10ms      260ms           			step.Live = make([]*Step, 0, 4) 
     37            .          .           			steps[i] = step 
     38            .      280ms           			valueToStep[instr] = step 
     39         10ms       10ms           			i++ 
     40            .          .           		} 
     41            .          .           	} 
     42            .          .            
     43            .          .           	return IR{steps, valueToStep, blockToRegion} 
     44            .          .           } 
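
Editor's note: nearly all of createSteps' 2.10s sits in the three make calls, which is the point of its design: one bulk []Step allocation, one pointer slice, and one map replace a small allocation per step. The pattern in isolation, with a hypothetical node type:

    package codegen

    type node struct{ index int }

    // build replaces count small allocations with two bulk ones: a
    // backing array of values and a slice of stable pointers into it.
    func build(count int) []*node {
    	storage := make([]node, count) // one allocation for every node
    	nodes := make([]*node, count)
    	for i := range storage {
    		storage[i].index = i
    		nodes[i] = &storage[i]
    	}
    	return nodes
    }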

git.urbach.dev/cli/q/src/expression.Parse

/home/user/q/src/expression/Parse.go

  Total:       200ms      3.78s (flat, cum)  7.94%
      3            .          .           import ( 
      4            .          .           	"git.urbach.dev/cli/q/src/token" 
      5            .          .           ) 
      6            .          .            
      7            .          .           // Parse generates an expression tree from tokens. 
      8         30ms      250ms           func Parse(tokens token.List) *Expression { 
      9            .          .           	var ( 
     10            .          .           		cursor *Expression 
     11            .          .           		root   *Expression 
     12            .          .           		i      uint 
     13            .          .           	) 
     14            .          .            
     15         10ms       10ms           loop: 
     16            .          .           	for i < uint(len(tokens)) { 
     17         10ms       10ms           		t := tokens[i] 
     18            .          .            
     19            .          .           		switch t.Kind { 
     20         20ms       20ms           		case token.GroupStart, token.ArrayStart, token.BlockStart: 
     21            .          .           			i++ 
     22            .          .           			groupLevel := 1 
     23            .          .           			groupPosition := i 
     24            .          .            
     25            .          .           			for i < uint(len(tokens)) { 
     26            .          .           				t = tokens[i] 
     27            .          .            
     28         10ms       10ms           				switch t.Kind { 
     29            .          .           				case token.GroupStart, token.ArrayStart, token.BlockStart: 
     30            .          .           					groupLevel++ 
     31         10ms       10ms           				case token.GroupEnd, token.ArrayEnd, token.BlockEnd: 
     32            .          .           					groupLevel-- 
     33            .          .            
     34            .          .           					if groupLevel == 0 { 
     35         10ms      1.19s           						root, cursor = handleGroupEnd(tokens, root, cursor, groupPosition, i, t) 
     36            .          .           						i++ 
     37            .          .           						continue loop 
     38            .          .           					} 
     39            .          .           				} 
     40            .          .            
     41            .          .           				i++ 
     42            .          .           			} 
     43            .          .            
     44            .          .           			break loop 
     45            .          .           		} 
     46            .          .            
     47            .          .           		switch { 
     48         80ms       80ms           		case cursor != nil && cursor.Token.Kind == token.Cast && len(cursor.Children) < 2: 
     49            .          .           			cursor.AddChild(&newTypeExpression(tokens[i:]).Expression) 
     50            .          .           			return root 
     51            .          .            
     52            .          .           		case t.Kind.IsLiteral(): 
     53         20ms      2.01s           			root, cursor = handleLiteral(root, cursor, t) 
     54            .          .            
     55            .          .           		case !t.Kind.IsOperator(): 
     56            .          .           			// do nothing 
     57            .          .            
     58            .          .           		case cursor == nil: 
     59            .          .           			cursor = newLeaf(t) 
     60            .          .           			cursor.precedence = precedence(t.Kind) 
     61            .          .           			root = cursor 
     62            .          .            
     63            .          .           		default: 
     64            .       90ms           			node := newLeaf(t) 
                                                           return &Expression{Token: t}                                         newLeaf.go:7
 
     65            .          .           			node.precedence = precedence(t.Kind) 
     66            .          .            
     67            .          .           			if cursor.Token.Kind.IsOperator() { 
     68            .       60ms           				root = handleOperator(root, cursor, node) 
     69            .          .           			} else { 
     70            .       40ms           				node.AddChild(cursor) 
                                                           expr.Children = make([]*Expression, 0, 2)                            Expression.go:21
 
     71            .          .           				root = node 
     72            .          .           			} 
     73            .          .            
     74            .          .           			cursor = node 
     75            .          .           		} 
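
Editor's note: Parse builds the tree incrementally, rotating new operator nodes in by precedence (handleOperator) and attaching literals to the current cursor. As an illustration of the precedence handling only, here is a tiny recursive precedence climber over single-byte tokens; it is a different but equivalent technique, not the q implementation:

    package main

    import "fmt"

    var prec = map[byte]int{'+': 1, '-': 1, '*': 2, '/': 2}

    // parse consumes one expression whose operators all bind at least
    // as tightly as minPrec, returning a parenthesized rendering.
    func parse(s string, pos *int, minPrec int) string {
    	left := string(s[*pos]) // single-letter operand
    	*pos++
    	for *pos < len(s) {
    		op := s[*pos]
    		p, ok := prec[op]
    		if !ok || p < minPrec {
    			break
    		}
    		*pos++
    		right := parse(s, pos, p+1) // left-associative
    		left = "(" + left + string(op) + right + ")"
    	}
    	return left
    }

    func main() {
    	pos := 0
    	fmt.Println(parse("a+b*c-d", &pos, 1)) // ((a+(b*c))-d)
    }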

git.urbach.dev/cli/q/src/codegen.(*Function).fixRegisterConflicts

/home/user/q/src/codegen/fixRegisterConflicts.go

  Total:       200ms      460ms (flat, cum)  0.97%
     11            .          .           ) 
     12            .          .            
     13            .          .           // fixRegisterConflicts checks for conflicts where 2 values that are live at the same time use the same register. 
     14            .          .           // It then assigns a new register to the value that was defined earlier. 
     15            .          .           func (f *Function) fixRegisterConflicts() { 
     16         10ms       10ms           	for _, step := range f.Steps { 
     17            .          .           		var clobbered []cpu.Register 
     18            .          .            
     19         50ms       50ms           		switch instr := step.Value.(type) { 
     20            .          .           		case *ssa.BinaryOp: 
     21            .          .           			switch instr.Op { 
     22            .          .           			case token.Div, token.Mod: 
     23            .          .           				clobbered = f.CPU.DivisionClobbered 
     24            .          .           			case token.Shl, token.Shr: 
     25            .          .           				clobbered = f.CPU.ShiftRestricted 
     26            .          .            
     27            .          .           				if slices.Contains(f.CPU.ShiftRestricted, step.Register) { 
     28            .          .           					f.assignFreeRegister(step) 
     29            .          .           				} 
     30            .          .           			} 
     31            .          .            
     32            .          .           			if step.Register != -1 { 
     33            .       10ms           				right := f.ValueToStep[instr.Right] 
     34            .          .            
     35            .          .           				if step.Register == right.Register { 
     36            .          .           					f.assignFreeRegister(right) 
     37            .          .           				} 
     38            .          .            
     39            .          .           				left := f.ValueToStep[instr.Left] 
     40            .          .            
     41            .          .           				if instr.Op == token.Mod && step.Register == left.Register { 
     42            .       10ms           					f.assignFreeRegister(left) 
     43            .          .           				} 
     44            .          .           			} 
     45            .          .           		case *ssa.Call: 
     46            .          .           			clobbered = f.CPU.Call.Clobbered 
     47            .          .           		case *ssa.CallExtern: 
     48            .          .           			clobbered = f.CPU.ExternCall.Clobbered 
     49            .          .           		case *ssa.Register: 
     50            .          .           			if f.build.Arch == config.ARM && step.Register == arm.SP { 
     51            .          .           				f.assignFreeRegister(step) 
     52            .          .           			} 
     53            .          .           		case *ssa.Syscall: 
     54            .          .           			clobbered = f.CPU.Syscall.Clobbered 
     55            .          .           		} 
     56            .          .            
     57         20ms       20ms           		for i, live := range step.Live { 
     58         50ms       50ms           			if live.Register == -1 { 
     59            .          .           				continue 
     60            .          .           			} 
     61            .          .            
     62         60ms       80ms           			if live.Value != step.Value && slices.Contains(clobbered, live.Register) { 
     63            .       90ms           				f.assignFreeRegister(live) 
     64            .          .           				continue 
     65            .          .           			} 
     66            .          .            
     67         10ms       10ms           			for _, previous := range step.Live[:i] { 
     68            .          .           				if previous.Register == -1 { 
     69            .          .           					continue 
     70            .          .           				} 
     71            .          .            
     72            .          .           				if previous.Register != live.Register { 
     73            .          .           					continue 
     74            .          .           				} 
     75            .          .            
     76            .          .           				if previous.Index < live.Index { 
     77            .          .           					f.assignFreeRegister(previous) 
     78            .          .           				} else { 
     79            .      130ms           					f.assignFreeRegister(live) 
     80            .          .           					break 
     81            .          .           				} 
     82            .          .           			} 
     83            .          .           		} 
     84            .          .           	} 
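
The loop over step.Live above encodes the rule from the function comment: when two values that are live at the same time occupy one register, the one defined earlier (lower Index) is moved to a free register. A standalone sketch of just that rule, with hypothetical types and a stubbed free-register source:

package main

import "fmt"

// step is a hypothetical stand-in for codegen's per-value step.
type step struct {
	Index    int
	Register int
}

// resolveConflicts mirrors the inner loop above: among simultaneously live
// values that share a register, the earlier-defined one (lower Index) moves.
func resolveConflicts(live []*step, freeReg func() int) {
	for i, cur := range live {
		if cur.Register == -1 {
			continue
		}

		for _, prev := range live[:i] {
			if prev.Register == -1 || prev.Register != cur.Register {
				continue
			}

			if prev.Index < cur.Index {
				prev.Register = freeReg()
			} else {
				cur.Register = freeReg()
				break
			}
		}
	}
}

func main() {
	next := 10
	free := func() int { next++; return next }

	live := []*step{{Index: 0, Register: 3}, {Index: 1, Register: 3}}
	resolveConflicts(live, free)
	fmt.Println(live[0].Register, live[1].Register) // 11 3
}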

git.urbach.dev/cli/q/src/core.(*Environment).parseParameters

/home/user/q/src/core/parseParameters.go

  Total:       190ms      510ms (flat, cum)  1.07%
      8            .          .           	"git.urbach.dev/cli/q/src/types" 
      9            .          .           ) 
     10            .          .            
     11            .          .           // parseParameters parses the tokens of the input and output types. 
     12            .          .           func (env *Environment) parseParameters(functions iter.Seq[*Function]) error { 
      13        190ms      510ms           	for f := range functions { 

git.urbach.dev/cli/q/src/core.(*Environment).ResolveTypes.(*Environment).parseParameters-range3

/home/user/q/src/core/parseParameters.go

  Total:       180ms      480ms (flat, cum)  1.01%
     14            .       80ms           		f.Type = &types.Function{ 
     15         20ms       70ms           			Input:  make([]types.Type, len(f.Input)), 
     16            .       20ms           			Output: make([]types.Type, len(f.Output)), 
     17            .          .           		} 
     18            .          .            
     19         10ms       10ms           		for i, input := range f.Input { 
     20         80ms       80ms           			input.Name = input.Tokens[0].StringFrom(f.File.Bytes) 
     21            .       80ms           			typ, err := env.TypeFromTokens(input.Tokens[1:], f.File) 
     22            .          .            
     23            .          .           			if err != nil { 
     24            .          .           				return err 
     25            .          .           			} 
     26            .          .            
     27            .          .           			input.Typ = typ 
     28         10ms       10ms           			f.Type.Input[i] = input.Typ 
     29            .          .           		} 
     30            .          .            
     31         20ms       20ms           		for i, output := range f.Output { 
     32         20ms       20ms           			typeTokens := output.Tokens 
     33            .          .            
     34         10ms       10ms           			if len(output.Tokens) > 1 && output.Tokens[0].Kind == token.Identifier && output.Tokens[1].Kind != token.Or { 
     35            .          .           				output.Name = output.Tokens[0].StringFrom(f.File.Bytes) 
     36            .          .           				output.SetEnd(output.Tokens[0].End()) 
     37            .          .           				typeTokens = typeTokens[1:] 
     38            .          .           			} 
     39            .          .            
     40            .       20ms           			typ, err := env.TypeFromTokens(typeTokens, f.File) 
     41            .          .            
     42            .          .           			if err != nil { 
     43            .          .           				return err 
     44            .          .           			} 
     45            .          .            
     46            .          .           			output.Typ = typ 
     47            .          .           			f.Type.Output[i] = output.Typ 
     48            .          .           		} 
     49            .          .            
     50         10ms       10ms           		if f.Previous != nil || f.Next != nil { 
     51            .          .           			suffix := strings.Builder{} 
     52            .          .           			suffix.WriteByte('[') 
     53            .          .            
     54            .          .           			for i, input := range f.Input { 
     55            .          .           				suffix.WriteString(input.Typ.Name()) 
     56            .          .            
     57            .          .           				if i != len(f.Input)-1 { 
     58            .          .           					suffix.WriteByte(',') 
     59            .          .           				} 
     60            .          .           			} 
     61            .          .            
     62            .          .           			suffix.WriteByte(']') 
     63            .       50ms           			f.AddSuffix(suffix.String()) 
     64            .          .           		} 
     65            .          .           	} 
     66            .          .            
     67            .          .           	return nil 
     68            .          .           } 
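
The suffix built at the end uniquely names a function variant by its input types, for example [int,string]. A minimal reproduction of that formatting, where typeNames stands in for the resolved names of f.Input's types:

package main

import (
	"fmt"
	"strings"
)

// variantSuffix joins type names with commas inside brackets, as above.
func variantSuffix(typeNames []string) string {
	suffix := strings.Builder{}
	suffix.WriteByte('[')

	for i, name := range typeNames {
		suffix.WriteString(name)

		if i != len(typeNames)-1 {
			suffix.WriteByte(',')
		}
	}

	suffix.WriteByte(']')
	return suffix.String()
}

func main() {
	fmt.Println(variantSuffix([]string{"int", "string"})) // [int,string]
}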

runtime.(*sysMemStat).add

/usr/lib/go/src/runtime/mstats.go

  Total:        60ms       60ms (flat, cum)  0.13%
    655            .          .           // 
    656            .          .           // Must be nosplit as it is called in runtime initialization, e.g. newosproc0. 
    657            .          .           // 
    658            .          .           //go:nosplit 
    659            .          .           func (s *sysMemStat) add(n int64) { 
    660         50ms       50ms           	val := atomic.Xadd64((*uint64)(s), n) 
    661            .          .           	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) { 
    662            .          .           		print("runtime: val=", val, " n=", n, "\n") 
    663            .          .           		throw("sysMemStat overflow") 
    664            .          .           	} 
    665         10ms       10ms           } 
    666            .          .            
    667            .          .           // heapStatsDelta contains deltas of various runtime memory statistics 
    668            .          .           // that need to be updated together in order for them to be kept 
    669            .          .           // consistent with one another. 
    670            .          .           type heapStatsDelta struct { 
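
sysMemStat.add pairs an atomic 64-bit add with an overflow check: a positive delta must leave the signed view of the counter at least as large as the delta, and after a negative delta the counter must still be non-negative. A standalone sketch on sync/atomic (the runtime uses its internal atomic.Xadd64; here subtraction goes through the two's-complement conversion the sync/atomic docs describe):

package main

import (
	"fmt"
	"sync/atomic"
)

// add applies a signed delta to an unsigned counter and panics on wraparound,
// mirroring the check in sysMemStat.add.
func add(stat *uint64, n int64) {
	val := atomic.AddUint64(stat, uint64(n)) // uint64(n) is two's complement for n < 0
	if (n > 0 && int64(val) < n) || (n < 0 && int64(val)+n < n) {
		panic(fmt.Sprintf("stat overflow: val=%d n=%d", val, n))
	}
}

func main() {
	var stat uint64
	add(&stat, 4096)
	add(&stat, -4096)
	fmt.Println(stat) // 0
}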

runtime.(*consistentHeapStats).acquire

/usr/lib/go/src/runtime/mstats.go

  Total:       120ms      120ms (flat, cum)  0.25%
    773            .          .           // function. 
    774            .          .           // 
    775            .          .           //go:nosplit 
    776            .          .           func (m *consistentHeapStats) acquire() *heapStatsDelta { 
    777            .          .           	if pp := getg().m.p.ptr(); pp != nil { 
    778        110ms      110ms           		seq := pp.statsSeq.Add(1) 
    779            .          .           		if seq%2 == 0 { 
    780            .          .           			// Should have been incremented to odd. 
    781            .          .           			print("runtime: seq=", seq, "\n") 
    782            .          .           			throw("bad sequence number") 
    783            .          .           		} 
    784            .          .           	} else { 
    785            .          .           		lock(&m.noPLock) 
    786            .          .           	} 
    787         10ms       10ms           	gen := m.gen.Load() % 3 
    788            .          .           	return &m.stats[gen] 
    789            .          .           } 
    790            .          .            
    791            .          .           // release indicates that the writer is done modifying 
    792            .          .           // the delta. The value returned by the corresponding 

runtime.(*consistentHeapStats).release

/usr/lib/go/src/runtime/mstats.go

  Total:        10ms       10ms (flat, cum) 0.021%
    802            .          .           // before this operation has completed. 
    803            .          .           // 
    804            .          .           //go:nosplit 
    805            .          .           func (m *consistentHeapStats) release() { 
    806            .          .           	if pp := getg().m.p.ptr(); pp != nil { 
    807         10ms       10ms           		seq := pp.statsSeq.Add(1) 
    808            .          .           		if seq%2 != 0 { 
    809            .          .           			// Should have been incremented to even. 
    810            .          .           			print("runtime: seq=", seq, "\n") 
    811            .          .           			throw("bad sequence number") 
    812            .          .           		} 
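
acquire and release above bracket statistics writes with a per-P sequence counter: odd means a writer is inside, even means the stats are consistent, and readers retry until they observe the same even value on both sides. A minimal sketch of that seqlock-style protocol, assuming a single writer (the runtime's version also rotates three generations of stats):

package main

import (
	"fmt"
	"sync/atomic"
)

// stats is a hypothetical single-writer seqlock: the counter is odd while a
// write is in progress and even when the value is consistent.
type stats struct {
	seq   atomic.Uint32
	value int
}

func (s *stats) write(v int) {
	if s.seq.Add(1)%2 == 0 {
		panic("bad sequence number") // must be odd inside the write
	}

	s.value = v

	if s.seq.Add(1)%2 != 0 {
		panic("bad sequence number") // back to even after the write
	}
}

// read retries until it sees the same even sequence number on both sides of
// the load. (Illustrative only: a cross-goroutine version would need an
// atomic value load to be race-free under Go's memory model.)
func (s *stats) read() int {
	for {
		before := s.seq.Load()
		v := s.value

		if before%2 == 0 && s.seq.Load() == before {
			return v
		}
	}
}

func main() {
	var s stats
	s.write(42)
	fmt.Println(s.read()) // 42
}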

git.urbach.dev/cli/q/src/token.List.Instructions

/home/user/q/src/token/Instructions.go

  Total:       180ms      6.18s (flat, cum) 12.98%
      4            .          .           func (list List) Instructions(yield func(List) bool) { 
      5            .          .           	start := 0 
      6            .          .           	groupLevel := 0 
      7            .          .           	blockLevel := 0 
      8            .          .            
      9         40ms       40ms           	for i, t := range list { 
     10        110ms      110ms           		switch t.Kind { 
     11            .          .           		case NewLine: 
     12            .          .           			if start == i { 
     13            .          .           				start = i + 1 
     14            .          .           				continue 
     15            .          .           			} 
     16            .          .            
     17         10ms       10ms           			if groupLevel > 0 || blockLevel > 0 { 
     18            .          .           				continue 
     19            .          .           			} 
     20            .          .            
     21            .      3.85s           			if !yield(list[start:i]) { 
     22            .          .           				return 
     23            .          .           			} 
     24            .          .            
     25            .          .           			start = i + 1 
     26            .          .            
     27            .          .           		case GroupStart: 
     28            .          .           			groupLevel++ 
     29            .          .            
     30            .          .           		case GroupEnd: 
     31         20ms       20ms           			groupLevel-- 
     32            .          .            
     33            .          .           		case BlockStart: 
     34            .          .           			blockLevel++ 
     35            .          .            
     36            .          .           		case BlockEnd: 
     37            .          .           			blockLevel-- 
     38            .          .            
     39            .          .           			if groupLevel > 0 || blockLevel > 0 { 
     40            .          .           				continue 
     41            .          .           			} 
     42            .          .            
     43            .          .           			if !list[start].Kind.IsBlock() { 
     44            .          .           				continue 
     45            .          .           			} 
     46            .          .            
     47            .      2.08s           			if !yield(list[start : i+1]) { 
     48            .          .           				return 
     49            .          .           			} 
     50            .          .            
     51            .          .           			start = i + 1 
     52            .          .            
     53            .          .           		case EOF: 
     54            .          .           			if start < i { 
     55            .          .           				yield(list[start:i]) 
     56            .          .           			} 
     57            .          .            
     58            .          .           			return 
     59            .          .           		} 
     60            .          .           	} 
     61            .          .            
     62            .          .           	if start < len(list) { 
     63            .       70ms           		yield(list[start:]) 
     64            .          .           	} 
     65            .          .           } 
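
Instructions is a push iterator in the Go 1.23 range-over-func style, which is why almost all of its 6.18s cumulative time sits on the two yield calls: those samples belong to the callers' loop bodies, not to the scanner itself. A toy iterator with the same shape (lines is a hypothetical example, not q's API):

package main

import "fmt"

// lines yields newline-separated chunks of s and stops early when the
// consumer breaks out of the range loop (yield returns false).
func lines(s string) func(yield func(string) bool) {
	return func(yield func(string) bool) {
		start := 0
		for i := 0; i < len(s); i++ {
			if s[i] != '\n' {
				continue
			}
			if !yield(s[start:i]) {
				return
			}
			start = i + 1
		}
		if start < len(s) {
			yield(s[start:])
		}
	}
}

func main() {
	for line := range lines("a\nb\nc") {
		fmt.Println(line) // a, b, c
	}
}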

internal/poll.(*FD).Init

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        20ms      480ms (flat, cum)  1.01%
     50            .          .           // Init initializes the FD. The Sysfd field should already be set. 
     51            .          .           // This can be called multiple times on a single FD. 
     52            .          .           // The net argument is a network name from the net package (e.g., "tcp"), 
     53            .          .           // or "file". 
     54            .          .           // Set pollable to true if fd should be managed by runtime netpoll. 
     55         10ms       10ms           func (fd *FD) Init(net string, pollable bool) error { 
     56            .          .           	fd.SysFile.init() 
     57            .          .            
     58            .          .           	// We don't actually care about the various network types. 
     59         10ms       10ms           	if net == "file" { 
     60            .          .           		fd.isFile = true 
     61            .          .           	} 
     62            .          .           	if !pollable { 
     63            .          .           		fd.isBlocking = 1 
     64            .          .           		return nil 
     65            .          .           	} 
     66            .      460ms           	err := fd.pd.init(fd) 
     67            .          .           	if err != nil { 
     68            .          .           		// If we could not initialize the runtime poller, 
     69            .          .           		// assume we are using blocking mode. 
     70            .          .           		fd.isBlocking = 1 
     71            .          .           	} 

internal/poll.(*FD).destroy

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        10ms      410ms (flat, cum)  0.86%
     77            .          .           func (fd *FD) destroy() error { 
     78            .          .           	// Poller may want to unregister fd in readiness notification mechanism, 
     79            .          .           	// so this must be executed before CloseFunc. 
     80            .          .           	fd.pd.close() 
     81            .          .            
     82            .      350ms           	err := fd.SysFile.destroy(fd.Sysfd) 
     83            .          .            
     84            .          .           	fd.Sysfd = -1 
     85         10ms       60ms           	runtime_Semrelease(&fd.csema) 
     86            .          .           	return err 
     87            .          .           } 

internal/poll.(*FD).Close

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        20ms      430ms (flat, cum)   0.9%
     89            .          .           // Close closes the FD. The underlying file descriptor is closed by the 
     90            .          .           // destroy method when there are no remaining references. 
     91         10ms       10ms           func (fd *FD) Close() error { 
     92         10ms       10ms           	if !fd.fdmu.increfAndClose() { 
     93            .          .           		return errClosing(fd.isFile) 
     94            .          .           	} 
     95            .          .            
     96            .          .           	// Unblock any I/O.  Once it all unblocks and returns, 
     97            .          .           	// so that it cannot be referring to fd.sysfd anymore, 
     98            .          .           	// the final decref will close fd.sysfd. This should happen 
     99            .          .           	// fairly quickly, since all the I/O is non-blocking, and any 
    100            .          .           	// attempts to block in the pollDesc will return errClosing(fd.isFile). 
    101            .          .           	fd.pd.evict() 
    102            .          .            
    103            .          .           	// The call to decref will call destroy if there are no other 
    104            .          .           	// references. 
    105            .      410ms           	err := fd.decref() 
    106            .          .            
    107            .          .           	// Wait until the descriptor is closed. If this was the only 
    108            .          .           	// reference, it is already closed. Only wait if the file has 
    109            .          .           	// not been set to blocking mode, as otherwise any current I/O 
    110            .          .           	// may be blocking, and that would block the Close. 
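
Close above only marks the descriptor as closing and drops a reference; the decref call is what actually runs destroy, once no other operation still holds the FD. A stripped-down sketch of that scheme, assuming a separate flag and counter where the real fdMutex packs everything into one word:

package main

import (
	"errors"
	"fmt"
	"sync/atomic"
)

var errClosing = errors.New("use of closed file")

// fd is a hypothetical stand-in for poll.FD's reference counting.
type fd struct {
	refs    atomic.Int64 // outstanding references, including the FD's own
	closing atomic.Bool
	destroy func()
}

func newFD(destroy func()) *fd {
	f := &fd{destroy: destroy}
	f.refs.Store(1) // the descriptor itself holds one reference
	return f
}

// incref takes a reference for an in-flight operation.
func (f *fd) incref() error {
	if f.closing.Load() {
		return errClosing
	}
	f.refs.Add(1)
	return nil
}

// decref releases a reference; the final one runs destroy.
func (f *fd) decref() {
	if f.refs.Add(-1) == 0 {
		f.destroy()
	}
}

// Close marks the fd closing and drops the initial reference, so destroy
// runs once the last in-flight operation calls decref.
func (f *fd) Close() error {
	if !f.closing.CompareAndSwap(false, true) {
		return errClosing
	}
	f.decref()
	return nil
}

func main() {
	f := newFD(func() { fmt.Println("destroyed") })
	f.incref() // an in-flight read
	f.Close()  // marks closing; destroy does not run yet
	f.decref() // the read finishes; prints "destroyed"
}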

internal/poll.(*FD).Read

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        70ms      820ms (flat, cum)  1.72%
    136            .          .           // See golang.org/issue/7812 and golang.org/issue/16266. 
    137            .          .           // Use 1GB instead of, say, 2GB-1, to keep subsequent reads aligned. 
    138            .          .           const maxRW = 1 << 30 
    139            .          .            
    140            .          .           // Read implements io.Reader. 
    141         10ms       10ms           func (fd *FD) Read(p []byte) (int, error) { 
    142         10ms       60ms           	if err := fd.readLock(); err != nil { 
    143            .          .           		return 0, err 
    144            .          .           	} 
    145            .          .           	defer fd.readUnlock() 
    146            .          .           	if len(p) == 0 { 
    147            .          .           		// If the caller wanted a zero byte read, return immediately 
    148            .          .           		// without trying (but after acquiring the readLock). 
    149            .          .           		// Otherwise syscall.Read returns 0, nil which looks like 
    150            .          .           		// io.EOF. 
    151            .          .           		// TODO(bradfitz): make it wait for readability? (Issue 15735) 
    152            .          .           		return 0, nil 
    153            .          .           	} 
    154         10ms       30ms           	if err := fd.pd.prepareRead(fd.isFile); err != nil { 
    155            .          .           		return 0, err 
    156            .          .           	} 
    157            .          .           	if fd.IsStream && len(p) > maxRW { 
    158            .          .           		p = p[:maxRW] 
    159            .          .           	} 
    160            .          .           	for { 
    161         20ms      690ms           		n, err := ignoringEINTRIO(syscall.Read, fd.Sysfd, p) 
    162            .          .           		if err != nil { 
    163            .          .           			n = 0 
    164            .          .           			if err == syscall.EAGAIN && fd.pd.pollable() { 
    165            .          .           				if err = fd.pd.waitRead(fd.isFile); err == nil { 
    166            .          .           					continue 
    167            .          .           				} 
    168            .          .           			} 
    169            .          .           		} 
    170            .          .           		err = fd.eofError(n, err) 
    171         20ms       30ms           		return n, err 
    172            .          .           	} 
    173            .          .           } 
    174            .          .            
    175            .          .           // Pread wraps the pread system call. 
    176            .          .           func (fd *FD) Pread(p []byte, off int64) (int, error) { 

internal/poll.(*FD).Fstat

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        60ms      610ms (flat, cum)  1.28%
    634            .          .           		return syscall.Fchmod(fd.Sysfd, mode) 
    635            .          .           	}) 
    636            .          .           } 
    637            .          .            
    638            .          .           // Fstat wraps syscall.Fstat 
    639         10ms       10ms           func (fd *FD) Fstat(s *syscall.Stat_t) error { 
    640         40ms       40ms           	if err := fd.incref(); err != nil { 
    641            .          .           		return err 
    642            .          .           	} 
    643            .          .           	defer fd.decref() 
    644         10ms      560ms           	return ignoringEINTR(func() error { 
internal/poll.(*FD).Fstat.func1

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:           0      540ms (flat, cum)  1.13%
    645            .      540ms           		return syscall.Fstat(fd.Sysfd, s) 
    646            .          .           	}) 
    647            .          .           } 
    648            .          .            
    649            .          .           // dupCloexecUnsupported indicates whether F_DUPFD_CLOEXEC is supported by the kernel. 
    650            .          .           var dupCloexecUnsupported atomic.Bool 

internal/poll.ignoringEINTRIO

/usr/lib/go/src/internal/poll/fd_unix.go

  Total:        20ms      690ms (flat, cum)  1.45%
    733            .          .           } 
    734            .          .            
    735            .          .           // ignoringEINTRIO is like ignoringEINTR, but just for IO calls. 
    736            .          .           func ignoringEINTRIO(fn func(fd int, p []byte) (int, error), fd int, p []byte) (int, error) { 
    737            .          .           	for { 
    738         20ms      690ms           		n, err := fn(fd, p) 
    739            .          .           		if err != syscall.EINTR { 
    740            .          .           			return n, err 
    741            .          .           		} 
    742            .          .           	} 
    743            .          .           }
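
ignoringEINTRIO, its ignoringEINTR sibling under Fstat, and the retry loop in FD.Read are all the standard EINTR dance: a system call interrupted by a signal before doing any work fails with EINTR and must simply be reissued. The same pattern at the raw syscall level (Linux; readRetry is a hypothetical helper):

package main

import (
	"fmt"
	"os"
	"syscall"
)

// readRetry retries syscall.Read on EINTR, just as ignoringEINTRIO retries
// the function it wraps.
func readRetry(fd int, p []byte) (int, error) {
	for {
		n, err := syscall.Read(fd, p)
		if err != syscall.EINTR {
			return n, err
		}
	}
}

func main() {
	f, err := os.Open("/etc/hostname")
	if err != nil {
		fmt.Println(err)
		return
	}
	defer f.Close()

	buf := make([]byte, 64)
	n, _ := readRetry(int(f.Fd()), buf)
	fmt.Printf("%s", buf[:n])
}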