-
Notifications
You must be signed in to change notification settings - Fork 3
/
environment.go
153 lines (122 loc) · 3.47 KB
/
environment.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
package gofine
import (
"errors"
"sync"
"golang.org/x/sys/unix"
)
// maxNumCPUs bounds the linear scan over possible core ids in
// getCoreIdByIndex; 1024 comfortably exceeds CPU counts on current hardware.
const maxNumCPUs = 1 << 10

// invalidLgoreId is the sentinel error returned by Environment methods when
// the caller passes a lgore id that is out of range.
//
// NOTE(review): Go convention would name this errInvalidLgoreId (ST1012);
// kept as-is so existing references elsewhere in the package still compile.
// The message is lowercased per Go error-string convention (ST1005).
var invalidLgoreId = errors.New("invalid lgore id")
// Config describes how an Environment should be initialized.
type Config struct {
	// OccupyAll specifies if we should pre-occupy all available cores.
	//
	// TODO: ignored for now (see the corresponding TODO in Init).
	OccupyAll bool

	// ReserveCores specifies which cores should NOT be used as lgores;
	// they are left for the Go runtime to schedule on.
	//
	// There should be at least one core present in this slice (Init
	// returns an error otherwise). Each value should be an index in range
	// [0, NumCPUs), where NumCPUs can be found from runtime.NumCPU().
	// Note these are indexes into the process's affinity set, not raw
	// core ids — see getCoreIdByIndex.
	ReserveCores []int
}
// Environment manages all the lgores.
//
// The zero value is not usable; call Init or InitDefault first.
type Environment struct {
	// mu serializes lgore state transitions (see Occupy and Release).
	mu sync.Mutex
	// original is the process CPU affinity captured at Init time; it is
	// passed to lgore.release — presumably to restore a thread's affinity
	// after the lgore is given up (verify against lgore.release).
	original unix.CPUSet
	// available is original minus the cores reserved via
	// Config.ReserveCores; one lgore is created per remaining core.
	available unix.CPUSet
	// lgores is indexed by lgore id in [0, available.Count()).
	lgores []*lgore
}
// InitDefault initializes `env` with the default configuration.
//
// The default config sets `OccupyAll` to `false` and reserves core 0 for
// the Go runtime; everything else is delegated to Init.
func (env *Environment) InitDefault() error {
	return env.Init(Config{
		OccupyAll:    false,
		ReserveCores: []int{0},
	})
}
// Init initializes the environment and its lgores from conf.
//
// It snapshots the current process CPU affinity, removes the reserved cores,
// and creates one lgore per remaining core. An error is returned when no
// cores are reserved, when fewer than two logical cores are available, when a
// reservation index is out of range, or when reservation leaves no cores.
func (env *Environment) Init(conf Config) error {
	if len(conf.ReserveCores) == 0 {
		return errors.New("Should reserve at least one lgore")
	}
	// Fix: hold env.mu while mutating original/available/lgores so that a
	// concurrent Occupy/Release (which lock env.mu) cannot observe a
	// half-initialized environment.
	env.mu.Lock()
	defer env.mu.Unlock()
	// Save the original cpu affinity of this process (pid 0 == self).
	if err := unix.SchedGetaffinity(0, &env.original); err != nil {
		return err
	}
	if env.original.Count() <= 1 {
		return errors.New("Not enough logical cores, should be greater than one")
	}
	env.available = env.original
	// Reserve cores for the Go runtime by clearing them from the
	// available set.
	for _, coreIndex := range conf.ReserveCores {
		coreId := getCoreIdByIndex(env.original, coreIndex)
		if coreId < 0 {
			return errors.New("Invalid reservation lgore id")
		}
		env.available.Clear(coreId)
	}
	if env.available.Count() == 0 {
		return errors.New("No lgores left after reservation")
	}
	env.initLgores()
	// TODO occupy lgores if OccupyAll is true
	return nil
}
// LgoreCount returns the number of available lgores.
//
// This equals the size of the affinity set left after Init's reservations.
func (env *Environment) LgoreCount() int {
	cores := env.available
	return cores.Count()
}
// GetLgoreState returns the `LgoreState` of the lgore identified by lgoreId.
//
// It returns Invalid and invalidLgoreId when lgoreId is out of range.
func (env *Environment) GetLgoreState(lgoreId int) (LgoreState, error) {
	// Fix: also reject negative ids — the original only checked the upper
	// bound, so a negative id panicked on the slice index below.
	if lgoreId < 0 || lgoreId >= len(env.lgores) {
		return Invalid, invalidLgoreId
	}
	// Fix: read state under env.mu for consistency with Occupy/Release,
	// which mutate lgore state while holding the same lock.
	env.mu.Lock()
	defer env.mu.Unlock()
	return env.lgores[lgoreId].state, nil
}
// Occupy locks the calling goroutine to an lgore.
//
// The goroutine is locked to an OS thread, and that OS thread is locked to
// the lgore's core (see lgore.occupy). Returns invalidLgoreId when lgoreId
// is out of range.
func (env *Environment) Occupy(lgoreId int) error {
	// Fix: also reject negative ids — the original only checked the upper
	// bound, so a negative id panicked on the slice index below.
	if lgoreId < 0 || lgoreId >= len(env.lgores) {
		return invalidLgoreId
	}
	env.mu.Lock()
	defer env.mu.Unlock()
	lg := env.lgores[lgoreId]
	return lg.occupy()
}
// Release releases the lgore.
//
// This function should be called from the same goroutine that called
// `Occupy`. The lgore becomes available again, and the locked OS thread is
// allowed to run on any core of the original affinity set (see
// lgore.release). Returns invalidLgoreId when lgoreId is out of range.
func (env *Environment) Release(lgoreId int) error {
	// Fix: also reject negative ids — the original only checked the upper
	// bound, so a negative id panicked on the slice index below.
	if lgoreId < 0 || lgoreId >= len(env.lgores) {
		return invalidLgoreId
	}
	env.mu.Lock()
	defer env.mu.Unlock()
	lg := env.lgores[lgoreId]
	return lg.release(env.original)
}
// initLgores builds one lgore per core remaining in env.available.
// Lgore ids are dense indexes into env.lgores; each is mapped to a concrete
// core id via getCoreIdByIndex.
func (env *Environment) initLgores() {
	env.lgores = make([]*lgore, env.available.Count())
	for id := range env.lgores {
		env.lgores[id] = &lgore{
			coreId: getCoreIdByIndex(env.available, id),
			state:  Available,
		}
	}
}
// getCoreIdByIndex maps a dense index into cpuset (the coreIndex-th set bit)
// to the underlying core id; it returns -1 if not found.
func getCoreIdByIndex(cpuset unix.CPUSet, coreIndex int) int {
	seen := 0
	for id := 0; id < maxNumCPUs; id++ {
		if !cpuset.IsSet(id) {
			continue
		}
		if seen == coreIndex {
			return id
		}
		seen++
	}
	return -1
}