# deployment.yaml
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    app: crane-scheduler-controller
  name: crane-scheduler-controller
  namespace: crane-system
spec:
  replicas: 1
  selector:
    matchLabels:
      app: crane-scheduler-controller
  template:
    metadata:
      labels:
        app: crane-scheduler-controller
    spec:
      serviceAccountName: crane-scheduler-controller
      containers:
        - name: controller
          env:
            - name: TZ
              value: Asia/Shanghai
            - name: CRANE_SYSTEM_NAMESPACE
              valueFrom:
                fieldRef:
                  fieldPath: metadata.namespace
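          # CRANE_SYSTEM_NAMESPACE is filled in from the Pod's own namespace via the
          # Kubernetes downward API, so it tracks wherever this Deployment is installed.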
          command:
            - /controller
            - --policy-config-path=/data/policy.yaml
            - --prometheus-address=PROMETHEUS_ADDRESS
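          # NOTE: PROMETHEUS_ADDRESS is a placeholder; replace it with the Prometheus
          # endpoint reachable from this cluster before applying, for example
          # http://prometheus-server.monitoring.svc:8080 (hypothetical address,
          # adjust to your environment).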
          image: docker.io/gocrane/crane-scheduler-controller:0.0.23
          imagePullPolicy: IfNotPresent
          volumeMounts:
            - mountPath: /data
              name: dynamic-scheduler-policy
          resources:
            requests:
              cpu: 200m
              memory: 200Mi
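          # Liveness and readiness checks both hit the controller's /healthz endpoint
          # on port 8090 over plain HTTP.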
          livenessProbe:
            failureThreshold: 3
            httpGet:
              path: /healthz
              port: 8090
              scheme: HTTP
            initialDelaySeconds: 15
            periodSeconds: 10
          readinessProbe:
            httpGet:
              path: /healthz
              port: 8090
              scheme: HTTP
      dnsPolicy: ClusterFirst
      restartPolicy: Always
      volumes:
        - configMap:
            defaultMode: 420
            name: dynamic-scheduler-policy
          name: dynamic-scheduler-policy
---
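# The ConfigMap below provides the DynamicSchedulerPolicy that is mounted into the
# controller at /data/policy.yaml. Roughly: syncPolicy names the node metrics to
# sync from Prometheus (with refresh periods), predicate sets usage thresholds for
# filtering overloaded nodes, priority sets scoring weights, and hotValue damps
# bursts of placements onto one node (a summary of the Crane dynamic scheduler's
# documented behaviour; tune the values to your own cluster).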
apiVersion: v1
kind: ConfigMap
metadata:
  name: dynamic-scheduler-policy
  namespace: crane-system
data:
  policy.yaml: |
    apiVersion: scheduler.policy.crane.io/v1alpha1
    kind: DynamicSchedulerPolicy
    spec:
      syncAppPolicy:
        - name: range_pod_avg_cpu_usage
          period: 24h
        - name: range_pod_avg_mem_usage
          period: 24h
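      ## syncPolicy: node-level metrics the crane-scheduler-controller queries from
      ## Prometheus and writes to node annotations; "period" is how often each
      ## metric is refreshed.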
      syncPolicy:
        ##cpu usage
        - name: cpu_usage_avg_5m
          period: 3m
        - name: cpu_usage_max_avg_1h
          period: 15m
        - name: cpu_usage_max_avg_1d
          period: 3h
        ##memory usage
        - name: mem_usage_avg_5m
          period: 3m
        - name: mem_usage_max_avg_1h
          period: 15m
        - name: mem_usage_max_avg_1d
          period: 3h
        ##load average
        - name: load1_usage_avg_3m
          period: 3m
        - name: load5_usage_avg_1h
          period: 15m
        - name: load5_usage_avg_3d
          period: 3h
        ## range usage
        - name: range_cpu_usage_avg_5m
          period: 24h
        - name: range_mem_usage_avg_5m
          period: 24h
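      ## predicate: nodes whose annotated value for a listed metric exceeds
      ## maxLimitPecent are filtered out at scheduling time.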
      predicate:
        ##cpu usage
        - name: cpu_usage_avg_5m
          maxLimitPecent: 0.65
        - name: cpu_usage_max_avg_1h
          maxLimitPecent: 0.75
        ##memory usage
        - name: mem_usage_avg_5m
          maxLimitPecent: 0.65
        - name: mem_usage_max_avg_1h
          maxLimitPecent: 0.75
        ##load average
        - name: load1_usage_avg_3m
          maxLimitPecent: 1
        - name: load5_usage_avg_1h
          maxLimitPecent: 1
        ## range usage
        - name: range:cpu:range_pod_avg_cpu_usage:range_cpu_usage_avg_5m
          maxLimitPecent: 0.65
        - name: range:memory:range_pod_avg_mem_usage:range_mem_usage_avg_5m
          maxLimitPecent: 0.65
      priority:
        ###score = sum((1 - usage) * weight) / len, 0 <= score <= 10
        ##cpu usage
        - name: cpu_usage_avg_5m
          weight: 0.2
        - name: cpu_usage_max_avg_1h
          weight: 0.3
        - name: cpu_usage_max_avg_1d
          weight: 0.5
        ##memory usage
        - name: mem_usage_avg_5m
          weight: 0.2
        - name: mem_usage_max_avg_1h
          weight: 0.3
        - name: mem_usage_max_avg_1d
          weight: 0.5
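      ## hotValue: damps scheduling bursts. Roughly, every <count> pods bound to a
      ## node within <timeRange> add one point of hot value, which is subtracted
      ## from the node's priority score (interpretation of the Crane docs; tune to
      ## your workload).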
      hotValue:
        - timeRange: 5m
          count: 5
        - timeRange: 1m
          count: 2
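# To deploy: replace PROMETHEUS_ADDRESS above, then run
#   kubectl apply -f deployment.yaml
# (the crane-system namespace and the crane-scheduler-controller ServiceAccount are
# referenced here but not defined in this file, so they must be created elsewhere).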