This repository has been archived by the owner on Sep 19, 2018. It is now read-only.
-
Notifications
You must be signed in to change notification settings - Fork 3
/
main.go
176 lines (151 loc) · 5.13 KB
/
main.go
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
package main
import (
"flag"
"fmt"
"log"
"net/http"
"os"
"os/signal"
"strings"
"github.com/hashicorp/logutils"
"github.com/prometheus/client_golang/prometheus"
"github.com/prometheus/client_golang/prometheus/promhttp"
"github.com/utilitywarehouse/go-operational/op"
utilruntime "k8s.io/apimachinery/pkg/util/runtime"
"k8s.io/client-go/tools/clientcmd"
)
// strslice is a flag.Value implementation that accumulates string
// values across repeated uses of a flag. Each flag occurrence may
// itself be a comma-separated list, which is split into individual
// entries.
type strslice []string

// String returns the accumulated values formatted by fmt.Sprint.
// It is part of the flag.Value interface.
func (s *strslice) String() string {
	return fmt.Sprint(*s)
}

// Set parses a comma-separated list and appends every entry to the
// slice. It is part of the flag.Value interface and is called once
// per occurrence of the flag; it always returns nil.
func (s *strslice) Set(value string) error {
	*s = append(*s, strings.Split(value, ",")...)
	return nil
}
var (
	// appGitHash is the build revision; "master" is the default and is
	// expected to be overridden at build time (reported via op status).
	appGitHash = "master"

	// targets accumulates -target flag values. Because it has a special
	// type (strslice), it is registered with flag.Var in main.
	targets strslice

	kubeConfig      = flag.String("kubernetes-config", "", "path to the kubeconfig file, if unspecified then in-cluster config will be used")
	targetLabelName = flag.String("target-label", "ingress53.target", "Kubernetes key of the label that specifies the target type")
	r53ZoneID       = flag.String("route53-zone-id", "", "route53 hosted DNS zone id")
	debugLogs       = flag.Bool("debug", false, "enables debug logs")
	dryRun          = flag.Bool("dry-run", false, "if set, ingress53 will not make any Route53 changes")

	// metricUpdatesApplied counts Route53 record changes applied,
	// labelled by hostname and action.
	metricUpdatesApplied = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "ingress53",
			Subsystem: "route53",
			Name:      "updates_applied",
			Help:      "number of route53 updates",
		},
		[]string{"hostname", "action"},
	)

	// metricUpdatesReceived counts ingress update events received from
	// the kubernetes API, labelled by ingress and action.
	metricUpdatesReceived = prometheus.NewCounterVec(
		prometheus.CounterOpts{
			Namespace: "ingress53",
			Subsystem: "kubernetes",
			Name:      "updates_received",
			Help:      "number of ingress updates received from kubernetes",
		},
		[]string{"ingress", "action"},
	)

	// metricUpdatesRejected counts received updates that were rejected.
	metricUpdatesRejected = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: "ingress53",
			Subsystem: "kubernetes",
			Name:      "updates_rejected",
			Help:      "number of route53 updates rejected",
		},
	)

	// ListWatch runs every 30 seconds (approx). That means that we can allow up to 9
	// errors on a per 5m rate of the following metric otherwise every call to kube
	// api is failing (so rate > 0.03 => every call to api fails)
	metricKubernetesIOError = prometheus.NewCounter(
		prometheus.CounterOpts{
			Namespace: "ingress53",
			Subsystem: "kubernetes",
			Name:      "io_error",
			Help:      "number of errors occurred while talking to kube api",
		},
	)
)
// UpdateKubernetesIOErrorCount records one error from the kubernetes
// API machinery. Its signature matches entries of
// utilruntime.ErrorHandlers; the error value itself is only counted,
// not inspected.
func UpdateKubernetesIOErrorCount(_ error) {
	metricKubernetesIOError.Inc()
}
// main registers metrics, parses flags, configures level-filtered
// logging, builds the registrator, exposes operational/metrics HTTP
// endpoints on :5000, and runs the registrator until it stops or
// fails. Any fatal setup error is logged and exits with status 1.
func main() {
	prometheus.MustRegister(metricUpdatesApplied)
	prometheus.MustRegister(metricUpdatesReceived)
	prometheus.MustRegister(metricUpdatesRejected)
	prometheus.MustRegister(metricKubernetesIOError)

	// Count kubernetes client I/O errors surfaced through the
	// apimachinery runtime error handlers.
	utilruntime.ErrorHandlers = append(utilruntime.ErrorHandlers, UpdateKubernetesIOErrorCount)

	flag.Var(&targets, "target", "List of endpoints (ELB) targets to map ingress records to")
	flag.Parse()

	// Filter log lines by their "[LEVEL]" prefix; -debug lowers the
	// threshold from INFO to DEBUG.
	luf := &logutils.LevelFilter{
		Levels:   []logutils.LogLevel{"DEBUG", "INFO", "ERROR"},
		MinLevel: logutils.LogLevel("INFO"),
		Writer:   os.Stdout,
	}
	if *debugLogs {
		luf.MinLevel = logutils.LogLevel("DEBUG")
	}
	log.SetOutput(luf)

	ro := registratorOptions{
		Targets:         targets,
		TargetLabelName: *targetLabelName,
		Route53ZoneID:   *r53ZoneID,
	}
	// An explicit kubeconfig path selects out-of-cluster config;
	// otherwise in-cluster config is used (per the flag's help text).
	if *kubeConfig != "" {
		config, err := clientcmd.BuildConfigFromFlags("", *kubeConfig)
		if err != nil {
			log.Printf("[ERROR] could not create kubernetes client: %+v", err)
			os.Exit(1)
		}
		ro.KubernetesConfig = config
	}

	r, err := newRegistratorWithOptions(ro)
	if err != nil {
		log.Printf("[ERROR] could not create registrator: %+v", err)
		os.Exit(1)
	}

	// Stop the registrator gracefully on SIGINT.
	sigChannel := make(chan os.Signal, 1)
	signal.Notify(sigChannel, os.Interrupt)
	go func() {
		<-sigChannel
		log.Println("[INFO] interrupt signal: shutting down ...")
		r.Stop()
	}()

	// Serve operational status (/__/) and prometheus metrics
	// (/metrics) in the background.
	go func() {
		log.Printf("[INFO] starting HTTP endpoints ...")
		mux := http.NewServeMux()
		mux.Handle("/__/", op.NewHandler(
			op.NewStatus("ingress53", "ingress53 updates Route53 DNS records based on the ingresses available on the kubernetes cluster it runs on").
				AddOwner("infrastructure", "#infra").
				AddLink("github", "https://github.com/utilitywarehouse/ingress53").
				SetRevision(appGitHash).
				AddChecker("running", func(cr *op.CheckResponse) { cr.Healthy("service is running") }).
				ReadyAlways(),
		))
		mux.Handle("/metrics", promhttp.Handler())
		if err := http.ListenAndServe(":5000", mux); err != nil {
			log.Printf("[ERROR] could not start HTTP router: %+v", err)
			os.Exit(1)
		}
	}()

	// Run the registrator in the foreground; returning an error is
	// fatal, returning nil (after Stop) ends the process normally.
	if err := r.Start(); err != nil {
		log.Printf("[ERROR] registrator returned an error: %+v", err)
		os.Exit(1)
	}
}