198 lines
5.1 KiB
Go
198 lines
5.1 KiB
Go
package main
|
|
|
|
import (
|
|
"flag"
|
|
"unitechdev/golib/metrics"
|
|
"unitechdev/golib/metrics/prometheus"
|
|
"unitechdev/golib/tlog"
|
|
"math/rand"
|
|
"os"
|
|
"os/signal"
|
|
"strconv"
|
|
"sync"
|
|
"syscall"
|
|
"time"
|
|
|
|
stdprometheus "github.com/prometheus/client_golang/prometheus"
|
|
"golang.org/x/net/context"
|
|
)
|
|
|
|
func main() {
|
|
// parse the flags
|
|
flag.Parse()
|
|
|
|
processMetrics, err := metrics.NewProcessMetrics("test", "INDIA", "testProject")
|
|
if err != nil {
|
|
tlog.Error(err)
|
|
return
|
|
}
|
|
workPort := processMetrics.GetMetricsPort()
|
|
// init Exporter
|
|
tlog.Info("Beginning to serve on port :", workPort)
|
|
processMetrics.InitExporter()
|
|
|
|
// You can registry metrics any where as you want
|
|
// Now we registry a local gauge in function main
|
|
workersGauge := prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
|
|
Namespace: "worker",
|
|
Subsystem: "jobs",
|
|
Name: "worker_number",
|
|
Help: "Number of workers.",
|
|
}, []string{"server"})
|
|
|
|
host, _ := os.Hostname()
|
|
workersGauge.With("server", host).Set(float64(workers))
|
|
|
|
// create a channel with a 1000 Job buffer
|
|
jobChannel := make(chan *Job, 1000)
|
|
// start the job processor
|
|
go startJobProcessor(jobChannel)
|
|
// start a Goroutine to create some mock jobs
|
|
go createJobs(jobChannel)
|
|
|
|
exit := make(chan os.Signal, 1)
|
|
|
|
// kill (no param) default send syscanll.SIGTERM
|
|
// kill -2 is syscall.SIGINT
|
|
// kill -9 is syscall.SIGKILL but can't be catch, so don't need add it
|
|
signal.Notify(exit, syscall.SIGTERM, syscall.SIGINT)
|
|
<-exit
|
|
|
|
tlog.Info("Graceful shutdown Server in 10 seconds...")
|
|
ctx, cancel := context.WithTimeout(context.Background(), 10*time.Second)
|
|
defer cancel()
|
|
|
|
// set workers to 0 , then stop all workers and job creator
|
|
workers = 0
|
|
|
|
select {
|
|
case <-ctx.Done():
|
|
tlog.Info("timeout of 10 seconds.")
|
|
}
|
|
|
|
// init pusher
|
|
// pusher usually uses at DAG/Cron job processor
|
|
pusher := metrics.InitPusher("http://ops-dev.a.mobimagic.com:9091", "MetricsExample1")
|
|
if err := pusher.Collector(workerTotalCounter.Cv).Add(); err != nil {
|
|
tlog.Error("Could not push to Pushgateway:", err)
|
|
}
|
|
// time is up
|
|
close(jobChannel)
|
|
tlog.Infof("Jobs left: %d", len(jobChannel))
|
|
for leftJob := range jobChannel {
|
|
tlog.Info(leftJob)
|
|
}
|
|
|
|
tlog.Info("Server exit")
|
|
|
|
}
|
|
|
|
// Package-level state shared by main, the job creator, and the workers.
var (
	// modes lists the mock delivery channels a job can use.
	modes = []string{"SMS", "WeChat", "WhatsApp", "GooglePush", "ApplePush"}
	// workers holds the configured worker count (set by the -workers flag);
	// main sets it to 0 to signal the creator and workers to stop.
	// NOTE(review): read and written from several goroutines without
	// synchronization — looks like a data race; confirm and guard with
	// sync/atomic.
	workers = 0

	// Two ways to define name
	// Name : fullname
	// or Namespace / Subsystem / Name to combine a full name

	// workerTotalCounter counts processed jobs, labeled by worker id and mode.
	workerTotalCounter = prometheus.NewCounterFrom(stdprometheus.CounterOpts{
		Name: "worker_jobs_processed_total",
		Help: "Total number of jobs processed by the workers.",
	}, []string{"worker_id", "mode"})

	// inqueueGauge tracks how many jobs are currently sitting in the
	// channel, per mode (incremented by createJobs, decremented by workers).
	inqueueGauge = prometheus.NewGaugeFrom(stdprometheus.GaugeOpts{
		Namespace: "worker",
		Subsystem: "jobs",
		Name:      "inqueue",
		Help:      "Number of jobs in queue.",
	}, []string{"mode"})

	// processingTimeHistogram records per-job processing latency in seconds.
	processingTimeHistogram = prometheus.NewHistogramFrom(stdprometheus.HistogramOpts{
		Name:    "worker_jobs_process_time_seconds",
		Help:    "Amount of time spent processing jobs",
		Buckets: []float64{1, 2, 3, 4, 5}, // if not set, DefBuckets is used: []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10}
	}, []string{"worker_id", "mode"})
)
|
|
|
|
// Job is a mock unit of work: a delivery mode plus a simulated
// processing duration.
type Job struct {
	// Mode is one of the entries in the package-level modes slice.
	Mode string
	// Sleep is how long a worker pretends to process this job.
	Sleep time.Duration
}
|
|
|
|
// init registers the -workers command-line flag (default 10), which
// controls how many worker goroutines startJobProcessor launches.
func init() {
	flag.IntVar(&workers, "workers", 10, "Number of workers to use")
}
|
|
|
|
func getMode() string {
|
|
return modes[rand.Int()%len(modes)]
|
|
}
|
|
|
|
// makeJob creates a new job with a random sleep time between 0.5s and 5s
|
|
func makeJob() *Job {
|
|
return &Job{
|
|
Mode: getMode(),
|
|
Sleep: time.Duration(rand.Int()%4500+500) * time.Millisecond,
|
|
}
|
|
}
|
|
|
|
func createJobs(jobs chan<- *Job) {
|
|
for {
|
|
if workers == 0 {
|
|
tlog.Info("Stop create jobs.")
|
|
return
|
|
}
|
|
|
|
// create a random job
|
|
job := makeJob()
|
|
// track the job in the inqueue tracker
|
|
inqueueGauge.With("mode", job.Mode).Add(1)
|
|
// send the job down the channel
|
|
jobs <- job
|
|
// don't pile up too quickly
|
|
time.Sleep(200 * time.Millisecond)
|
|
}
|
|
}
|
|
|
|
// creates a worker that pulls jobs from the job channel
|
|
func startWorker(workerID int, jobs <-chan *Job) {
|
|
for {
|
|
select {
|
|
// read from the job channel
|
|
case job := <-jobs:
|
|
startTime := time.Now()
|
|
// decrement the in queue tracker
|
|
inqueueGauge.With("mode", job.Mode).Sub(1)
|
|
|
|
// fake processing the request
|
|
time.Sleep(job.Sleep)
|
|
tlog.Infof("[%d][%s] Processed job in %0.3f seconds", workerID, job.Mode, time.Now().Sub(startTime).Seconds())
|
|
// track the total number of jobs processed by the worker
|
|
workerTotalCounter.With("worker_id", strconv.FormatInt(int64(workerID), 10), "mode", job.Mode).Inc()
|
|
processingTimeHistogram.With("worker_id", strconv.FormatInt(int64(workerID), 10), "mode", job.Mode).Observe(time.Now().Sub(startTime).Seconds())
|
|
|
|
if workers == 0 {
|
|
tlog.Infof("worker[%d] quit", workerID)
|
|
return
|
|
}
|
|
|
|
}
|
|
}
|
|
}
|
|
|
|
func startJobProcessor(jobs <-chan *Job) {
|
|
tlog.Infof("[INFO] starting %d workers\n", workers)
|
|
wait := sync.WaitGroup{}
|
|
// notify the sync group we need to wait for 10 goroutines
|
|
wait.Add(workers)
|
|
|
|
// start 10 works
|
|
for i := 0; i < workers; i++ {
|
|
go func(workerID int) {
|
|
// start the worker
|
|
startWorker(workerID, jobs)
|
|
wait.Done()
|
|
}(i)
|
|
}
|
|
|
|
wait.Wait()
|
|
}
|