
Commit e32d374

run server worker once on launch, then make it loop

1 parent 20c48bd

2 files changed, +66 -57 lines changed


app/services/serverworker/serverworker.go

Lines changed: 65 additions & 56 deletions
@@ -72,74 +72,83 @@ func (w *Worker) RunWithSeed(ctx context.Context, window time.Duration, addresse
     return w.Run(ctx, window)
 }

-func (w *Worker) Run(ctx context.Context, window time.Duration) error {
-    tc := time.NewTicker(window)
-    for range tc.C {
-        zap.L().Info("Running server scraper worker")
-        addresses, err := w.db.GetServersToQuery(ctx, window)
-        if err != nil {
-            zap.L().Error("failed to get servers to query",
-                zap.Error(err))
-            continue
-        }
-
-        if len(addresses) == 0 {
-            zap.L().Info("no servers to query, skipping")
-            continue
-        }
+func (w *Worker) FetchAndQueryServers(ctx context.Context, window time.Duration) error {
+    zap.L().Info("Running server scraper worker")
+    addresses, err := w.db.GetServersToQuery(ctx, window)
+    if err != nil {
+        zap.L().Error("failed to get servers to query",
+            zap.Error(err))
+        return err
+    }

-        zap.L().Info("got servers to update",
-            zap.Int("servers", len(addresses)))
+    if len(addresses) == 0 {
+        zap.L().Info("no servers to query, skipping")
+        return err
+    }

-        for s := range w.sc.Scrape(ctx, addresses) {
-            zap.L().Info("updating server",
-                zap.String("address", s.IP),
-                zap.Bool("active", s.Active))
+    zap.L().Info("got servers to update",
+        zap.Int("servers", len(addresses)))

-            if err := w.db.Upsert(ctx, s); err != nil {
-                zap.L().Error("failed to upsert server",
-                    zap.Error(err), zap.String("ip", s.IP))
-            }
+    for s := range w.sc.Scrape(ctx, addresses) {
+        zap.L().Info("updating server",
+            zap.String("address", s.IP),
+            zap.Bool("active", s.Active))

-            time.Sleep(time.Second)
+        if err := w.db.Upsert(ctx, s); err != nil {
+            zap.L().Error("failed to upsert server",
+                zap.Error(err), zap.String("ip", s.IP))
         }

-        zap.L().Info("finished updating servers",
-            zap.Int("servers", len(addresses)))
+        time.Sleep(time.Second)
+    }

-        // TODO: GetAll needs an "include inactive" flag, and make default duration configurable
-        // It should also probably just use existing data queried earlier.
-        // Only retrieve servers active since 8 hours ago (Used to be 3, but we got bigger now I guess, so many servers!!!)
-        all, err := w.db.GetAll(ctx, time.Duration(-8)*time.Hour)
-        if err != nil {
-            zap.L().Error("failed to get all servers for metrics",
-                zap.Error(err))
-            continue
+    zap.L().Info("finished updating servers",
+        zap.Int("servers", len(addresses)))
+
+    // TODO: GetAll needs an "include inactive" flag, and make default duration configurable
+    // It should also probably just use existing data queried earlier.
+    // Only retrieve servers active since 8 hours ago (Used to be 3, but we got bigger now I guess, so many servers!!!)
+    all, err := w.db.GetAll(ctx, time.Duration(-8)*time.Hour)
+    if err != nil {
+        zap.L().Error("failed to get all servers for metrics",
+            zap.Error(err))
+        return err
+    }
+
+    zap.L().Info("Saving all servers into a JSON file to be used as cache")
+    // Let's save all servers info our cache file to be used in our API data processing instead of DB
+    err = w.db.GenerateCacheFromData(ctx, all)
+    if err != nil {
+        zap.L().Error("There was an error converting native array of servers to JSON data",
+            zap.Error(err))
+        return err
+    }
+
+    active := 0
+    inactive := 0
+    for _, s := range all {
+        if s.Active {
+            active++
+        } else {
+            inactive++
         }
+        Players.With(prometheus.Labels{
+            "addr": s.IP,
+        }).Set(float64(s.Core.Players))
+    }
+    Active.Set(float64(active))
+    Inactive.Set(float64(inactive))
+    return nil
+}

-        zap.L().Info("Saving all servers into a JSON file to be used as cache")
-        // Let's save all servers info our cache file to be used in our API data processing instead of DB
-        err = w.db.GenerateCacheFromData(ctx, all)
+func (w *Worker) Run(ctx context.Context, window time.Duration) error {
+    _ = w.FetchAndQueryServers(ctx, window)
+    tc := time.NewTicker(window)
+    for range tc.C {
+        err := w.FetchAndQueryServers(ctx, window)
         if err != nil {
-            zap.L().Error("There was an error converting native array of servers to JSON data",
-                zap.Error(err))
             continue
         }
-
-        active := 0
-        inactive := 0
-        for _, s := range all {
-            if s.Active {
-                active++
-            } else {
-                inactive++
-            }
-            Players.With(prometheus.Labels{
-                "addr": s.IP,
-            }).Set(float64(s.Core.Players))
-        }
-        Active.Set(float64(active))
-        Inactive.Set(float64(inactive))
     }

     return nil
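Note: the shape of this refactor is the common "do one pass immediately, then repeat on every tick" pattern. time.NewTicker only delivers its first tick after a full window has elapsed, which is why Run now calls FetchAndQueryServers once up front before entering the loop. A minimal, generic Go sketch of the same pattern follows; the names (runEvery, doWork) are illustrative and not from this repository, and the ctx.Done/Stop handling is optional hardening that the commit itself does not add.

package main

import (
    "context"
    "fmt"
    "time"
)

// runEvery does one pass immediately, then repeats a pass on every tick
// until the context is cancelled.
func runEvery(ctx context.Context, window time.Duration, doWork func(context.Context) error) error {
    // time.NewTicker fires its first tick only after one full window,
    // hence the extra call before entering the loop.
    if err := doWork(ctx); err != nil {
        fmt.Println("initial pass failed:", err)
    }

    tc := time.NewTicker(window)
    defer tc.Stop()

    for {
        select {
        case <-ctx.Done():
            return ctx.Err()
        case <-tc.C:
            if err := doWork(ctx); err != nil {
                // Log and keep looping, like the `continue` in the commit's Run.
                fmt.Println("pass failed:", err)
            }
        }
    }
}

func main() {
    ctx, cancel := context.WithTimeout(context.Background(), 3*time.Second)
    defer cancel()

    _ = runEvery(ctx, time.Second, func(context.Context) error {
        fmt.Println("pass at", time.Now().Format(time.RFC3339))
        return nil
    })
}

The commit keeps the simpler for range tc.C form and discards the error of the initial pass; the select on ctx.Done and the deferred Stop above are just the usual extras for a cancellable worker loop.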

app/transports/api/servers/h_get.go

Lines changed: 1 addition & 1 deletion
@@ -21,7 +21,7 @@ func (s *service) get(w http.ResponseWriter, r *http.Request) {
         if errors.Is(err, db.ErrNotFound) {
             web.StatusNotFound(w, err)
         } else {
-            web.StatusInternalServerError(w, errors.Wrap(err, "failed to get server"))
+            // web.StatusInternalServerError(w, errors.Wrap(err, "failed to get server"))
         }
         return
     }
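Note on this change: with the 500 response commented out, the else branch now returns without writing anything, and net/http then responds with an implicit 200 OK and an empty body. A tiny standalone sketch (hypothetical handler, not the repo's) showing that default:

package main

import (
    "fmt"
    "net/http"
    "net/http/httptest"
)

func main() {
    // Handler that hits an error path but never writes a status or body,
    // like the edited else branch in h_get.go.
    h := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        // error occurred, but no status is written before returning
    })

    rec := httptest.NewRecorder()
    h.ServeHTTP(rec, httptest.NewRequest(http.MethodGet, "/servers", nil))
    fmt.Println(rec.Code, rec.Body.Len()) // prints: 200 0
}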
