package main

import (
	"bytes"
	"encoding/json"
	"flag"
	"fmt"
	"io/ioutil"
	"log"
	"net/http"
	"os"
	"strconv"
	"strings"
	"time"

	"github.com/nxadm/tail"
	yaml "gopkg.in/yaml.v2"
)
const (
	// SearchLineFor determines which lines of the log file we're interested in.
	SearchLineFor = "eligible"

	// LogColumn is used for splitting the log line into two pieces.
	//
	// The log lines have a timestamp, module name, namespace, and log level on
	// the left side of the column. We aren't interested in any of that, so we
	// split the log line on LogColumn, the run of spaces that follows the log
	// level.
	LogColumn = "     "

	// APIURL is where we send the data.
	APIURL = "https://plot-tracker.app/api/v1/counter"

	// ChiaDateFormat is how Chia formats the dates in the logs.
	//
	// https://golang.org/pkg/time/#pkg-constants
	ChiaDateFormat = "2006-01-02T15:04:05.000"

	// EligibilityHistorySize is the maximum number of items in our history
	// slice. I like using 100 because it looks nice in the UI and it's easy to
	// show the percentage. Since the history slice is boolean, we're only
	// measuring whether the farm had any eligible plots for that signage point.
	EligibilityHistorySize = 100

	// DefaultSleepBetweenIterations is the default number of milliseconds we
	// sleep between iterations.
	DefaultSleepBetweenIterations time.Duration = 500

	// MaxBackoff is the maximum amount of time we're willing to back off
	// before we panic and decide that something is wrong that can't be fixed
	// by slowing down our HTTP POST requests.
	MaxBackoff time.Duration = 2000
)
// SleepBetweenIterations is the number of milliseconds we sleep before moving
// on to the next iteration. The first time you run this app, it reads your log
// file from the very beginning and sends all of the applicable entries to the
// server. While the server could handle that, we think it's better to pause
// for a moment before sending the next batch. Right now, it only sleeps 0.5
// seconds. It will eventually catch up and then listen for live updates from
// the log file.
var SleepBetweenIterations time.Duration = DefaultSleepBetweenIterations

// BackoffIncrease is the amount we add to SleepBetweenIterations each time an
// HTTP POST fails.
var BackoffIncrease time.Duration = 100

// Version is the version of the program.
var Version = "dev"
// Config contains the fields we need for running the client.
type Config struct {
	LogFile string `yaml:"log_file"`
	APIKey  string `yaml:"api_key"`
	FarmKey string `yaml:"farm_key"`
}
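
// For reference, a config file for this client might look like the following.
// The keys come from the yaml tags above; the values are placeholders, and the
// log path is just a typical Chia default rather than anything this program
// requires:
//
//	log_file: /home/user/.chia/mainnet/log/debug.log
//	api_key: your-api-key
//	farm_key: your-farm-key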
var cfg Config

// LoadConfig loads the YAML config from a file and sets the global Config var.
func LoadConfig(filename string) {
	log.Printf("Loading config %s\n", filename)

	f, err := os.Open(filename)
	if err != nil {
		panic(err)
	}
	defer f.Close()

	decoder := yaml.NewDecoder(f)
	err = decoder.Decode(&cfg)
	if err != nil {
		panic(err)
	}
}
// LogData is a type for keeping our log data together.
type LogData struct {
	Plots              int        `json:"plots"`
	Eligible           int        `json:"eligible"`
	Proofs             int        `json:"proofs"`
	TimeTaken          float64    `json:"time_taken"`
	Timestamp          *time.Time `json:"timestamp"`
	EligibilityHistory []bool     `json:"eligibility_history"`
}
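
// For illustration, the JSON payload marshalled from this struct looks roughly
// like this (numbers taken from the sample log line parsed in main; the
// history is shortened and the exact timestamp formatting comes from the
// standard library's time.Time marshalling):
//
//	{
//	    "plots": 172,
//	    "eligible": 0,
//	    "proofs": 0,
//	    "time_taken": 0.02905,
//	    "timestamp": "2021-06-18T08:28:35.589Z",
//	    "eligibility_history": [false, true, false]
//	}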
// UpdateHistoryQueue uses EligibilityHistorySize to keep the most recent data
// on whether the farm had any eligible plots for a signage point.
func UpdateHistoryQueue(queue []bool, eligible bool) []bool {
	queue = append(queue, eligible)

	if len(queue) > EligibilityHistorySize {
		queue = queue[1:]
	}

	return queue
}
// Send sends the data to the API.
func (ld LogData) Send() {
	// Turn LogData into a JSON string.
	js, err := json.Marshal(&ld)
	if err != nil {
		log.Println(err)
		return
	}

	// Create a new HTTP request.
	req, err := http.NewRequest("POST", APIURL, bytes.NewBuffer(js))
	if err != nil {
		log.Println(err)
		return
	}

	req.Header.Set("Content-Type", "application/json")
	req.Header.Set("X-API-Key", cfg.APIKey)
	req.Header.Set("X-Farmer-ID", cfg.FarmKey)

	// Send a custom User-Agent in the request headers. The User-Agent header
	// is a characteristic string that lets servers and network peers identify
	// the client making the request.
	//
	// User-Agent: <product> / <product-version> <comment>
	//
	// <product>
	//   A product identifier: its name or development codename.
	//
	// <product-version>
	//   Version number of the product.
	//
	// <comment>
	//   Zero or more comments containing more details; sub-product
	//   information, for example.
	//
	// https://developer.mozilla.org/en-US/docs/Web/HTTP/Headers/User-Agent
	req.Header.Set("User-Agent", "PlotTracker/"+Version)

	tr := &http.Transport{
		MaxIdleConns:       10,
		IdleConnTimeout:    30 * time.Second,
		DisableCompression: true,
	}

	// Send it away!
	client := &http.Client{Transport: tr}

	resp, err := client.Do(req)
	if err != nil {
		log.Println(err)

		// Backoff: if we've already slowed down as far as we're willing to,
		// give up.
		if SleepBetweenIterations > MaxBackoff {
			panic("Error sending data to API, tried backing off")
		}

		// Otherwise, increase how long we sleep between iterations since we
		// had an error POSTing to the API.
		SleepBetweenIterations += BackoffIncrease
		log.Println("Backing off")

		return
	}
	defer resp.Body.Close()

	// The API returned a status that indicates everything went well, so we're
	// done.
	if resp.StatusCode == http.StatusOK {
		log.Println(resp.Status)

		// Reset SleepBetweenIterations to DefaultSleepBetweenIterations since
		// we had a successful POST.
		if SleepBetweenIterations > DefaultSleepBetweenIterations {
			SleepBetweenIterations = DefaultSleepBetweenIterations
			log.Println("Stop backing off, reset sleep duration")
		}

		return
	}

	// If we make it here, then we might have had a problem on either the
	// client or the server side. We print the response status and body in the
	// logs to help identify where the problem may be. Please report any
	// issues to the repo: https://github.com/levidurfee/plot-tracker/issues
	body, _ := ioutil.ReadAll(resp.Body)
	log.Println(resp.Status, string(body))
}
// GetTimestamp parses the left side of the log column to get the timestamp.
//
//	2021-06-18T08:28:35.589 harvester chia.harvester.harvester: INFO
//
// https://golang.org/pkg/time/#example_Parse
func GetTimestamp(line string) (time.Time, error) {
	pieces := strings.Split(line, " ")
	textTimestamp := pieces[0]

	return time.Parse(ChiaDateFormat, textTimestamp)
}
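
// As a quick sanity check, calling GetTimestamp with the sample line from the
// doc comment above would behave roughly like this (the second line is an
// illustrative comment, not output the program prints):
//
//	t, _ := GetTimestamp("2021-06-18T08:28:35.589 harvester chia.harvester.harvester: INFO")
//	// t == 2021-06-18 08:28:35.589 +0000 UTC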
func main() {
	fmt.Printf("Plot Tracker %s\n", Version)
	fmt.Println("==============================")
	log.Println("Starting...")

	// Optionally specify the config file.
	cfgFlag := flag.String("config", "config.yml", "location of the config file")
	flag.Parse()

	LoadConfig(*cfgFlag)

	// https://github.com/nxadm/tail
	// https://pkg.go.dev/github.com/nxadm/tail#Config
	tailer, err := tail.TailFile(
		cfg.LogFile, // The file we're reading
		tail.Config{
			Follow: true, // Continue looking for new lines (tail -f)
			ReOpen: true, // Reopen recreated files (tail -F)
		},
	)
	// If something went wrong with TailFile, we panic and quit. Hopefully this
	// never happens, but it might, and the panic can be useful for debugging.
	if err != nil {
		panic(err)
	}

	// We want to keep track of the latest signage points and whether they were
	// eligible or not. It allows for easy visual grepping.
	var EligibilityHistory []bool

	// This is the main event loop. When a new line is written to the file,
	// this loop will start an iteration.
	for line := range tailer.Lines {
		// We're only interested in certain lines in the log file; the rest of
		// them can be ignored.
		if strings.Contains(line.Text, SearchLineFor) {
			logColumns := strings.Split(line.Text, LogColumn)

			// The log lines are fairly verbose, so we split the part we want
			// by spaces; that makes it easier to grab the information we want.
			// We also need to convert the data from a `string` type to an
			// `int` type.
			//
			// 0 plots were eligible for farming 2ce35966c9... Found 0 proofs. Time: 0.02905 s. Total 172 plots
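			//
			// Splitting that example message on single spaces gives the
			// indexes used below:
			//
			//	data[0]  -> "0"       (eligible plots)
			//	data[8]  -> "0"       (proofs found)
			//	data[11] -> "0.02905" (time taken, in seconds)
			//	data[14] -> "172"     (total plots)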
			data := strings.Split(logColumns[1], " ")

			plots, _ := strconv.Atoi(data[14])
			eligible, _ := strconv.Atoi(data[0])
			proofs, _ := strconv.Atoi(data[8])
			timeTaken, _ := strconv.ParseFloat(data[11], 64)

			// Get the timestamp of the log entry. Using the timestamp keeps us
			// from adding the same data more than once. When this program
			// starts, the tail package reads a few of the last lines of the
			// log file and sends those. So, in the API, we can check whether
			// the log entry is greater than or equal to the last timestamp
			// sent from the client.
			t, err := GetTimestamp(logColumns[0])
			if err != nil {
				log.Println(err)
			}

			logData := &LogData{
				Plots:     plots,
				Eligible:  eligible,
				Proofs:    proofs,
				Timestamp: &t,
				TimeTaken: timeTaken,
			}

			// Set wasEligible to false by default.
			wasEligible := false

			// If one or more plots were eligible, set wasEligible to true.
			if eligible > 0 {
				wasEligible = true
			}

			// Add the eligibility to the history queue.
			EligibilityHistory = UpdateHistoryQueue(EligibilityHistory, wasEligible)
			logData.EligibilityHistory = EligibilityHistory

			// Create a new goroutine and send the data to the API.
			go logData.Send()

			// @TODO re-eval this.
			time.Sleep(SleepBetweenIterations * time.Millisecond)
		}
	}
}