From 0fe6f4926021d1337f8fc5a3122444aee815337e Mon Sep 17 00:00:00 2001 From: Soypete Date: Mon, 15 Jan 2024 11:20:34 -0700 Subject: [PATCH] all: update courses for 2024 We have updated the course to remove general programming guidelines and the linter part. We are adding content to focus on pprof and other speed and memory management practices. --- .gitignore | 1 + README.md | 6 + .../ex-2-channels-routines}/README.md | 0 .../ex-2-channels-routines}/main.go | 0 .../solution/.golangci-lint.yml | 0 .../ex-2-channels-routines}/solution/main.go | 0 .../ex-3-memory-sketch}/README.md | 2 + .../Untitled Diagram.drawio | 0 .../ex-3-memory-sketch}/solution/main.go | 0 .../linter}/.golangci-lint.yml | 0 {ex-1-local-env => archived/linter}/README.md | 0 {ex-1-local-env => archived/linter}/main.go | 0 ex-4-benchmarking/README.md | 88 ------------ ex-4-benchmarking/solution/cpu.prof | Bin 106 -> 0 bytes ex-4-benchmarking/solution/mem.prof | Bin 1312 -> 0 bytes exercise-1/README.md | 94 ++++++++++++ exercise-1/Untitled Diagram.drawio | 72 ++++++++++ exercise-1/main.go | 42 ++++++ exercise-1/solution/.golangci-lint.yml | 29 ++++ exercise-1/solution/main.go | 45 ++++++ exercise-2/README.md | 21 +++ exercise-2/main.go | 121 ++++++++++++++++ exercise-2/solution/main.go | 135 ++++++++++++++++++ exercise-3/README.md | 29 ++++ .../solution => exercise-3}/main.go | 0 exercise-3/solution/main.go | 135 ++++++++++++++++++ .../solution/worker_test.go | 0 exercise-3/worker_test.go | 14 ++ 28 files changed, 746 insertions(+), 88 deletions(-) rename {ex-2-channels-routines => archived/ex-2-channels-routines}/README.md (100%) rename {ex-2-channels-routines => archived/ex-2-channels-routines}/main.go (100%) rename {ex-2-channels-routines => archived/ex-2-channels-routines}/solution/.golangci-lint.yml (100%) rename {ex-2-channels-routines => archived/ex-2-channels-routines}/solution/main.go (100%) rename {ex-3-memory-sketch => archived/ex-3-memory-sketch}/README.md (97%) rename 
{ex-3-memory-sketch => archived/ex-3-memory-sketch}/Untitled Diagram.drawio (100%) rename {ex-3-memory-sketch => archived/ex-3-memory-sketch}/solution/main.go (100%) rename {ex-1-local-env => archived/linter}/.golangci-lint.yml (100%) rename {ex-1-local-env => archived/linter}/README.md (100%) rename {ex-1-local-env => archived/linter}/main.go (100%) delete mode 100644 ex-4-benchmarking/README.md delete mode 100644 ex-4-benchmarking/solution/cpu.prof delete mode 100644 ex-4-benchmarking/solution/mem.prof create mode 100644 exercise-1/README.md create mode 100644 exercise-1/Untitled Diagram.drawio create mode 100644 exercise-1/main.go create mode 100644 exercise-1/solution/.golangci-lint.yml create mode 100644 exercise-1/solution/main.go create mode 100644 exercise-2/README.md create mode 100644 exercise-2/main.go create mode 100644 exercise-2/solution/main.go create mode 100644 exercise-3/README.md rename {ex-4-benchmarking/solution => exercise-3}/main.go (100%) create mode 100644 exercise-3/solution/main.go rename {ex-4-benchmarking => exercise-3}/solution/worker_test.go (100%) create mode 100644 exercise-3/worker_test.go diff --git a/.gitignore b/.gitignore index eddb27f..e73ca18 100644 --- a/.gitignore +++ b/.gitignore @@ -11,6 +11,7 @@ # Output of the go coverage tool, specifically when used with LiteIDE *.out +*.pprof # Dependency directories (remove the comment below to include it) # vendor/ diff --git a/README.md b/README.md index d5b18fa..bd1515a 100644 --- a/README.md +++ b/README.md @@ -20,6 +20,12 @@ These exercises are for people programming but who are new to the go programming - [Golang Bot](https://golangbot.com/learn-golang-series/) - [Learn Go](https://go.dev/learn/) +## Exercises + +[Exercise 1](/exercise-1/README.md) +[Exercise 2](/exercise-2/README.md) +[Exercise 3](/exercise-3/README.md) + ### Solutions There are examples of solutions for each exercise in the internal solutions directory. 
They are not the only possible solutions to the exercises but are references that can be used if you get stuck. diff --git a/ex-2-channels-routines/README.md b/archived/ex-2-channels-routines/README.md similarity index 100% rename from ex-2-channels-routines/README.md rename to archived/ex-2-channels-routines/README.md diff --git a/ex-2-channels-routines/main.go b/archived/ex-2-channels-routines/main.go similarity index 100% rename from ex-2-channels-routines/main.go rename to archived/ex-2-channels-routines/main.go diff --git a/ex-2-channels-routines/solution/.golangci-lint.yml b/archived/ex-2-channels-routines/solution/.golangci-lint.yml similarity index 100% rename from ex-2-channels-routines/solution/.golangci-lint.yml rename to archived/ex-2-channels-routines/solution/.golangci-lint.yml diff --git a/ex-2-channels-routines/solution/main.go b/archived/ex-2-channels-routines/solution/main.go similarity index 100% rename from ex-2-channels-routines/solution/main.go rename to archived/ex-2-channels-routines/solution/main.go diff --git a/ex-3-memory-sketch/README.md b/archived/ex-3-memory-sketch/README.md similarity index 97% rename from ex-3-memory-sketch/README.md rename to archived/ex-3-memory-sketch/README.md index 4e31ce2..7869cd1 100644 --- a/ex-3-memory-sketch/README.md +++ b/archived/ex-3-memory-sketch/README.md @@ -64,6 +64,8 @@ _cum_: cumulative ## Resources: - [Scheduler saga](https://www.youtube.com/watch?v=YHRO5WQGh0k) +- [pprof for beginners]() +- [pprof docs]() - [GC traces](https://www.ardanlabs.com/blog/2019/05/garbage-collection-in-go-part2-gctraces.html) - [how to pprof](https://dev.to/agamm/how-to-profile-go-with-pprof-in-30-seconds-592a) - [Two Go Programs, Three Different Profiling Techniques](https://www.youtube.com/watch?v=nok0aYiGiYA) diff --git a/ex-3-memory-sketch/Untitled Diagram.drawio b/archived/ex-3-memory-sketch/Untitled Diagram.drawio similarity index 100% rename from ex-3-memory-sketch/Untitled Diagram.drawio rename to 
archived/ex-3-memory-sketch/Untitled Diagram.drawio diff --git a/ex-3-memory-sketch/solution/main.go b/archived/ex-3-memory-sketch/solution/main.go similarity index 100% rename from ex-3-memory-sketch/solution/main.go rename to archived/ex-3-memory-sketch/solution/main.go diff --git a/ex-1-local-env/.golangci-lint.yml b/archived/linter/.golangci-lint.yml similarity index 100% rename from ex-1-local-env/.golangci-lint.yml rename to archived/linter/.golangci-lint.yml diff --git a/ex-1-local-env/README.md b/archived/linter/README.md similarity index 100% rename from ex-1-local-env/README.md rename to archived/linter/README.md diff --git a/ex-1-local-env/main.go b/archived/linter/main.go similarity index 100% rename from ex-1-local-env/main.go rename to archived/linter/main.go diff --git a/ex-4-benchmarking/README.md b/ex-4-benchmarking/README.md deleted file mode 100644 index 367065c..0000000 --- a/ex-4-benchmarking/README.md +++ /dev/null @@ -1,88 +0,0 @@ -# Exercise 4 - Benchmarking - -Benchmark is a tool often under utilized by people new to go. The [Benchmarking tool]() ships with the go binary and is part of the [go tools]() testing suite. - -[Pprof]() is another tool that ships with the standard library. This tool gives insights to the [memory information]() and profiling of your go app. - -In this exercise we will use both tools to analytics our worker pool app. - -## Part 1: - -Complete the benchmarking test suite in the file `ex-4-bemarking/worker_test.go`. - -run your benchmark tests using the command: -```bash -go test -bench=. -benchmem=true -benchtime=20s -``` - -Questions: -1. What information is provided by the benchmark? -1. Do you consider you code efficient? -1. 
Post the amount of time your code to took execute with 10 workers, the os, and the processor (your can get this information with `go version`) - - example: 900ns darwin/arm64 - - -### additional practice: Run pprof on your machine -Start by installing graphviz on your machine - -On Mac: -```bash -brew install graphviz -``` - -also setup a memory and cpu profiles by adding these two command line flags -```go -var cpuprofile = flag.String("cpuprofile", "", "write cpu profile to `file`") -var memprofile = flag.String("memprofile", "", "write memory profile to `file`") -``` - -add this code to your main function to write your memory profile to a file that can be processed with the pprof tool. - -```go -if *memprofile != "" { - f, err := os.Create(*memprofile) - if err != nil { - log.Fatal("could not create memory profile: ", err) - } - defer f.Close() // error handling omitted for example - runtime.GC() // get up-to-date statistics - if err := pprof.WriteHeapProfile(f); err != nil { - log.Fatal("could not write memory profile: ", err) - } -} -``` - -add this code to your main function to write your cpu profile to a file that can be processed with the pprof tool. - -```go -if *cpuprofile != "" { - f, err := os.Create(*cpuprofile) - if err != nil { - log.Fatal("could not create CPU profile: ", err) - } - defer f.Close() // error handling omitted for example - if err := pprof.StartCPUProfile(f); err != nil { - log.Fatal("could not start CPU profile: ", err) - } - defer pprof.StopCPUProfile() -} -``` -Add a local server to your `func main()` for pprof to scrape: - -```go -go func() { - log.Println(http.ListenAndServe("localhost:6060:, nil)) -}() -``` - -run you main.go in one window. In another window access pprof - -```bash -go tool pprof http://localhost:6060/debug/pprof/profile?seconds=30 -``` - -explore more commands [docs](pkg.go.dev/net/http/pprof). -## Part 2: - -1. Run the code coverage tool -2. write tests to achieve 60% code coverage. 
diff --git a/ex-4-benchmarking/solution/cpu.prof b/ex-4-benchmarking/solution/cpu.prof deleted file mode 100644 index 0dad779e1a0ddc80371186c5e4b69dfee72e8591..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 106 zcmb2|=3oE;mj4WX3>*J@T-dh#;_a)`zlm|Na4-rm0ZC>7mVm2wzsE;`nF$SRAF)U= zaWEP&7_k>8<`(3n78|i9=a=S{7%?Xolp1m8CFbQ9rzYp;r4%zVF#P}TC)_0K2ebhI D(x@PU diff --git a/ex-4-benchmarking/solution/mem.prof b/ex-4-benchmarking/solution/mem.prof deleted file mode 100644 index a6a98d5ef8b16ac5366dcb5de212b07f24640c2d..0000000000000000000000000000000000000000 GIT binary patch literal 0 HcmV?d00001 literal 1312 zcmV+*1>gD~iwFP!00004|D2LtXdG1(fHS+Zo4d0=IWwDV&Tf7>9b22E?MyR>1{E!> z4_c@ce<=78C!3q?nBAFWW|ml=wklRpX#FV>6cO5@Sgce8#XPhqB3iIQEiJU5w)Kyu zv?y4tFLLkQP1@>1<38NIbH97ObMC$8F_}mZd%W4y6`90AR%DWZm?D!TpLas6o-kA* z;U7D8?*k)2BzP+U(g-0E5F~oaP)Q6Ay!*;NFcKsNhb0bUprE3s4VA=k?!(u3WE`4t z4&#u)jGi@ABIEI|U*%ASgGml$$YDhj(;PKDT z?FA!25^yXFtd;}-g031WN#ZXj&T*K8_kbIdprOWLCtf)#U?=R*IP8Qx<~byI<&uB| z&I?GO8@oA7;oSc7T$_S3y*x4nJ=nvcg0qJNRNyLds6YV=9H#N~oPcR~zQkb~da;+o z3_f%IBG+c%Q;EY2AfjH>1~lc%_>pIIJ}4xS>?A2Aohc>QJ+NEP;>n|@d2ANmO!3$( zl(3}tX$39QY)ZSEx)@2eB$F8GhDvgH>IoK^OJO};QuXh!%&Ow-{Bs|=7%l75DxJaMLhn# zfW7d8!eK8Av62#`A1;c= z=?4={y{Hv6Wj^)o4xJB*+}A%a$jf;`;N#O$Ail1!vA$?mWdVcjWHJ2A06 zqM@C2s#U){?oT{H%TZ`2%6`L(?6kvI#m=njHA1?uB`rP$!?Iti2Q&<=sj3rE^FcbZ zHE<&u*c&Xr9$DM`8l4E}jJ1(gt4?55d~4Jk9kZ(L1bvd0tuQEC*DI_^DpVEU9#}w? 
zr`)seK)~Hpl2mX{fG{XMeD&5dGrI#D{WoF8>O&*=8EqcSq38I>v zZPOWNn%3=}_Gdst8ijS&^IWfDLwh+aZ=;iqDqZx{onU(W8c(h4RIB#gOD@+5YvWE( zyZtsZvIc&_u1x9m?+)AiiQzVoJQXoXpkGf|}> z^DcDor2hbOb+z8WtntVgPVuaXnZDrMKdeEaYthYxOo3*uKPBpypsGV;o3b%(5 Wt%Z?Op8h8Q0RR8daai<%2><{y;)GZL diff --git a/exercise-1/README.md b/exercise-1/README.md new file mode 100644 index 0000000..050e6d9 --- /dev/null +++ b/exercise-1/README.md @@ -0,0 +1,94 @@ +# Exercise 3 - Draw Go Scheduler + +## Part 1 - scheduling a single process go app: +Using the free and opensource tool [Draw IO app](https://app.diagrams.net/) draw a diagram worker-pool app from the last exercise being scheduled by the Go Scheduler + +## Part 2 - Add pprof to your Go App: +Using the provided `main.go` file, add pprof and explore the memory insights of a single process Go app. + +If you are completing this on your own, here are some helpful videos: +* [pprof setup](https://youtu.be/vSdOAzrVvaU) +* [pprof cli](https://youtu.be/Fuz3fNg30cU) +* [pprof web ui](https://youtu.be/v6skRrlXsjY) + +### Step 1: +add pprof server to your text parsing app. +First add the pprof driver to your app. + +```go +import _ "net/http/pprof" +``` + +_*NOTE*: the "\_" means that the import is added globally as a backend system. This is common for servers, db drivers, etc_ + +### Step 2: +add a pprof server as it's own goroutine in your main function. + +```go +// run pprof +go func() { + http.ListenAndServe("localhost:6060", nil) +}() +``` + +_*NOTE*: When you do a default `ListenAndServe()` to spin up your server, your pprof is open to the public internet. To add protections use a `mux.Server()` for a custom server and you basic security precautions._ + +### Step 3: +install [graphviz](https://graphviz.org/download/) on your machine to get the visual insights. 
+ +*Mac:* +```bash +brew install graphviz +``` + +### Step 4: +run pprof while your worker-pool is executing + +```bash +go tool pprof -http=:18080 http://localhost:6060/debug/pprof/profile?seconds=30 +``` + +In the default graph each node is a function that your program is running. Size and color indicate how much cpu and time each function is taking. + +To access the command line tool, run: + +```bash +go tool pprof http://localhost:6060/debug/pprof/allocs +``` + +In the command line tool you can search for functions like this: + +```bash +(pprof) list worker +``` + +The functions will provide insights in the following categories: + +* allocs: A sampling of all past heap memory allocations +* heap: A sampling of heap memory allocations of live objects. +* profile: CPU profile. +* goroutine: Stack traces of all current goroutines. +* block: Stack traces that led to blocking on synchronization primitives +* cmdline: The command line invocation of the current program +* mutex: Stack traces of holders of contended mutexes +* threadcreate: Stack traces that led to the creation of new OS threads +* trace: A trace of execution of the current program. + + +### Step 5: + +Take some time to explore pprof. Be able to answer the following questions: +1. What function takes the most time? +1. What function takes the most cpu? +1. What function takes the most memory? +1. Are any functions inlined? 
+ + +## Resources: +- [Scheduler saga](https://www.youtube.com/watch?v=YHRO5WQGh0k) +- [pprof for beginners](https://captainnobody1.medium.com/a-beginners-guide-to-pprof-optimizing-your-go-code-c0310e59c485) +- [pprof talk](https://www.youtube.com/watch?v=HjzJ5r2D8ZM) +- [pprof docs](https://github.com/google/pprof/tree/main/doc) +- [GC traces](https://www.ardanlabs.com/blog/2019/05/garbage-collection-in-go-part2-gctraces.html) +- [how to pprof](https://dev.to/agamm/how-to-profile-go-with-pprof-in-30-seconds-592a) +- [Two Go Programs, Three Different Profiling Techniques](https://www.youtube.com/watch?v=nok0aYiGiYA) diff --git a/exercise-1/Untitled Diagram.drawio b/exercise-1/Untitled Diagram.drawio new file mode 100644 index 0000000..848f86b --- /dev/null +++ b/exercise-1/Untitled Diagram.drawio @@ -0,0 +1,72 @@ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/exercise-1/main.go b/exercise-1/main.go new file mode 100644 index 0000000..307aee3 --- /dev/null +++ b/exercise-1/main.go @@ -0,0 +1,42 @@ +package main + +import ( + "fmt" + "math/rand" + "os" + "strings" + "time" +) + +// getWords gets a slice of messages to process +func getWords() []string { + file, _ := os.ReadFile("datums/melville-moby_dick.txt") + words := strings.Split(string(file), " ") + return words +} + +func detectWords() int { + msgs := getWords() + var numWordsDetected int + for _, word := range msgs { + // golang is too powerful, so we have to slow it down to run pprof + // this 'sleep' is to simlutate work. 
+ length := time.Duration(rand.Int63n(50)) + time.Sleep(length * time.Millisecond) + // this condition returns words like whale, whaling, whales + if strings.Contains(word, "whal") { + numWordsDetected++ + } + } + return numWordsDetected + +} + +func main() { + + startTime := time.Now() + // start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + numWords := detectWords() + fmt.Printf("Number of words: %d\nTime to process file: %2f seconds", numWords, time.Since(startTime).Seconds()) +} diff --git a/exercise-1/solution/.golangci-lint.yml b/exercise-1/solution/.golangci-lint.yml new file mode 100644 index 0000000..c696de9 --- /dev/null +++ b/exercise-1/solution/.golangci-lint.yml @@ -0,0 +1,29 @@ +run: + timeout: 5m + issues-exit-code: 2 + tests: false + modules-download-mode: readonly + go: '1.18' + linters: + enable: + - bodyclose + - deadcode + - dogsled + - errcheck + - goconst + - gocyclo + - gofmt + - gosimple + - govet + - importas + - ineffassign + - misspell + - revive + - rowserrcheck + - sqlclosecheck + - staticcheck + - structcheck + - stylecheck + - typecheck + - unused + - varcheck diff --git a/exercise-1/solution/main.go b/exercise-1/solution/main.go new file mode 100644 index 0000000..465f1e3 --- /dev/null +++ b/exercise-1/solution/main.go @@ -0,0 +1,45 @@ +package main + +import ( + "fmt" + "net/http" + _ "net/http/pprof" + "os" + "strings" + "time" +) + +// getWords gets a slice of messages to process +func getWords() []string { + file, _ := os.ReadFile("datums/melville-moby_dick.txt") + words := strings.Split(string(file), " ") + return words +} + +func detectWords() int { + msgs := getWords() + var numWordsDetected int + for _, word := range msgs { + // this condition returns words like whale, whaling, whales + if strings.Contains(word, "whal") { + numWordsDetected++ + // golang is too powerful, so we have to slow it down to run pprof + time.Sleep(50 * 
time.Millisecond) + } + } + return numWordsDetected + +} + +func main() { + // run pprof + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() + + startTime := time.Now() + // start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + numWords := detectWords() + fmt.Printf("Number of words: %d\nTime to process file: %2f seconds", numWords, time.Since(startTime).Seconds()) +} diff --git a/exercise-2/README.md b/exercise-2/README.md new file mode 100644 index 0000000..4b67e5f --- /dev/null +++ b/exercise-2/README.md @@ -0,0 +1,21 @@ +# Build a worker pool + +Using the template provided in `main.go` build a worker pool using what you learned about go channels and goroutines. This example will accept a number of workers as a flag `--workers` and then use the workers to perform a task. It should take between 10-15 min? + +## Step 1 build the worker pool + +Fill in the `func (wp *workerPool) run()` to queue up the amount of workers input via command line arguments. + +## Step 2 running your code + +You can run the following exercise code locally with the following command. + +```bash +go run main.go --workers=2 +``` + +Follow-up questions: +1. is this a parallel or concurrent process? +1. do we need channels and go routines to write this program? What are advantages and disadvantages? +1. did you notice any changes in your pprof metrics? +1. do multiple routines give you any advantage? 
diff --git a/exercise-2/main.go b/exercise-2/main.go new file mode 100644 index 0000000..77acb65 --- /dev/null +++ b/exercise-2/main.go @@ -0,0 +1,121 @@ +package main + +import ( + "flag" + "fmt" + "log" + "math/rand" + "net/http" + _ "net/http/pprof" + "os" + "runtime" + "runtime/pprof" + "strings" + "sync" + "time" +) + +type workerPool struct { + numWorkers int + mu *sync.Mutex + msgs chan string + wg *sync.WaitGroup +} + +func (wp *workerPool) run() int { + numWords := 0 + // TODO: add logic for queueing workers based on the number provided in the flag. + // add the number of words detected by each worker to the totalNumberWorkder +} + +// getMessages gets a slice of messages to process +func getMessages() []string { + // file, _ := os.ReadFile("datums/melville-moby_dick.txt") + words := strings.Split("We dont want to overload io threads and the runtime while we are benchmarking, so we are addings some others words that are not as much but talk about go and other cool software things", " ") + return words +} + +// this will block and not close if the len(msgs) is larger than the channel buffer. 
+func (wp *workerPool) queueMessages() { + msgs := getMessages() + for _, msg := range msgs { + // add messages to string channel + wp.msgs <- msg + } + + // close the worker channel and signal there won't be any more data + close(wp.msgs) +} + +func (wp workerPool) detectWords() int { + var numWordsDetected int + for word := range wp.msgs { + // simulate work + length := time.Duration(rand.Int63n(50)) + time.Sleep(length * time.Millisecond) + // this condition returns words like whale, whaling, whales + if strings.Contains(word, "whal") { + wp.mu.Lock() + numWordsDetected++ + wp.mu.Unlock() + } + } + return numWordsDetected +} + +// export cpu and mem profiles to a file that can be processed by pprof tool +func runPprof(memprofile, cpuprofile string) { + + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() + + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + defer f.Close() // error handling omitted for example + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatal("could not create memory profile: ", err) + } + defer f.Close() // error handling omitted for example + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatal("could not write memory profile: ", err) + } + } +} + +func main() { + numWorkers := flag.Int("workers", 1, "number of workers") + cpuprofile := flag.String("cpuprofile", "", "write cpu profile to `file`") + memprofile := flag.String("memprofile", "", "write memory profile to `file`") + + flag.Parse() + startTime := time.Now() + + // run pprof (memprofile first to match runPprof's parameter order) + runPprof(*memprofile, *cpuprofile) + + workerPool := &workerPool{ + wg: new(sync.WaitGroup), + msgs: make(chan string), + numWorkers: *numWorkers, + mu: new(sync.Mutex), + } + + 
// start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + go workerPool.queueMessages() + numWords := workerPool.run() + fmt.Printf("Number of words: %d\nTime to process file: %2f seconds", numWords, time.Since(startTime).Seconds()) +} diff --git a/exercise-2/solution/main.go b/exercise-2/solution/main.go new file mode 100644 index 0000000..fae3be8 --- /dev/null +++ b/exercise-2/solution/main.go @@ -0,0 +1,135 @@ +package main + +import ( + "flag" + "fmt" + "log" + "math/rand" + "net/http" + _ "net/http/pprof" + "os" + "runtime" + "runtime/pprof" + "strings" + "sync" + "time" +) + +type workerPool struct { + numWorkers int + mu *sync.Mutex + msgs chan string + wg *sync.WaitGroup +} + +func (wp *workerPool) run() int { + numWords := 0 + // start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + wp.wg.Add(wp.numWorkers) + for i := 0; i < wp.numWorkers; i++ { + go func() { + defer wp.wg.Done() + wordsDetected := wp.detectWords() + wp.mu.Lock() + defer wp.mu.Unlock() + numWords = numWords + wordsDetected + }() + } + + // wait for the workers to stop processing and exit + wp.wg.Wait() + return numWords +} + +// getMessages gets a slice of messages to process +func getMessages() []string { + // file, _ := os.ReadFile("datums/melville-moby_dick.txt") + words := strings.Split("We dont want to overload io threads and the runtime while we are benchmarking, so we are addings some others words that are not as much but talk about go and other cool software things", " ") + return words +} + +// this will block and not close if the len(msgs) is larger than the channel buffer. 
+func (wp *workerPool) queueMessages() { + msgs := getMessages() + for _, msg := range msgs { + // add messages to string channel + wp.msgs <- msg + } + + // close the worker channel and signal there won't be any more data + close(wp.msgs) +} + +func (wp workerPool) detectWords() int { + var numWordsDetected int + for word := range wp.msgs { + // simulate work + length := time.Duration(rand.Int63n(50)) + time.Sleep(length * time.Millisecond) + // this condition returns words like whale, whaling, whales + if strings.Contains(word, "whal") { + wp.mu.Lock() + numWordsDetected++ + wp.mu.Unlock() + } + } + return numWordsDetected +} + +// export cpu and mem profiles to a file that can be processed by pprof tool +func runPprof(memprofile, cpuprofile string) { + + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() + + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + defer f.Close() // error handling omitted for example + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatal("could not create memory profile: ", err) + } + defer f.Close() // error handling omitted for example + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatal("could not write memory profile: ", err) + } + } +} + +func main() { + numWorkers := flag.Int("workers", 1, "number of workers") + cpuprofile := flag.String("cpuprofile", "", "write cpu profile to `file`") + memprofile := flag.String("memprofile", "", "write memory profile to `file`") + + flag.Parse() + startTime := time.Now() + + // run pprof (memprofile first to match runPprof's parameter order) + runPprof(*memprofile, *cpuprofile) + + workerPool := &workerPool{ + wg: new(sync.WaitGroup), + msgs: make(chan string), + numWorkers: *numWorkers, + mu: new(sync.Mutex), + } + + 
// start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + go workerPool.queueMessages() + numWords := workerPool.run() + fmt.Printf("Number of words: %d\nTime to process file: %2f seconds", numWords, time.Since(startTime).Seconds()) +} diff --git a/exercise-3/README.md b/exercise-3/README.md new file mode 100644 index 0000000..4dcb3d2 --- /dev/null +++ b/exercise-3/README.md @@ -0,0 +1,29 @@ +# Exercise 3 - Testing and Benchmarking + +The Go standard library ships with a bunch of testing tools right out of the box. These include the [Main](https://pkg.go.dev/testing#hdr-Main) test tool, the [Benchmarking tool](https://pkg.go.dev/testing#hdr-Benchmarks), the [Fuzzing tool](https://pkg.go.dev/testing#hdr-Fuzzing), and the Unit [Testing tool](https://pkg.go.dev/testing#Testing). + +In this exercise we will use both tools to analyze our worker pool app. + +## Part 1: + +Benchmark is a tool often under utilized by people new to go. + +Complete the benchmarking test suite in the file `exercise-3/worker_test.go`. + +run your benchmark tests using the command: + +```bash +go test -bench=. -benchmem=true -benchtime=20s +``` + +Questions: + +1. What information is provided by the benchmark? +1. Do you consider your code efficient? +1. Post the amount of time your code took to execute with 10 workers, the os, and the processor (you can get this information with `go version`) + * example: 900ns darwin/arm64 + +## Part 2: + +1. Run the code coverage tool +2. write tests to achieve 60% code coverage. 
diff --git a/ex-4-benchmarking/solution/main.go b/exercise-3/main.go similarity index 100% rename from ex-4-benchmarking/solution/main.go rename to exercise-3/main.go diff --git a/exercise-3/solution/main.go b/exercise-3/solution/main.go new file mode 100644 index 0000000..fae3be8 --- /dev/null +++ b/exercise-3/solution/main.go @@ -0,0 +1,135 @@ +package main + +import ( + "flag" + "fmt" + "log" + "math/rand" + "net/http" + _ "net/http/pprof" + "os" + "runtime" + "runtime/pprof" + "strings" + "sync" + "time" +) + +type workerPool struct { + numWorkers int + mu *sync.Mutex + msgs chan string + wg *sync.WaitGroup +} + +func (wp *workerPool) run() int { + numWords := 0 + // start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + wp.wg.Add(wp.numWorkers) + for i := 0; i < wp.numWorkers; i++ { + go func() { + defer wp.wg.Done() + wordsDetected := wp.detectWords() + wp.mu.Lock() + defer wp.mu.Unlock() + numWords = numWords + wordsDetected + }() + } + + // wait for the workers to stop processing and exit + wp.wg.Wait() + return numWords +} + +// getMessages gets a slice of messages to process +func getMessages() []string { + // file, _ := os.ReadFile("datums/melville-moby_dick.txt") + words := strings.Split("We dont want to overload io threads and the runtime while we are benchmarking, so we are addings some others words that are not as much but talk about go and other cool software things", " ") + return words +} + +// this will block and not close if the len(msgs) is larger than the channel buffer. 
+func (wp *workerPool) queueMessages() { + msgs := getMessages() + for _, msg := range msgs { + // add messages to string channel + wp.msgs <- msg + } + + // close the worker channel and signal there won't be any more data + close(wp.msgs) +} + +func (wp workerPool) detectWords() int { + var numWordsDetected int + for word := range wp.msgs { + // simulate work + length := time.Duration(rand.Int63n(50)) + time.Sleep(length * time.Millisecond) + // this condition returns words like whale, whaling, whales + if strings.Contains(word, "whal") { + wp.mu.Lock() + numWordsDetected++ + wp.mu.Unlock() + } + } + return numWordsDetected +} + +// export cpu and mem profiles to a file that can be processed by pprof tool +func runPprof(memprofile, cpuprofile string) { + + go func() { + fmt.Println(http.ListenAndServe("localhost:6060", nil)) + }() + + if cpuprofile != "" { + f, err := os.Create(cpuprofile) + if err != nil { + log.Fatal("could not create CPU profile: ", err) + } + defer f.Close() // error handling omitted for example + if err := pprof.StartCPUProfile(f); err != nil { + log.Fatal("could not start CPU profile: ", err) + } + defer pprof.StopCPUProfile() + } + + if memprofile != "" { + f, err := os.Create(memprofile) + if err != nil { + log.Fatal("could not create memory profile: ", err) + } + defer f.Close() // error handling omitted for example + runtime.GC() // get up-to-date statistics + if err := pprof.WriteHeapProfile(f); err != nil { + log.Fatal("could not write memory profile: ", err) + } + } +} + +func main() { + numWorkers := flag.Int("workers", 1, "number of workers") + cpuprofile := flag.String("cpuprofile", "", "write cpu profile to `file`") + memprofile := flag.String("memprofile", "", "write memory profile to `file`") + + flag.Parse() + startTime := time.Now() + + // run pprof (memprofile first to match runPprof's parameter order) + runPprof(*memprofile, *cpuprofile) + + workerPool := &workerPool{ + wg: new(sync.WaitGroup), + msgs: make(chan string), + numWorkers: *numWorkers, + mu: new(sync.Mutex), + } + + 
// start the workers in the background and wait for data on the channel + // we already know the number of workers, we can increase the WaitGroup once + go workerPool.queueMessages() + numWords := workerPool.run() + fmt.Printf("Number of words: %d\nTime to process file: %2f seconds", numWords, time.Since(startTime).Seconds()) +} diff --git a/ex-4-benchmarking/solution/worker_test.go b/exercise-3/solution/worker_test.go similarity index 100% rename from ex-4-benchmarking/solution/worker_test.go rename to exercise-3/solution/worker_test.go diff --git a/exercise-3/worker_test.go b/exercise-3/worker_test.go new file mode 100644 index 0000000..80a039e --- /dev/null +++ b/exercise-3/worker_test.go @@ -0,0 +1,14 @@ +package main + +import ( + "testing" +) + +func Benchmark2Workers(b *testing.B) { benchmarkWorkers(2, b) } +func Benchmark5Workers(b *testing.B) { benchmarkWorkers(5, b) } +func Benchmark10Workers(b *testing.B) { benchmarkWorkers(10, b) } +func Benchmark20Workers(b *testing.B) { benchmarkWorkers(20, b) } + +func benchmarkWorkers(i int, b *testing.B) { + // TODO: fill in the benchmarking code +}