%!TEX TS-program = xelatex
%!TEX options = -aux-directory=Debug -shell-escape -file-line-error -interaction=nonstopmode -halt-on-error -synctex=1 "%DOC%"
\documentclass{article}
\input{LaTeX-Submodule/template.tex}
% Additional packages & macros
\usepackage{changepage} % Modify page width
\usepackage{multicol} % Use multiple columns
\usepackage[explicit]{titlesec} % Modify section heading styles
\titleformat{\section}{\raggedright\normalfont\bfseries}{}{0em}{#1}
\titleformat{\subsection}{\raggedright\normalfont\small\bfseries}{}{0em}{#1}
%% A4 page
\geometry{
a4paper,
margin = 10mm
}
%% Hide horizontal rule
\renewcommand{\headrulewidth}{0pt}
\fancyhead{}
%% Hide page numbers
\pagenumbering{gobble}
%% Multi-columns setup
\setlength\columnsep{4pt}
%% Paragraph setup
\setlength\parindent{0pt}
\setlength\parskip{0pt}
%% Customise section heading styles
% \titleformat*\section{\raggedright\bfseries}
\begin{document}
% Modify spacing
\titlespacing*\section{0pt}{1ex}{1ex}
\titlespacing*\subsection{0pt}{1ex}{1ex}
%
\setlength\abovecaptionskip{8pt}
\setlength\belowcaptionskip{-15pt}
\setlength\textfloatsep{0pt}
%
\setlength\abovedisplayskip{1pt}
\setlength\belowdisplayskip{1pt}
\begin{multicols}{3}
\section{Introduction}
\subsection{Event}
Set of outcomes from an experiment.
\subsection{Sample Space}
Set of all possible outcomes \(\Omega\).
\subsection{Intersection}
Set of outcomes that occur in both \(A\) and \(B\)
\begin{equation*}
A \cap B \quad\quad \text{or} \quad\quad AB
\end{equation*}
\subsection{Disjoint}
No common outcomes, \(AB = \varnothing\)
\begin{equation*}
\Pr{\left( AB \right)} = \Pr{\left( A \,\vert\, B \right)} = 0
\end{equation*}
\subsection{Union}
Set of outcomes in either \(A\) or \(B\)
\begin{equation*}
A \cup B
\end{equation*}
\subsection{Complement}
Set of all outcomes not in \(A\), but in \(\Omega \)
\begin{align*}
A\overline{A} & = \varnothing \\
A \cup \overline{A} & = \Omega
\end{align*}
\subsection{Subset}
\(A\) is a (non-strict) subset of \(B\) if all elements in \(A\) are also in \(B\) --- \(A \subset B\).
\begin{equation*}
AB = A \quad\quad \text{and} \quad\quad A \cup B = B
\end{equation*}
\begin{equation*}
\forall A:A\subset \Omega \land \varnothing \subset A
\end{equation*}
\begin{align*}
\Pr{\left( A \right)} & \leq \Pr{\left( B \right)} \\
\Pr{\left( B \,\vert\, A \right)} & = 1 \\
\Pr{\left( A \,\vert\, B \right)} & = \frac{\Pr{\left( A \right)}}{\Pr{\left( B \right)}}
\end{align*}
\subsection{Identities}
\begin{align*}
A \left( BC \right) & = \left( AB \right) C \\
A \cup \left( B \cup C \right) & = \left( A \cup B \right) \cup C \\
A \left(B \cup C\right) & = AB \cup AC \\
A \cup BC & = \left( A \cup B \right) \left( A \cup C \right)
\end{align*}
\subsection{Probability}
Measure of how likely an event is to occur
\begin{equation*}
\Pr{\left( A \right)} \quad\quad \text{or} \quad\quad \mathrm{P}\left( A \right)
\end{equation*}
\begin{equation*}
0 \leq \Pr{\left( A \right)} \leq 1
\end{equation*}
where an event with probability 0 never occurs, and an event with probability 1 always occurs.
\begin{align*}
\Pr{\left( \Omega \right)} & = 1 \\
\Pr{\left( \overline{A} \right)} & = 1 - \Pr{\left( A \right)}
\end{align*}
\subsection{Multiplication Rule}
For independent events \(A\) and \(B\)
\begin{equation*}
\Pr{\left( AB \right)} = \Pr{\left( A \right)} \Pr{\left( B \right)}.
\end{equation*}
For any events \(A\) and \(B\) (dependent or not)
\begin{equation*}
\Pr{\left( AB \right)} = \Pr{\left( A \,\vert \, B \right)} \Pr{\left( B \right)}
\end{equation*}
\subsection{Addition Rule}
For any events \(A\) and \(B\)
\begin{equation*}
\Pr{\left( A \cup B \right)} = \Pr{\left( A \right)} + \Pr{\left( B \right)} - \Pr{\left( AB \right)}.
\end{equation*}
If \(AB = \varnothing \), then \(\Pr{\left( AB \right)} = 0\), so that \(\Pr{\left( A \cup B \right)} = \Pr{\left( A \right)} + \Pr{\left( B \right)}\).
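For example (values chosen purely for illustration): rolling a fair die with \(A = \left\{ 2,\: 4,\: 6 \right\}\) and \(B = \left\{ 1,\: 2 \right\}\),
\begin{equation*}
\Pr{\left( A \cup B \right)} = \frac{3}{6} + \frac{2}{6} - \frac{1}{6} = \frac{2}{3}.
\end{equation*}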
\subsection{De Morgan's Laws}
\begin{align*}
\overline{A \cup B} & = \overline{A} \ \overline{B} \\
\overline{AB} & = \overline{A} \cup \overline{B}.
\end{align*}
\begin{align*}
\Pr{\left( A \cup B \right)} & = 1 - \Pr{\left( \overline{A} \ \overline{B} \right)} \\
\Pr{\left( AB \right)} & = 1 - \Pr{\left( \overline{A} \cup \overline{B} \right)}
\end{align*}
\subsection{Circuits}
A signal can pass through a circuit if there is a path from start
to finish consisting entirely of functioning components, where each
component functions independently of the others.
Let \(W_i\) be the event that component \(i\) functions and \(S\)
be the event that the system functions, then
\begin{equation*}
\Pr{\left( W_i \right)} = p
\end{equation*}
and \(\Pr{\left( S \right)}\) will be a function of \(p\) defined \(f:\left[ 0,\; 1 \right] \to \left[ 0,\; 1 \right]\).
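As an illustrative sketch (generic layouts, not a specific circuit from the unit): for two components in series the system functions only if both function, whereas in parallel it functions if at least one does,
\begin{align*}
\Pr{\left( S_{\text{series}} \right)} & = p^2 \\
\Pr{\left( S_{\text{parallel}} \right)} & = 1 - \left( 1 - p \right)^2
\end{align*}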
\subsection{Conditional Probability}
The probability of event \(A\) given \(B\) has already occurred
\begin{equation*}
\Pr{\left( A \,\vert\, B \right)} = \frac{\Pr{\left( A B \right)}}{\Pr{\left( B \right)}}.
\end{equation*}
\(A\) and \(B\) are independent events if
\begin{align*}
\Pr{\left( A \,\vert\, B \right)} & = \Pr{\left( A \right)} \\
\Pr{\left( B \,\vert\, A \right)} & = \Pr{\left( B \right)}
\end{align*}
in which case the following statements also hold
\begin{align*}
\Pr{\left( A \,\vert\, \overline{B} \right)} & = \Pr{\left( A \right)} \\
\Pr{\left( \overline{A} \,\vert\, B \right)} & = \Pr{\left( \overline{A} \right)} \\
\Pr{\left( \overline{A} \,\vert\, \overline{B} \right)} & = \Pr{\left( \overline{A} \right)}
\end{align*}
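For example (a purely illustrative calculation): drawing two cards without replacement, with \(A\) the event that the first card is an ace and \(B\) the event that the second card is an ace,
\begin{equation*}
\Pr{\left( B \,\vert\, A \right)} = \frac{3}{51} \quad\quad \text{and} \quad\quad \Pr{\left( AB \right)} = \frac{4}{52} \cdot \frac{3}{51} = \frac{1}{221}.
\end{equation*}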
\subsection{Probability Rule with Conditional}
\begin{align*}
\Pr{\left( \overline{A} \,\vert\, C \right)} & = 1 - \Pr{\left( A \,\vert\, C \right)} \\
\Pr{\left( A \cup B \,\vert\, C \right)} & =
\begin{aligned}[t]
\Pr{\left( A \,\vert\, C \right)} + \Pr{\left( B \,\vert\, C \right)} \\
- \Pr{\left( AB \,\vert\, C \right)}
\end{aligned}
\\
\Pr{\left( A B \,\vert\, C \right)} & = \Pr{\left( A \,\vert\, BC \right)} \Pr{\left( B \,\vert\, C \right)}
\end{align*}
\subsection{Conditional Independence}
Even when \(\Pr{\left( A \,\vert\, B \right)} \neq \Pr{\left( A
\right)}\) (i.e., \(A\) and \(B\) are dependent), \(A\) and \(B\)
are conditionally independent given \(C\) if
\begin{equation*}
\Pr{\left( A \,\vert\, BC \right)} = \Pr{\left( A \,\vert\, C \right)}
\end{equation*}
so that
\begin{equation*}
\Pr{\left( AB \,\vert\, C \right)} = \Pr{\left( A \,\vert\, C \right)} \Pr{\left( B \,\vert\, C \right)}.
\end{equation*}
Conversely, even when \(\Pr{\left( A \,\vert\, B \right)} = \Pr{\left( A \right)}\) (i.e., \(A\) and \(B\) are independent), \(A\) and \(B\) are conditionally dependent given \(C\) if
\begin{align*}
\Pr{\left( A \,\vert\, BC \right)} & \neq \Pr{\left( A \,\vert\, C \right)} \\
\Pr{\left( AB \,\vert\, C \right)} & = \Pr{\left( A \,\vert\, BC \right)} \Pr{\left( B \,\vert\, C \right)}.
\end{align*}
Pairwise independence does not imply mutual independence for three events.
% \begin{gather*}
% \begin{cases}
% \Pr{\left( A B \right)} = \Pr{\left( A \right)} \Pr{\left( B \right)} \\
% \Pr{\left( A C \right)} = \Pr{\left( A \right)} \Pr{\left( C \right)} \\
% \Pr{\left( B C \right)} = \Pr{\left( B \right)} \Pr{\left( C \right)}
% \end{cases} \not\Rightarrow \\
% \Pr{\left( A B C \right)} = \Pr{\left( A \right)} \Pr{\left( B \right)} \Pr{\left( C \right)}.
% \end{gather*}
Independence should not be assumed unless explicitly stated.
\subsection{Marginal Probability}
The probability of an event irrespective of the outcome of another
variable.
\subsection{Total Probability}
Given \(A = AB \cup A\overline{B}\)
\begin{align*}
\Pr{\left( A \right)} & = \Pr{\left( AB \right)} + \Pr{\left( A\overline{B} \right)} \\
\Pr{\left( A \right)} & =
\begin{aligned}[t]
& \Pr{\left( A \,\vert\, B \right)}\Pr{\left( B \right)} \\
& + \Pr{\left( A \,\vert\, \overline{B} \right)}\Pr{\left( \overline{B} \right)}
\end{aligned}
\end{align*}
In general, partition \(\Omega\) into disjoint events \(B_1,\; B_2,\; \dots,\; B_n\),
such that \(\bigcup_{i=1}^n B_i = \Omega\)
\begin{equation*}
\Pr{\left( A \right)} = \sum_{i = 1}^n \Pr{\left( A \,\vert\, B_i \right)}\Pr{\left( B_i \right)}
\end{equation*}
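For example (numbers chosen purely for illustration): if \(\Pr{\left( B \right)} = 0.3\), \(\Pr{\left( A \,\vert\, B \right)} = 0.8\), and \(\Pr{\left( A \,\vert\, \overline{B} \right)} = 0.2\), then
\begin{equation*}
\Pr{\left( A \right)} = 0.8 \times 0.3 + 0.2 \times 0.7 = 0.38.
\end{equation*}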
\subsection{Bayes' Theorem}
\begin{equation*}
\Pr{\left( A \,\vert\, B \right)} = \frac{\Pr{\left( B \,\vert\, A \right)}\Pr{\left( A \right)}}{\Pr{\left( B \right)}}
\end{equation*}
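As an illustrative example (the rates below are hypothetical, not from the unit): a test with \(\Pr{\left( + \,\vert\, D \right)} = 0.9\), \(\Pr{\left( + \,\vert\, \overline{D} \right)} = 0.05\), and prevalence \(\Pr{\left( D \right)} = 0.01\) gives
\begin{equation*}
\Pr{\left( D \,\vert\, + \right)} = \frac{0.9 \times 0.01}{0.9 \times 0.01 + 0.05 \times 0.99} \approx 0.15
\end{equation*}
where the denominator follows from total probability.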
\section{Combinatorics}
\subsection{Number of Outcomes}
Let \(\abs{A}\) denote the number of outcomes in an event \(A\).
For \(k\) disjoint events \({\left\{ S_1,\:\ldots,\:S_k \right\}}\)
where the \(i\)th event has \(n_i\) possible outcomes:
\subsection{Addition Principle}
Number of possible samples from any event
\begin{equation*}
\abs*{\bigcup_{i = 1}^{k} S_i} = \sum_{i = 1}^k n_i
\end{equation*}
\subsection{Multiplication Principle}
Number of ways to choose one sample from every event (one from each \(S_i\))
\begin{equation*}
\abs*{S_1 \times S_2 \times \cdots \times S_k} = \prod_{i = 1}^k n_i
\end{equation*}
\subsection{Counting Probability}
If all outcomes in the sample space \(S\) are equally likely, then for an event \(S_i \subset S\)
\begin{equation*}
\Pr{\left( S_i \right)} = \frac{\abs{S_i}}{\abs{S}}
\end{equation*}
\subsection{Ordered Sampling with Replacement}
Number of ways to choose \(k\) objects from a set with \(n\)
elements
\begin{equation*}
n^k
\end{equation*}
\subsection{Ordered Sampling without Replacement}
Number of ways to arrange \(k\) objects from a set of \(n\)
elements, or the \(k\)-permutation of \(n\)-elements
\begin{align*}
\prescript{n}{}{P}_k & = \frac{n!}{\left( n - k \right)!}
\end{align*}
for \(0 \leq k \leq n\).
\subsection{Unordered Sampling without Replacement}
Number of ways to choose \(k\) objects from a set of \(n\)
elements, or the \(k\)-combination of \(n\)-elements
\begin{align*}
\prescript{n}{}{C}_k = \binom{n}{k} = \frac{\prescript{n}{}{P}_k}{k!} = \frac{n!}{k! \left( n - k \right)!}
\end{align*}
for \(0 \leq k \leq n\).
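For example (a purely illustrative count): choosing \(k = 2\) objects from \(n = 5\),
\begin{equation*}
\prescript{5}{}{P}_2 = \frac{5!}{3!} = 20 \quad\quad \text{and} \quad\quad \prescript{5}{}{C}_2 = \frac{20}{2!} = 10.
\end{equation*}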
\subsection{Unordered Sampling with Replacement}
Number of ways to choose \(k\) objects from a set with \(n\)
elements
\begin{equation*}
\binom{n + k - 1}{k}
\end{equation*}
\subsection{Binomial Coefficient Recurrence Relation}
\begin{equation*}
\binom{n}{k} = \binom{n - 1}{k - 1} + \binom{n - 1}{k}
\end{equation*}
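As a quick illustrative check of the recurrence (values arbitrary):
\begin{equation*}
\binom{5}{2} = \binom{4}{1} + \binom{4}{2} = 4 + 6 = 10.
\end{equation*}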
\end{multicols}
\begin{table}[H]
\centering
\begin{tabular}{c c c c c c}
\toprule
\textbf{Distribution} & \textbf{Restrictions} & \textbf{PMF} & \textbf{CDF} & \(\E{\left( X \right)}\) & \(\Var{\left( X \right)}\) \\
\midrule
\(X \sim \operatorname{Uniform}{\left( a,\: b \right)}\) & \(x \in \left\{ a, \dots, b \right\}\) & \(\frac{1}{b - a + 1}\) & \(\frac{x - a + 1}{b - a + 1}\) & \(\frac{a + b}{2}\) & \(\frac{\left( b - a + 1 \right)^2 - 1}{12}\) \\
\(X \sim \operatorname{Bernoulli}{\left( p \right)}\) & \(p \in \interval{0}{1}, x \in \left\{ 0, 1 \right\}\) & \(p^x \left( 1 - p \right)^{1 - x}\) & \(1 - p\) & \(p\) & \(p \left( 1 - p \right)\) \\
\(X \sim \operatorname{Binomial}{\left( n,\: p \right)}\) & \(x \in \left\{ 0, \dots, n \right\}\) & \(\binom{n}{x} p^x \left( 1 - p \right)^{n - x}\) & \(\sum_{u = 0}^x \binom{n}{u} p^u \left( 1 - p \right)^{n - u}\) & \(np\) & \(np\left( 1 - p \right)\) \\
\(N \sim \operatorname{Geometric}{\left( p \right)}\) & \(n \geq 1\) & \(\left( 1 - p \right)^{n - 1} p\) & \(1 - \left( 1 - p \right)^n\) & \(\frac{1}{p}\) & \(\frac{1 - p}{p^2}\) \\
\( Y \sim \operatorname{Geometric}{\left( p \right)}\) & \(y \geq 0\) & \(\left( 1 - p \right)^y p\) & \(1 - \left( 1 - p \right)^{y + 1}\) & \(\frac{1 - p}{p}\) & \(\frac{1 - p}{p^2}\) \\
\( N \sim \operatorname{NB}{\left( k,\: p \right)}\) & \(n \geq k\) & \(\binom{n - 1}{k - 1} \left( 1 - p \right)^{n - k} p^k\) & \(\sum_{u = k}^n \binom{u - 1}{k - 1} \left( 1 - p \right)^{u - k} p^k\) & \(\frac{k}{p}\) & \(\frac{k\left( 1 - p \right)}{p^2}\) \\
\( Y \sim \operatorname{NB}{\left( k,\: p \right)}\) & \(y \geq 0\) & \(\binom{y + k - 1}{k - 1} \left( 1 - p \right)^y p^k\) & \(\sum_{u = 0}^y \binom{u + k - 1}{k - 1} \left( 1 - p \right)^u p^k\) & \(\frac{k\left( 1 - p \right)}{p}\) & \(\frac{k\left( 1 - p \right)}{p^2}\) \\
\( N \sim \operatorname{Poisson}{\left( \lambda \right)}\) & \(n \geq 0\) & \(\frac{\lambda^n e^{-\lambda}}{n!}\) & \(e^{-\lambda} \sum_{u = 0}^n \frac{\lambda^u}{u!}\) & \(\lambda\) & \(\lambda\) \\
\bottomrule
\end{tabular}
\caption{Discrete probability distributions.} % \label{}
\end{table}
\begin{table}[H]
\centering
\begin{tabular}{c c c c c c}
\toprule
\textbf{Distribution} & \textbf{Restrictions} & \textbf{PDF} & \textbf{CDF} & \(\E{\left( X \right)}\) & \(\Var{\left( X \right)}\) \\
\midrule
\(X \sim \operatorname{Uniform}{\left( a,\: b \right)}\) & \(a < x < b\) & \(\frac{1}{b - a}\) & \(\frac{x - a}{b - a}\) & \(\frac{a + b}{2}\) & \(\frac{\left( b - a \right)^2}{12}\) \\
\(T \sim \operatorname{Exp}{\left( \eta \right)}\) & \(t > 0\) & \(\eta e^{-\eta t}\) & \(1 - e^{-\eta t}\) & \(1/\eta\) & \(1/\eta^2\) \\
\(X \sim \operatorname{N}{\left( \mu,\: \sigma^2 \right)}\) & \(x \in \R\) & \(\frac{1}{\sqrt{2 \pi \sigma^2}} e^{-\frac{\left( x - \mu \right)^2}{2 \sigma^2}}\) & \(\frac{1}{2} \left( 1 + \erf{\left( \frac{x - \mu}{\sigma \sqrt{2}} \right)} \right)\) & \(\mu\) & \(\sigma^2\) \\
\bottomrule
\end{tabular}
\caption{Continuous probability distributions.} % \label{}
\end{table}
\begin{minipage}{126.1962963mm}
\begin{table}[H]
\centering
\begin{tabular}{c c c }
\toprule
& \textbf{Discrete} & \textbf{Continuous} \\
\midrule
Valid probabilities & \(0 \leq p_x \leq 1\) & \(f\left( x \right) \geq 0\) \\
Cumulative probability & \(\sum_{u \leq x} p_u\) & \(\int_{-\infty}^{x} f\left( u \right) \odif{u}\) \\
\(\E{\left( X \right)}\) & \(\sum_{\Omega} xp_x\) & \(\int_{\Omega} xf\left( x \right)\odif{x}\) \\
\(\E{\left( g\left( X \right) \right)}\) & \(\sum_{\Omega} g\left( x \right)p_x\) & \(\int_{\Omega} g\left( x \right)f\left( x \right)\odif{x}\) \\
\(\Var{\left( X \right)}\) & \(\sum_{\Omega} \left( x - \mu \right)^2 p_x\) & \(\int_{\Omega} \left( x - \mu \right)^2f\left( x \right)\odif{x}\) \\
\bottomrule
\end{tabular}
\caption{Probability rules for univariate \(X\).} % \label{}
\end{table}
\begin{multicols}{2}
\section{Stochastic Models}
\subsection{Random Variables}
Measurable quantity whose value is subject to uncertainty. An
event occurs when a random variable takes a particular value or
range of values.
\subsection{Probability Distribution}
The probability distribution of a random variable \(X\) is a
function that links all outcomes \(x \in \Omega\) to the
probability that they will occur \(\Pr{\left( X = x \right)}\).
\subsection{Probability Mass Function}
\begin{equation*}
\Pr{\left( X = x \right)} = p_x
\end{equation*}
\subsection{Probability Density Function}
\begin{equation*}
\Pr{\left( x_1 \leq X \leq x_2 \right)} = \int_{x_1}^{x_2} f\left( x \right) \odif{x}
\end{equation*}
\subsection{Cumulative Distribution Function}
Probability that a random variable is less than or equal to a
particular realisation \(x\). \(F\left( x \right)\) is a valid
CDF if:
\begin{enumerate}
\item \(F\) is monotonically increasing and continuous
\item \(\lim_{x \to -\infty} F\left( x \right) = 0\)
\item \(\lim_{x \to \infty} F\left( x \right) = 1\)
\end{enumerate}
\begin{equation*}
\odv{F\left( x \right)}{x} = \odv{}{x} \int_{-\infty}^x f\left( u \right) \odif{u} = f\left( x \right)
\end{equation*}
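For example (using the exponential distribution from the table above): with \(F\left( x \right) = 1 - e^{-\eta x}\) for \(x > 0\),
\begin{equation*}
\odv{F\left( x \right)}{x} = \eta e^{-\eta x} = f\left( x \right).
\end{equation*}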
\subsection{Complementary CDF (Survival Function)}
\begin{equation*}
\Pr{\left( X > x \right)} = 1 - \Pr{\left( X \leq x \right)} = 1 - F\left( x \right)
\end{equation*}
\subsection{\texorpdfstring{\(p\)}{p}-Quantiles}
\begin{equation*}
F\left( x \right) = \int_{-\infty}^x f\left( u \right) \odif{u} = p
\end{equation*}
\subsection{Special Quantiles}
\begin{align*}
\text{Lower quartile \(q_1\):} & & & p = \frac{1}{4} \\
\text{Median \(m\):} & & & p = \frac{1}{2} \\
\text{Upper quartile \(q_2\):} & & & p = \frac{3}{4} \\
\text{Interquartile range IQR:} & & & q_2 - q_1
\end{align*}
\subsection{Quantile Function}
\begin{equation*}
x = F^{-1}\left( p \right) = Q\left( p \right)
\end{equation*}
\subsection{Expectation (Mean)}
Long-run average value over infinitely many observations. For a
random variable with support in \(\left( a,\: b \right)\) and any \(a < c < b\):
\begin{equation*}
\E{\left(X\right)} =
\begin{aligned}[t]
& -\int_{a}^c F\left( x \right) \odif{x} \\
& + \int_c^b \left(1 - F\left( x \right)\right) \odif{x} + c
\end{aligned}
\end{equation*}
\subsection{Variance}
Measure of spread of the distribution (average squared distance
of each value from the mean).
\begin{equation*}
\Var{\left( X \right)} = \sigma^2 = \E{\left( X^2 \right)} - \E{\left( X \right)}^2
\end{equation*}
\subsection{Standard Deviation}
\begin{equation*}
\sigma = \sqrt{\Var{\left( X \right)}}
\end{equation*}
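For example (an illustrative discrete case): for a fair six-sided die,
\begin{equation*}
\E{\left( X \right)} = \frac{7}{2}, \quad \E{\left( X^2 \right)} = \frac{91}{6}, \quad \Var{\left( X \right)} = \frac{91}{6} - \frac{49}{4} = \frac{35}{12}.
\end{equation*}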
\end{multicols}
\end{minipage}
\hfill%
\begin{minipage}{62.39259259mm}
\subsection{Uniform Distribution}
Single trial \(X\) in a set of equally likely elements.
\subsection{Bernoulli (Binary) Distribution}
Boolean-valued outcome \(X\), i.e., success (1) or failure (0).
\(\left( 1 - p \right)\) is sometimes denoted as \(q\).
\subsection{Binomial Distribution}
Number of successes \(X\) for \(n\) independent trials with the
same probability of success \(p\).
\begin{align*}
X & = Y_1 + \cdots + Y_n \\
Y_i & \overset{\mathrm{iid}}{\sim} \operatorname{Bernoulli}{\left( p \right)} : \forall i \in \left\{ 1,\: 2,\: \dots,\: n \right\}.
\end{align*}
\subsection{Geometric Distribution}
Number of trials \(N\) up to and including the first success where
each trial is independent and has the same probability of success
\(p\).
\subsection{Alternate Geometric Distribution}
Number of failures \(Y = N - 1\) until a success.
\subsection{Negative Binomial Distribution}
Number of trials \(N\) until \(k \geq 1\) successes, where each
trial is independent and has the same probability of success \(p\).
\begin{align*}
N & = Y_1 + Y_2 + \cdots + Y_k \\
Y_i & \overset{\mathrm{iid}}{\sim} \operatorname{Geom}{\left( p \right)} : \forall i \in \left\{ 1,\: 2,\: \dots,\: k \right\}.
\end{align*}
\subsection{Alternate Negative Binomial Distribution}
Number of failures \(Y = N - k\) until \(k\) successes.
\subsection{Poisson Distribution}
Number of events \(N\) which occur over a fixed interval, where
\(\lambda\) is the expected number of events in that interval.
\subsection{Modelling Count Data}
\begin{itemize}
\setlength\itemsep{-0.2em}
\item Poisson (mean = variance)
\item Binomial (underdispersed, mean > variance)
\item Geometric/Negative Binomial \newline (overdispersed, mean
< variance)
\end{itemize}
\end{minipage}
\begin{minipage}{62.39259259mm}
\subsection{Uniform Distribution}
Outcome \(X\) within some interval, where the probability of an
outcome in one interval is the same as all other intervals of the
same length.
\begin{equation*}
m = \frac{a + b}{2}
\end{equation*}
\subsection{Exponential Distribution}
Time \(T\) between events with rate \(\eta\).
\begin{equation*}
m = \frac{\ln{\left( 2 \right)}}{\eta}
\end{equation*}
\subsection{Memoryless Property}
For \(T \sim \operatorname{Exp}{\left( \eta \right)}\):
\begin{equation*}
\Pr{\left( T > s + t \,\vert\, T > t \right)} = \Pr{\left( T > s \right)}
\end{equation*}
For \(N \sim \operatorname{Geometric}{\left( p \right)}\):
\begin{equation*}
\Pr{\left( N > s + n \,\vert\, N > n \right)} = \Pr{\left( N > s \right)}
\end{equation*}
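For example (a purely illustrative application): if \(T \sim \operatorname{Exp}{\left( \eta \right)}\), then
\begin{equation*}
\Pr{\left( T > 5 \,\vert\, T > 2 \right)} = \Pr{\left( T > 3 \right)} = e^{-3\eta}.
\end{equation*}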
\subsection{Normal Distribution}
Used to model naturally occurring quantities, e.g., measurements
and their errors. Also used to approximate other distributions.
\subsection{Standard Normal Distribution}
Given \(X \sim \operatorname{N}{\left( \mu,\: \sigma^2 \right)}\),
consider
\begin{equation*}
Z = \frac{X - \mu}{\sigma}
\end{equation*}
so that \(Z \sim \operatorname{N}{\left( 0,\: 1 \right)}\).
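For example (values chosen purely for illustration): if \(X \sim \operatorname{N}{\left( 10,\: 4 \right)}\), then
\begin{equation*}
\Pr{\left( X \leq 12 \right)} = \Pr{\left( Z \leq \frac{12 - 10}{2} \right)} = \Pr{\left( Z \leq 1 \right)}.
\end{equation*}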
\end{minipage}
\hfill%
\begin{minipage}{126.1962963mm}
\begin{multicols}{2}
\section{Central Limit Theorem}
The sum of independent and identically distributed random
variables, when properly standardised, can be approximated by a
normal distribution, as \(n \to \infty\).
Let \(X_1,\: \ldots,\: X_n \overset{\mathrm{iid}}{\sim} X\)
with \(\E{\left( X \right)} = \mu\) and \(\Var{\left( X
\right)} = \sigma^2\):
\subsection{Average of Random Variables}
If \(\overline{X} = \frac{1}{n} \sum_{i = 1}^n X_i\):
\begin{align*}
\E{\left( \overline{X} \right)} & = \mu \\
\Var{\left( \overline{X} \right)} & = \frac{\sigma^2}{n}
\end{align*}
By standardising \(\overline{X}\), we can define
\begin{equation*}
Z = \frac{\overline{X} - \mu}{\sigma / \sqrt{n}}
\end{equation*}
so that \(Z \to \operatorname{N}{\left( 0,\: 1 \right)}\) as \(n \to \infty\).
\subsection{Sum of Random Variables}
If \(Y = \sum_{i = 1}^n X_i\):
\begin{align*}
\E{\left( Y \right)} & = n \mu \\
\Var{\left( Y \right)} & = n \sigma^2
\end{align*}
\begin{equation*}
Y \sim \operatorname{N}{\left( n \mu,\: n \sigma^2 \right)}
\end{equation*}
as \(n \to \infty\).
\subsection{Binomial Approximations}
If \(X \sim \operatorname{Binomial}{\left( n,\: p \right)}\):
\begin{equation*}
X \approx Y \sim \operatorname{N}{\left( np,\: np\left( 1 - p \right) \right)}
\end{equation*}
Sufficient for \(np > 5\) and \(n\left( 1 - p \right) > 5\).
If \(np < 5\):
\begin{equation*}
X \approx Y \sim \operatorname{Pois}{\left( np \right)}.
\end{equation*}
If \(n\left( 1 - p \right) < 5\), consider the number of failures \(W = n - X\):
\begin{equation*}
W \approx Y \sim \operatorname{Pois}{\left( n\left( 1 - p \right) \right)}.
\end{equation*}
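For example (an illustrative case): if \(X \sim \operatorname{Binomial}{\left( 100,\: 0.5 \right)}\), then \(np = n\left( 1 - p \right) = 50 > 5\), so
\begin{equation*}
X \approx Y \sim \operatorname{N}{\left( 50,\: 25 \right)}.
\end{equation*}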
\subsection{Continuity Correction}
\begin{multline*}
\Pr{\left( a \leq X \leq b \right)} = \\
\Pr{\left( a - 1 < X < b + 1 \right)}
\end{multline*}
must hold for all integers \(a\) and \(b\), since \(X\) is integer-valued. Therefore
\begin{multline*}
\Pr{\left( a \leq X \leq b \right)} \approx \\
\Pr{\left( a - \frac{1}{2} \leq Y \leq b + \frac{1}{2} \right)}.
\end{multline*}
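For example (illustrative values): with \(X \sim \operatorname{Binomial}{\left( 100,\: 0.5 \right)}\) and \(Y \sim \operatorname{N}{\left( 50,\: 25 \right)}\),
\begin{equation*}
\Pr{\left( X \leq 55 \right)} \approx \Pr{\left( Y \leq 55.5 \right)} = \Pr{\left( Z \leq 1.1 \right)}.
\end{equation*}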
\subsection{Poisson Approximation}
If \(X_i \overset{\mathrm{iid}}{\sim} \operatorname{Poisson}{\left( \lambda \right)}\) and \(X = \sum_{i = 1}^n X_i\), then
\begin{align*}
\E{\left( X \right)} & = n \lambda \\
\Var{\left( X \right)} & = n \lambda
\end{align*}
\begin{equation*}
X \approx Y \sim \operatorname{N}{\left( n\lambda,\: n\lambda \right)}.
\end{equation*}
Sufficient for \(n \lambda > 10\), and for accurate approximations, \(n \lambda > 20\).
\end{multicols}
\end{minipage}
\vspace{2mm}
\hrule
\vspace{-2mm}
\begin{multicols}{3}
\section{Bivariate Distributions}
\subsection{Bivariate Probability Mass Function}
Distribution over the joint space of two discrete random variables
\(X\) and \(Y\):
\begin{align*}
\Pr{\left( X = x,\: Y = y \right)} & = p_{x,\: y} \geq 0 \\
\sum_{y \in \Omega_2} \sum_{x \in \Omega_1} \Pr{\left( X = x,\: Y = y \right)} & = 1
\end{align*}
for all pairs of \(x \in \Omega_1\) and \(y \in \Omega_2\).
The joint probability mass function can be shown using a table:
{\small
\begin{equation*}
\begin{matrix}[c|ccc] % chktex 44
& y_1 & \cdots & y_n \\
\hline % chktex 44
x_1 & p_{1,\: 1} & \cdots & p_{1,\: n} \\
\vdots & \vdots & \ddots & \vdots \\
x_n & p_{n,\: 1} & \cdots & p_{n,\: n}
\end{matrix}
\end{equation*}
}
\subsection{Bivariate Probability Density Function}
Distribution over the joint space of two continuous random
variables \(X\) and \(Y\):
\begin{multline*}
\Pr{\left( x_1 \leq X \leq x_2,\: y_1 \leq Y \leq y_2 \right)} = \\
\int_{x_1}^{x_2} \int_{y_1}^{y_2} f\left( x,\: y \right) \odif{y} \odif{x}
\end{multline*}
This function must satisfy
\begin{align*}
f\left( x,\: y \right) & \geq 0 \\
\int_{x \in \Omega_1} \int_{y \in \Omega_2} f\left( x,\: y \right) \odif{y} \odif{x} & = 1
\end{align*}
for all pairs of \(x \in \Omega_1\) and \(y \in \Omega_2\).
\begin{multline*}
\Pr{\left( X = x ,\: Y = y \right)} = \\
\Pr{\left( X = x \,\vert\, Y = y \right)} \Pr{\left( Y = y \right)}
\end{multline*}
\subsection{Marginal Probability}
Probability function of each random variable. Must specify the
range of values that variable can take.
\subsection{Marginal Probability Mass Function}
\begin{align*}
p_x & = \sum_{y \in \Omega_2} \Pr{\left( X = x,\: Y = y \right)} \\
p_y & = \sum_{x \in \Omega_1} \Pr{\left( X = x,\: Y = y \right)}
\end{align*}
\subsection{Marginal Probability Density Function}
\begin{align*}
f\left( x \right) & = \int_{y_1}^{y_2} f\left( x,\: y \right) \odif{y} \\
f\left( y \right) & = \int_{x_1}^{x_2} f\left( x,\: y \right) \odif{x}
\end{align*}
\subsection{Conditional Probability Mass Function}
\begin{gather*}
\Pr{\left( X = x \,\vert\, Y = y \right)} = \frac{\Pr{\left( X = x,\: Y = y \right)}}{\Pr{\left( Y = y \right)}} \\
\sum_{x \in \Omega_1} \Pr{\left( X = x \,\vert\, Y = y \right)} = 1
\end{gather*}
\subsection{Conditional Probability Density Function}
\begin{gather*}
f\left( x \,\vert\, y \right) = \frac{f\left( x,\: y \right)}{f\left( y \right)} \\
\int_{x_1}^{x_2} f\left( x \,\vert\, y \right) \odif{x} = 1
\end{gather*}
\subsection{Independence}
Two discrete random variables \(X\) and \(Y\) are independent if
\begin{equation*}
p_{x,\:y} = p_x p_y
\end{equation*}
for all pairs of \(x\) and \(y\).
Two continuous random variables \(X\) and \(Y\) are independent if
\begin{equation*}
f\left( x,\: y \right) = f\left( x \right) f\left( y \right)
\end{equation*}
\subsection{Conditional Expectation}
\begin{align*}
\E{\left( X \,\vert\, Y = y \right)} & = \sum_{x\in\Omega_1} x p_{x\,\vert\,y} \\
\E{\left( X \,\vert\, Y = y \right)} & = \int_{x_1}^{x_2} x f\left( x \,\vert\, y \right) \odif{x}
\end{align*}
\subsection{Conditional Variance}
\begin{multline*}
\Var{\left( X \,\vert\, Y = y \right)} \\
= \E{\left( X^2 \,\vert\, Y = y \right)} - \E{\left( X \,\vert\, Y = y \right)}^2
\end{multline*}
\subsection{Law of Total Expectation}
By treating \(\E{\left( X \,\vert\, Y \right)}\) as a random
variable of \(Y\):
\begin{equation*}
\E{\left( X \right)} = \E{\left( \E{\left( X \,\vert\, Y \right)} \right)}
\end{equation*}
\subsection{Joint Expectation}
\begin{align*}
\E{\left( XY \right)} & = \sum_{x\in\Omega_1} \sum_{y\in\Omega_2} xy p_{x,\: y} \\
\E{\left( XY \right)} & = \int_{x_1}^{x_2} \int_{y_1}^{y_2} xy f\left( x,\: y \right) \odif{y} \odif{x}.
\end{align*}
\subsection{Transformation Rules}
\begin{align*}
\E{\left( aX \pm b \right)} & = a\E{\left( X \right)} \pm b \\
\E{\left( X \pm Y \right)} & = \E{\left( X \right)} \pm \E{\left( Y \right)} \\
\Var{\left( aX \pm b \right)} & = a^2\Var{\left( X \right)} \\
\Var{\left( X \pm Y \right)} & =
\begin{aligned}[t]
& \Var{\left( X \right)} + \Var{\left( Y \right)} \\
& \pm 2\Cov{\left( X,\: Y \right)}
\end{aligned}
\end{align*}
\begin{align*}
\Cov{\left( aX + b,\: cY + d \right)} & = ac \Cov{\left( X,\: Y \right)} \\
\Cov{\left( X + Y,\: Z \right)} & =
\begin{aligned}[t]
& \Cov{\left( X,\: Z \right)} \\
& + \Cov{\left( Y,\: Z \right)}
\end{aligned}
\end{align*}
for constants \(a\), \(b\), \(c\), and \(d\).
If \(X\) and \(Y\) are independent:
\begingroup
\allowdisplaybreaks
\begin{align*}
\E{\left( X \,\vert\, Y = y \right)} & = \E{\left( X \right)} \\
\Var{\left( X \,\vert\, Y = y \right)} & = \Var{\left( X \right)} \\
\Var{\left( X \pm Y \right)} & = \Var{\left( X \right)} + \Var{\left( Y \right)} \\
\E{\left( XY \right)} & = \E{\left( X \right)} \E{\left( Y \right)}
\end{align*}
\endgroup
\begin{multline*}
\Var{\left( XY \right)} = \Var{\left( X \right)} \Var{\left( Y \right)} \\
+ \E{\left( X \right)}^2 \Var{\left( Y \right)} + \E{\left( Y \right)}^2 \Var{\left( X \right)}
\end{multline*}
\subsection{Covariance}
Measure of the dependence between two random variables
\begin{align*}
\Cov{\left( X,\: Y \right)} & =
\begin{aligned}[t]
\E\left( \left( X - \E{\left( X \right)} \right) \right. & \\
\left. \left( Y - \E{\left( Y \right)} \right) \right) &
\end{aligned}
\\
& = \E{\left( XY \right)} - \E{\left( X \right)} \E{\left( Y \right)}
\end{align*}
The covariance of \(X\) and \(Y\) is:
\begin{description}
\item[Positive] if an increase in one variable is more likely
to result in an increase in the other variable.
\item[Negative] if an increase in one variable is more likely
to result in a decrease in the other variable.
\item[Zero] if \(X\) and \(Y\) are independent. Note that the
converse is not true.
\end{description}
Describes the direction of a relationship, but does not quantify the strength of such a relationship.
\subsection{Correlation}
Explains both the direction and strength of a linear relationship
between two random variables.
\begin{equation*}
\rho\left( X,\: Y \right) = \frac{\Cov{\left( X,\: Y \right)}}{\sqrt{\Var{\left( X \right)} \Var{\left( Y \right)}}}
\end{equation*}
where \(-1 \leq \rho\left( X,\: Y \right) \leq 1\).
The correlation is interpreted as follows:
\begin{itemize}
\item \(\rho\left( X,\: Y \right) > 0\) iff \(X\) and \(Y\) have a positive linear relationship.
\item \(\rho\left( X,\: Y \right) < 0\) iff \(X\) and \(Y\) have a negative linear relationship.
\item \(\rho\left( X,\: Y \right) = 0\) if \(X\) and \(Y\) are independent. Note that the converse is not true.
\item \(\rho\left( X,\: Y \right) = 1\) iff \(X\) and \(Y\) have a perfect linear relationship with positive slope.
\item \(\rho\left( X,\: Y \right) = -1\) iff \(X\) and \(Y\) have a perfect linear relationship with negative slope.
\end{itemize}
The slope of a perfect linear relationship cannot be obtained from the correlation.
\end{multicols}
\hrule
\begin{multicols}{3}
\section{Markov Chains}
A Markov chain is a discrete time and state stochastic process that
describes how a state evolves over time.
% In this process, the set of all states is discrete and disjoint and states change probabilistically so that
% a step may not result in a changed state.
% At each step, the next state depends only on the current state of the random variable.
States are denoted by the random variable \(X_t\) at time step
\(t\).
\subsection{Markov Property}
\begin{flalign*}
\Pr{\left( X_t = x_t \,\vert\, X_{t-1} = x_{t-1},\: \ldots,\: X_{0} = x_{0} \right)} \\
= \Pr{\left( X_t = x_t \,\vert\, X_{t-1} = x_{t-1} \right)}
\end{flalign*}
\subsection{Homogeneous Markov Chains}
A Markov chain is homogeneous when
\begin{multline*}
\Pr{\left( X_{t+n} = j \,\vert\, X_t = i \right)} = \\
\Pr{\left( X_n = j \,\vert\, X_0 = i \right)} = p_{ij}^{\left( n \right)}
\end{multline*}
% that is, the \(n\)-step conditional probabilities do not depend on the time step \(t\).
\subsection{Transition Probability Matrix}
A homogeneous Markov chain is characterised by the transition
probability matrix \(\symbf{P} \in \R^{m \times m}\), where \(m\)
is the number of states. \(\symbf{P}\) must fulfil the following
properties:
\begin{itemize}
\item \(p_{i,\:j} = \Pr{\left( X_t = j \,\vert\, X_{t-1} = i \right)}\)
\item \(p_{i,\:j} \geq 0 : \forall i,\: j\)
\item \(\sum_{j = 1}^m p_{i,\:j} = 1 : \forall i\)
\end{itemize}
\(\symbf{P}\) has the following form
\begin{equation*}
\symbf{P} = \quad \scriptscriptstyle{X_t} \overset{X_{t+1}}{
\begin{bmatrix}
\phantom{p} & \phantom{p} \\
\phantom{p} & \phantom{p}
\end{bmatrix}
}
\end{equation*}
The \(n\)-step transition probability is given by \(\symbf{P}^n\).
\subsection{Unconditional State Probabilities}
The unconditional probability of being in state \(j\) at time \(n\)
is given by
\begin{equation*}
\Pr{\left( X_n = j \right)} = p_j^{\left( n \right)}
\end{equation*}
Given multiple states, let \(\symbfit{s}^{\left( n \right)}\) denote the vector of all states \(p_j^{\left( n \right)}\) at
time \(n\). Then
\begin{align*}{\symbfit{s}^{\left( n \right)}}^\top & = {\symbfit{s}^{\left( n-1 \right)}}^\top \symbf{P} \\
{\symbfit{s}^{\left( n \right)}}^\top & = {\symbfit{s}^{\left( 0 \right)}}^\top \symbf{P}^n
\end{align*}
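For example (a hypothetical two-state chain, not from the unit): with
\begin{equation*}
\symbf{P} =
\begin{bmatrix}
0.9 & 0.1 \\
0.2 & 0.8
\end{bmatrix}, \quad\quad {\symbfit{s}^{\left( 0 \right)}}^\top = \begin{bmatrix} 1 & 0 \end{bmatrix},
\end{equation*}
we obtain \({\symbfit{s}^{\left( 1 \right)}}^\top = \begin{bmatrix} 0.9 & 0.1 \end{bmatrix}\) and \({\symbfit{s}^{\left( 2 \right)}}^\top = \begin{bmatrix} 0.83 & 0.17 \end{bmatrix}\).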
\subsection{Stationary Distribution}
At steady-state, the probability of being in a particular state
does not change from one step to the next.
\begin{equation*}
\symbfit{s}^{\left( n+1 \right)} = \symbfit{s}^{\left( n \right)} \implies {\symbfit{s}^{\left( n \right)}}^\top = {\symbfit{s}^{\left( n \right)}}^\top \symbf{P}
\end{equation*}
The stationary distribution \(\symbfit{\pi}\) satisfies \(\symbfit{\pi}^\top = \symbfit{\pi}^\top \symbf{P}\).
To determine \(\symbfit{\pi}\) uniquely, solve \(\symbfit{\pi}^\top = \symbfit{\pi}^\top \symbf{P}\) together with the normalisation \(\sum_{i = 1}^m \pi_i = 1\).
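For example (the same hypothetical two-state chain as above): with
\begin{equation*}
\symbf{P} =
\begin{bmatrix}
0.9 & 0.1 \\
0.2 & 0.8
\end{bmatrix},
\end{equation*}
the balance equation gives \(0.1 \pi_1 = 0.2 \pi_2\), so \(\symbfit{\pi}^\top = \begin{bmatrix} \frac{2}{3} & \frac{1}{3} \end{bmatrix}\).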
\subsection{Limiting Distribution}
Under certain conditions, each row of \(\symbf{P}^n\) converges to
\(\symbfit{\pi}^\top\) as \(n \to \infty\), so the probability of
being in each state no longer depends on the starting state.
\(\symbfit{\pi}\) then provides the long-run probabilities of being
in each state, and the process forgets where it started.
A sufficient condition for this is that \(\symbf{P}^n\) has all
positive entries for some finite \(n\).
\textit{Note that a stationary distribution does not imply that a limiting distribution exists}.
\section{Poisson Processes}
A Poisson process is a continuous time and discrete state
stochastic process that counts events that occur randomly in time
(or space).
The rate parameter \(\eta\) is the average rate at which events
occur. The rate does not depend on how long the process has been
running or on how many events have already been observed.
The number of events that occur randomly on the interval
\(\ointerval{0}{t}\) is denoted by the random variable \(X\left(
t \right)\).
\begin{equation*}
\Pr{\left( X\left( 0 \right) = 0 \right)} = 1.
\end{equation*}
Let \(h\) be a small interval such that at most 1 event can occur during that time, then
\begin{gather*}
\Pr{\left( X\left( t + h \right) = n + 1 \,\vert\, X\left( t \right) = n \right)} \approx \eta h \\
\Pr{\left( X\left( t + h \right) = n \,\vert\, X\left( t \right) = n \right)} \approx 1 - \eta h \\
\Pr{\left( X\left( t + h \right) > n + 1 \,\vert\, X\left( t \right) = n \right)} \approx 0
\end{gather*}
\subsection{Poisson Distribution}
The count of a Poisson process with rate \(\eta\) has a Poisson
distribution, so that \(X\left( t \right) \sim \operatorname{Poisson}{\left( \eta t
\right)}\), where \(\eta t\) is the expected number of events in \(\ointerval{0}{t}\).
The number of events occurring between \(t_1\) and \(t_2\) is given
by \(N\left( t_1,\: t_2 \right) \sim \operatorname{Poisson}{\left(
\eta \left( t_2 - t_1 \right) \right)}\).
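For example (an illustrative rate): if events occur at rate \(\eta = 2\) per hour, then over \(t = 3\) hours,
\begin{equation*}
X\left( 3 \right) \sim \operatorname{Poisson}{\left( 6 \right)} \quad\quad \text{and} \quad\quad \Pr{\left( X\left( 3 \right) = 0 \right)} = e^{-6}.
\end{equation*}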
\subsection{Exponential Distribution}
Let \(T\) be the time between events of a Poisson process so that
\(T\) has an exponential distribution
\begin{equation*}
T \sim \operatorname{Exp}{\left( \eta \right)}.
\end{equation*}
\subsection{Properties of Poisson Processes}
\begin{enumerate}
\setlength\itemsep{-0.2em}
\item As the time between events of a Poisson process has an
exponential distribution, the Poisson process inherits the
memoryless property,
\begin{flalign*}
\Pr{\left( T > x + y \,\vert\, T > x \right)} = \\
\Pr{\left( T > y \right)}.
\end{flalign*}
\item Non-overlapping time intervals of a Poisson process are
independent. For \(a < b\) and \(c < d\) where \(b \leq
c\),
\begin{flalign*}
\Pr{\left( N\left( a,\: b \right) = m \,\vert\, N\left( c,\: d \right) = n \right)} = \\
\Pr{\left( N\left( a,\: b \right) = m \right)}
\end{flalign*}
\item\label{poisson_property_1_event} If exactly 1 event occurs on the interval \(\ointerval{0}{a}\), the distribution of when that event occurs is
uniform. Let \(X\) be the time \(x < a\) when the first event occurs,
\begin{flalign*}
X \,\vert\, \left( N\left( 0,\: a \right) = 1 \right) \sim \\
\operatorname{Uniform}{\left( 0,\: a \right)}
\end{flalign*}
\item\label{poisson_property_n_events} If exactly \(n\) events occur on the interval \(\ointerval{0}{t}\), then the distribution of the number of events
that occur in \(\ointerval{0}{s}\) is binomial, for \(s < t\). Let \(X\) be the number of events that occur in \(\ointerval{0}{s}\) for \(s < t\),
\begin{flalign*}
X \,\vert\, \left( N\left( 0,\: t \right) = n \right) \sim \\
\operatorname{Binomial}{\left( n,\: \frac{s}{t} \right)}
\end{flalign*}
\end{enumerate}
\end{multicols}
\end{document}