From eb98a1e144e48b0e280f1dac24387e5d2a03ede7 Mon Sep 17 00:00:00 2001 From: "github-actions[bot]" Date: Mon, 25 Dec 2023 20:42:03 +0000 Subject: [PATCH] deploy: 6c1a09454a0407ddcefccc409ba46ce4a465d1b0 --- 404.html | 4 ++-- .../Applications of Differentiation/index.html | 4 ++-- .../read/index.html | 4 ++-- Functions/Continuity and Limits/index.html | 4 ++-- Functions/Continuity and Limits/read/index.html | 4 ++-- Functions/Derivatives/index.html | 4 ++-- Functions/Derivatives/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../Inverse Functions and the Logarithm/index.html | 4 ++-- .../read/index.html | 4 ++-- Functions/Miscellanea/index.html | 4 ++-- Functions/Miscellanea/read/index.html | 4 ++-- Functions/Overview/index.html | 4 ++-- Functions/Overview/read/index.html | 4 ++-- Functions/Principles of Programming/index.html | 4 ++-- .../Principles of Programming/read/index.html | 4 ++-- Functions/Sequences and Series/index.html | 4 ++-- Functions/Sequences and Series/read/index.html | 4 ++-- Functions/Slopes of Lines and Curves/index.html | 4 ++-- .../Slopes of Lines and Curves/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- Functions/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- Multivariate to Power/Overview/index.html | 4 ++-- Multivariate to Power/Overview/read/index.html | 4 ++-- .../Power and Sample Sizes/index.html | 4 ++-- .../Power and Sample Sizes/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- Multivariate to Power/index.html | 4 ++-- Numbers to Indices/Data Vectors/index.html | 4 ++-- Numbers to Indices/Data Vectors/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- Numbers to Indices/Functions/index.html | 4 ++-- Numbers to 
Indices/Functions/read/index.html | 4 ++-- .../Indices and the apply Commands in R/index.html | 4 ++-- .../read/index.html | 4 ++-- Numbers to Indices/More on Algebra/index.html | 4 ++-- Numbers to Indices/More on Algebra/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- Numbers to Indices/Overview/index.html | 4 ++-- Numbers to Indices/Overview/read/index.html | 4 ++-- Numbers to Indices/Polynomials/index.html | 4 ++-- Numbers to Indices/Polynomials/read/index.html | 4 ++-- .../Simple Data Analysis in R/index.html | 4 ++-- .../Simple Data Analysis in R/read/index.html | 4 ++-- Numbers to Indices/index.html | 6 +++--- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../Multivariate Calculus/index.html | 4 ++-- .../Multivariate Calculus/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../Overview/index.html | 4 ++-- .../Overview/read/index.html | 4 ++-- .../Ranks and Determinants/index.html | 4 ++-- .../Ranks and Determinants/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../Some Regression Topics/index.html | 4 ++-- .../Some Regression Topics/read/index.html | 4 ++-- .../The Gamma Distribution/index.html | 4 ++-- .../The Gamma Distribution/read/index.html | 4 ++-- .../index.html | 4 ++-- .../read/index.html | 4 ++-- .../Vector and Matrix Operations/index.html | 4 ++-- .../Vector and Matrix Operations/read/index.html | 4 ++-- Vectors to Some Regression Topics/index.html | 4 ++-- assets/js/23374ca6.95001b49.js | 1 + assets/js/23374ca6.ddbb2705.js | 1 - .../{935f2afb.ad395550.js => 935f2afb.64cbbc6c.js} | 2 +- .../{f71aa3b8.3145a67c.js => f71aa3b8.1f4d454d.js} | 2 +- ...e~main.78c8f560.js => runtime~main.e3e1199c.js} | 2 +- index.html | 14 +++++++++----- markdown-page/index.html | 4 ++-- 86 files changed, 174 insertions(+), 170 deletions(-) create mode 100644 assets/js/23374ca6.95001b49.js delete mode 100644 assets/js/23374ca6.ddbb2705.js rename 
assets/js/{935f2afb.ad395550.js => 935f2afb.64cbbc6c.js} (87%) rename assets/js/{f71aa3b8.3145a67c.js => f71aa3b8.1f4d454d.js} (52%) rename assets/js/{runtime~main.78c8f560.js => runtime~main.e3e1199c.js} (96%) diff --git a/404.html b/404.html index e2a3997..957726c 100644 --- a/404.html +++ b/404.html @@ -4,13 +4,13 @@ Page Not Found | Computing and Calculus for Applied Statistics - +
Skip to main content

Page Not Found

We could not find what you were looking for.

Please contact the owner of the site that linked you to the original URL and let them know their link is broken.

- + \ No newline at end of file diff --git a/Functions/Applications of Differentiation/index.html b/Functions/Applications of Differentiation/index.html index 01b09f6..77dbd97 100644 --- a/Functions/Applications of Differentiation/index.html +++ b/Functions/Applications of Differentiation/index.html @@ -4,13 +4,13 @@ Applications of Differentiation | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Applications of Differentiation/read/index.html b/Functions/Applications of Differentiation/read/index.html index 91b9035..0be2c61 100644 --- a/Functions/Applications of Differentiation/read/index.html +++ b/Functions/Applications of Differentiation/read/index.html @@ -4,7 +4,7 @@ Applications of Differentiation | Computing and Calculus for Applied Statistics - + @@ -22,7 +22,7 @@ This leads to the regression problem of finding parameter values for α^\hat{\alpha} and β^\hat{\beta} which gives the best fitting straight line in relation to least squares:

minα,β(yi(α+βxi))2\min_{\alpha,\beta} \displaystyle\sum \left ( y_i - ( \alpha + \beta x_i) \right ) ^2

Example

As a general exercise in finding the extremum of a function, let's look at the function f(θ)=i=1N(xiθ3)2f(\theta)=\displaystyle\sum_{i=1}^N(x_i\theta -3)^2 where xix_i are some constants. We wish to find the θ\theta that minimizes this sum. We simply differentiate with respect to θ\theta to obtain:

f'(\theta) = \displaystyle\sum_{i=1}^n 2(x_i\theta -3)x_i = 2\displaystyle\sum_{i=1}^n x^2_i\theta - 2\displaystyle\sum_{i=1}^n 3x_i

Thus:

f(θ)=2θi=1nxi22i=1n3xi=0θ=i=1n3xii=1nxi2\begin{aligned} f'(\theta) &= 2\theta \displaystyle\sum_{i=1}^n x^2_i-2\displaystyle\sum_{i=1}^n 3x_i=0 \\ & \Leftrightarrow \theta = \displaystyle\frac{\displaystyle\sum_{i=1}^n 3x_i}{\displaystyle\sum_{i=1}^n x^2_i} \end{aligned}
- + \ No newline at end of file diff --git a/Functions/Continuity and Limits/index.html b/Functions/Continuity and Limits/index.html index b1e27c4..c403ff1 100644 --- a/Functions/Continuity and Limits/index.html +++ b/Functions/Continuity and Limits/index.html @@ -4,13 +4,13 @@ Continuity and Limits | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Continuity and Limits/read/index.html b/Functions/Continuity and Limits/read/index.html index 64bf4b4..9e83d95 100644 --- a/Functions/Continuity and Limits/read/index.html +++ b/Functions/Continuity and Limits/read/index.html @@ -4,7 +4,7 @@ Continuity and Limits | Computing and Calculus for Applied Statistics - + @@ -164,7 +164,7 @@ M834 80h400000v40h-400000z">2=41

One-sided Limits

f(x)f(x) may tend towards different numbers depending on whether xx approaches x0x_0 from left or right, usually written:

xx0+x \rightarrow x_{0+} (from the right)

xx0x \rightarrow x_{0-} (from the left).

Fig. 21

Details

Sometimes a function is such that f(x)f(x) tends to different numbers depending on whether xx0x \rightarrow x_0 from the right ( xx0+x \rightarrow x_{0+} ) or from the left (xx0x \rightarrow x_{0-}).

If

limxx0+f(x)=f(x0)\lim_{x \to x_{0+}} f(x)=f(x_0)

then we say that ff is continuous from the right at x0x_0. The same applies to the limit from the left. In order for the limit to exist at the point (that is, the overall limit, regardless of direction), it must hold true that

\lim_{x \to x_{0-}} f(x) = \lim_{x \to x_{0+}} f(x)

i.e., the limit is the same from both directions.

- + \ No newline at end of file diff --git a/Functions/Derivatives/index.html b/Functions/Derivatives/index.html index 3e0f1f1..6e8a369 100644 --- a/Functions/Derivatives/index.html +++ b/Functions/Derivatives/index.html @@ -4,13 +4,13 @@ Derivatives | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Derivatives/read/index.html b/Functions/Derivatives/read/index.html index 079fe84..f4e3e4b 100644 --- a/Functions/Derivatives/read/index.html +++ b/Functions/Derivatives/read/index.html @@ -4,7 +4,7 @@ Derivatives | Computing and Calculus for Applied Statistics - + @@ -14,7 +14,7 @@ That is, if

f(x)=exf(x) = e^x

then

f(x)=exf'(x) = e^x

The derivatives of the natural logarithm, ln(x)\ln(x), is 1x\displaystyle\frac{1}{x}. That is, if

g(x)=ln(x)g(x) = \ln(x)

then

g(x)=1xg'(x) = \displaystyle\frac{1}{x}

The Derivative of a Sum and Linear Combination

If ff and gg are functions then the derivative of f+gf+g is given by f+gf' + g'.

Details

Similarly, the derivative of a linear combination is the linear combination of the derivatives. If ff and gg are functions and k(x)=af(x)+bg(x)k(x)=af(x) + bg(x) then k(x)=af(x)+bg(x)k'(x)=af'(x)+ bg'(x).

Examples

Example

If

f(x) = 2+3x, \qquad g(x)=x^3

then we know that

f'(x)=3, \qquad g'(x)=3x^2

and if we write

h(x)=f(x)+g(x)=2+3x+x3h(x)=f(x)+g(x)=2+3x+x^3

then

h(x)=3+3x2h'(x)=3+3x^2

The Derivative of a Polynomial

The derivative of a polynomial is the sum of the derivatives of the terms of the polynomial.

Details

If

p(x)=a0+a1x++anxnp(x) = a_0+a_1x+\dots +a_n x^n

then

p(x)=a1+2a2x+3a3x2+4a4x3++nanx(n1)p'(x) = a_1+2a_2x+3a_3x^2+4a_4x^3+\dots +na_n x^{(n-1)}

Examples

Example

If

p(x)=2x4+x3p(x)=2x^4+x^3

then

p(x)=2dx4dx+dx3dx=24x3+3x2=8x3+3x2p'(x)=2\displaystyle\frac{dx^4}{dx}+\displaystyle\frac{dx^3}{dx}=2 \cdot 4x^3 +3x^2 = 8x^3 +3x^2

The Derivative of a Product

If

h(x)=f(x)g(x)h(x)=f(x)\cdot g(x)

then

h(x)=f(x)g(x)+f(x)g(x)h'(x)=f'(x)\cdot g(x)+f(x)\cdot g'(x)

Details

Consider two functions, ff and gg and their product, hh :

h(x)=f(x)g(x)h(x)=f(x)\cdot g(x)

The derivative of the product is given by

h(x)=f(x)g(x)+f(x)g(x)h'(x)=f'(x)\cdot g(x)+f(x)\cdot g'(x)

Examples

Example

Suppose the function ff is given by

f(x)=xex+x2lnxf(x)=xe^x+x^2\ln x

Then the derivative can be computed step by step as

\begin{aligned} f'(x) &= \displaystyle\frac{dx}{dx}e^x+x\displaystyle\frac{de^x}{dx}+\displaystyle\frac{dx^2}{dx}\ln x +x^2\displaystyle\frac{d \ln x}{dx} \\ & = 1\cdot e^x + x \cdot e^x + 2x \cdot \ln x + x^2 \cdot \displaystyle\frac{1}{x} \\ & = e^x \left ( 1+x \right ) + 2x \ln x + x \end{aligned}

Derivatives of Composite Functions

If ff and gg are functions and h=fgh=f \circ g so that h(x)=f(g(x))h(x) = f(g(x)) then h(x)=dh(x)dx=f(g(x))g(x)h'(x) = \displaystyle\frac{dh(x)}{dx} = f'(g(x)) g'(x)

Examples

Example

For fixed xx consider:

f(p)=ln(px(1p)nx)=lnpx+ln(1p)nx=xlnp+(nx)ln(1p)\begin{aligned} f(p) &= \ln(p^{x} (1-p)^{n-x}) \\ &= \ln p^{x} + \ln(1-p)^{n-x} \\ &= x \ln p + (n-x) \ln (1-p) \end{aligned}

Then the derivative is computed as follows:

f(p)=x1p+nx1p(1)=xpnx1p\begin{aligned} f'(p) &= x \displaystyle\frac{1}{p} + \displaystyle\frac{n-x}{1-p}(-1) \\ &= \displaystyle\frac{x}{p} - \displaystyle\frac{n-x}{1-p} \end{aligned}
Example

For fixed xx and yy consider

f(b)=(ybx)2f(b) = (y-bx)^2

Then the derivative is computed as follows:

f(b)=2(ybx)(x)=2x(ybx)=(2xy)+(2x2)b\begin{aligned} f'(b) &= 2 (y-bx) (-x) \\ &= -2x (y-bx) \\ &= (-2xy) + (2x^2)b \end{aligned}
- + \ No newline at end of file diff --git a/Functions/Functions of Functions and the Exponential Function/index.html b/Functions/Functions of Functions and the Exponential Function/index.html index 94bf70f..cfe701f 100644 --- a/Functions/Functions of Functions and the Exponential Function/index.html +++ b/Functions/Functions of Functions and the Exponential Function/index.html @@ -4,13 +4,13 @@ Functions of Functions and the Exponential Function | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Functions of Functions and the Exponential Function/read/index.html b/Functions/Functions of Functions and the Exponential Function/read/index.html index 411a031..9e56323 100644 --- a/Functions/Functions of Functions and the Exponential Function/read/index.html +++ b/Functions/Functions of Functions and the Exponential Function/read/index.html @@ -4,7 +4,7 @@ Functions of Functions and the Exponential Function | Computing and Calculus for Applied Statistics - + @@ -15,7 +15,7 @@ If y=f(x)y = f(x) and g(y)g(y) exist then we can compute g(f(x))g(f(x)) for any xx.

If

f(x)=x2 and g(y)=eyf(x) = {x}^2 \text{ and } g(y)= {e}^y

then

g(f(x))=ef(x)=ex2g(f(x))= {e}^{f(x)} = {e}^{x^2}

If we call the resulting function hh, then h(x)=g(f(x))h(x) = g(f(x)). Another common notation for this is

h=gfh = g\circ f

Examples

Example

If g(x)=3+2xg(x)= {3}+ {2}x and f(x)=5x2f(x) = {5}{x}^2, then

g(f(x))=3+2f(x)=3+10x2,g(f(x)) = {3} +{2} f(x) = {3} +{10x}^2,

and

f(g(x))=5(g(x))2=5(3+2x)2=45+60x+20x2f(g(x)) = {5}{(g(x))}^2 = {5}{({3}+{2x})}^2 = {45}+{60x}+{20x}^2

Storing and Using R Code

As R code gets more complex (more lines) it is usually stored in files. Functions are typically stored in separate files.

Examples

Example

Save the following file (test.r):

x=4
y=8
cat("x+y is", x+y, "\n")

To read the file use:

source("test.r")

and the outcome of the equation is displayed in R.

Storing and Calling Functions In R

To save a function separately, store it in its own file, named with a form such as function.r.

Examples

Example
f<-function(x) { return (exp(sum(x))) }

can be stored in a file function.r and subsequently read using the source command.

- + \ No newline at end of file diff --git a/Functions/Integrals and Probability Density Functions/index.html b/Functions/Integrals and Probability Density Functions/index.html index 19dc237..b818fcf 100644 --- a/Functions/Integrals and Probability Density Functions/index.html +++ b/Functions/Integrals and Probability Density Functions/index.html @@ -4,13 +4,13 @@ Integrals and Probability Density Functions | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Integrals and Probability Density Functions/read/index.html b/Functions/Integrals and Probability Density Functions/read/index.html index 134959c..34d374e 100644 --- a/Functions/Integrals and Probability Density Functions/read/index.html +++ b/Functions/Integrals and Probability Density Functions/read/index.html @@ -4,7 +4,7 @@ Integrals and Probability Density Functions | Computing and Calculus for Applied Statistics - + @@ -79,7 +79,7 @@ c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">1dt=14etdt=e4e

Handout

The two most common "tricks" applied in integration are a) integration by parts and b) integration by substitution

a) Integration by parts

(fg)=fg+fg(fg)'=f'g+fg'

by integrating both sides of the equation we obtain:

fg=fgdx+fgdxfgdx=fgfgdxfg=\int f'g dx + \int fg' dx \leftrightarrow \int fg' dx=fg-\int f'g dx

b) Integration by substitution

Consider the definite integral abf(x)dx\int_a^b f(x) dx and let gg be a one-to-one differentiable function from the interval (c,d)(c,d) to (a,b)(a,b). Then

abf(x)dx=cdf(g(y))g(y)dy\int_a^b f(x) dx=\int_c^d f(g(y))g'(y) dy

- + \ No newline at end of file diff --git a/Functions/Inverse Functions and the Logarithm/index.html b/Functions/Inverse Functions and the Logarithm/index.html index 276f03c..411279d 100644 --- a/Functions/Inverse Functions and the Logarithm/index.html +++ b/Functions/Inverse Functions and the Logarithm/index.html @@ -4,13 +4,13 @@ Inverse Functions and the Logarithm | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Inverse Functions and the Logarithm/read/index.html b/Functions/Inverse Functions and the Logarithm/read/index.html index aef0244..9f75717 100644 --- a/Functions/Inverse Functions and the Logarithm/read/index.html +++ b/Functions/Inverse Functions and the Logarithm/read/index.html @@ -4,7 +4,7 @@ Inverse Functions and the Logarithm | Computing and Calculus for Applied Statistics - + @@ -36,7 +36,7 @@ The simplest approach is to write y=f(x)y=f(x) and solve for xx.

With

f(x)=5+4xf(x) = 5 + 4x

we write

y=5+4xy = 5 + 4x

which we can now rewrite as

y5=4xy - 5 = 4x

and this implies

y54=x\displaystyle\frac{y-5}{4} = x

And there we have it, very simple:

f^{-1}(y) = \displaystyle\frac{y - 5}{4}

The Base 10 Logarithm

When xx is a positive real number in x=10yx=10^y, yy is referred to as the base 10 logarithm of x and is written as:

y=log10(x)y=\log_{10}(x)

or

y=log(x)y=\log(x)

Details

If log(x)=a\log (x) = a and log(y)=b\log (y)=b, then x=10ax = 10^a and y=10by = 10^b, and

xy=10a10b=10a+bx \cdot y = 10^a \cdot 10^b = 10^{a+b}

so that

log(xy)=a+b\log(xy) = a+b

Examples

Example
log(100)=2log(1000)=3\begin{aligned} \log(100) &= 2 \\ \log(1000) &= 3 \end{aligned}
Example

If

log(2)0.3\log(2) \approx 0.3

then

10y=210^y=2

Note

210=10241000=1032^{10}=1024 \approx 1000 = 10^3

therefore

2103/102 \approx 10^{3/10}

so

log(2)0.3\log (2) \approx 0.3

The Natural Logarithm

A logarithm with ee as a base is referred to as the natural logarithm and is denoted as ln\ln:

y=ln(x)y=\ln(x)

if

x=ey=exp(y)x=e^y=\exp(y)

Note that ln\ln is the inverse of exp\exp.

Fig. 14

Figure: The curve depicts the function y=ln(x)y=\ln(x) and shows that ln\ln is the inverse of exp\exp. Note that ln(1)=0\ln(1)=0 and when y=0y=0 then e0=1e^0=1.

Properties of Logarithm(s)

Logarithms transform multiplicative models into additive models, i.e.

ln(ab)=lna+lnb\ln(a\cdot b) = \ln a + \ln b

Details

This implies that any statistical model, which is multiplicative becomes additive on a log scale, e.g.

y=awbxcy = a \cdot w^b \cdot x^c

lny=(lna)+ln(wb)+ln(xc)\ln y = (\ln a) + \ln (w^b) + \ln (x^c)

Next, note that

ln(x2)=ln(xx)=lnx+lnx=2lnx\begin{aligned} \ln (x^2) &= \ln (x \cdot x) \\ &= \ln x + \ln x \\ &= 2 \cdot \ln x \end{aligned}

and similarly ln(xn)=nlnx\ln (x^n) = n \cdot \ln x for any integer nn.

In general ln(xc)=clnx\ln (x^c) = c \cdot \ln x for any real number c (for x>0x>0).

Thus the multiplicative model (from above)

y=awbxcy=a \cdot w^b \cdot x^c

becomes

\ln y = (\ln a) + b \cdot \ln w + c \cdot \ln x

which is a linear model with parameters (lna)(\ln a), bb and cc.

In addition, the log-transform is often variance-stabilizing.

The Exponential Function and the Logarithm

The exponential function and the logarithms are inverses of each other

x=eyy=lnxx = e^y \leftrightarrow y = \ln{x}

Details

Note

Note the properties:

ln(xy)=ln(x)+ln(y)\ln (x \cdot y) = \ln (x) + \ln (y)

and

eaeb=ea+be^a \cdot e^b = e^{a+b}

Examples

Example

Solve the equation

10e1/3x+3=2410e^{1/3x} + 3 = 24

for xx.

First, get the 33 out of the way:

10e1/3x=2110e^{1/3x} = 21

Then the 1010:

e1/3x=2.1e^{1/3x} = 2.1

Next, we can take the natural log of 2.1. Since ln\ln is an inverse function of ee this would result in

13x=ln(2.1)\displaystyle\frac{1}{3}x = \ln(2.1)

This yields

x=ln(2.1)3x = \ln(2.1) \cdot 3

which is

2.23\approx 2.23

- + \ No newline at end of file diff --git a/Functions/Miscellanea/index.html b/Functions/Miscellanea/index.html index bec18dc..eb475f4 100644 --- a/Functions/Miscellanea/index.html +++ b/Functions/Miscellanea/index.html @@ -4,13 +4,13 @@ Miscellanea | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Miscellanea/read/index.html b/Functions/Miscellanea/read/index.html index e1adf23..ce36782 100644 --- a/Functions/Miscellanea/read/index.html +++ b/Functions/Miscellanea/read/index.html @@ -4,7 +4,7 @@ Miscellanea | Computing and Calculus for Applied Statistics - + @@ -59,7 +59,7 @@ c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">Xμ0

where ZN(0,1)Z \sim N(0,1) when H0H_0 is correct. It follows that e.g. P[Z>1.96]=0.05P[\vert Z \vert > 1.96] = 0.05 and if we observe Z>1.96\vert Z \vert > 1.96 then we reject the null hypothesis.

Note that the value z=1.96z^\ast = 1.96 is a quantile of the normal distribution and we can obtain other quantiles with the qnorm function, e.g. qnorm(0.975) gives 1.961.96.

- + \ No newline at end of file diff --git a/Functions/Overview/index.html b/Functions/Overview/index.html index f4450d1..5f9a69e 100644 --- a/Functions/Overview/index.html +++ b/Functions/Overview/index.html @@ -4,13 +4,13 @@ Overview | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Overview/read/index.html b/Functions/Overview/read/index.html index ac5413f..6737fe2 100644 --- a/Functions/Overview/read/index.html +++ b/Functions/Overview/read/index.html @@ -4,13 +4,13 @@ Functions of Functions to Miscellanea | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Principles of Programming/index.html b/Functions/Principles of Programming/index.html index 14d3d86..fee5205 100644 --- a/Functions/Principles of Programming/index.html +++ b/Functions/Principles of Programming/index.html @@ -4,13 +4,13 @@ Principles of Programming | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Principles of Programming/read/index.html b/Functions/Principles of Programming/read/index.html index 5059917..50fdd23 100644 --- a/Functions/Principles of Programming/read/index.html +++ b/Functions/Principles of Programming/read/index.html @@ -4,7 +4,7 @@ Principles of Programming | Computing and Calculus for Applied Statistics - + @@ -23,7 +23,7 @@ To run a complete analysis one would typically set up one file to run all the tasks by reading in data through analyses to outputs

For example, a file named run.r could contain the sequence of commands:

source("setup.r")
source("analysis.r")
source("plot.r")

Loops, for

If a piece of code is to be run repeatedly, the for-loop is normally used.

Details

If a piece of code is to be run repeatedly, the for-loop is normally used. The R code form is:

for(index in sequence){ commands }

Examples

Example

To add numbers we can use

tot <- 0
for(i in 1:100){ tot <- tot + i }
cat("the sum is ", tot, "\n")
Example

Define the plot function

plotwtle <- AS BEFORE

To plot several of these we can use a sequence:

plotwtle(101)
plotwtle(102)
...

or a loop

for (i in 101:150) {
fname <- paste("plot", i, ".pdf", sep="")
pdf(fname)
plotwtle(i)
dev.off()
}

The if and ifelse Commands

The if statement is used to conditionally execute statements

The ifelse statement conditionally replaces elements of a structure.

Examples

Example

If we want to compute xxx^x for xx-values in the range 00 through 55, we can use

xlist <- seq(0,5,0.01)
y <- NULL
for(x in xlist) {
if (x==0) {
y <- c(y,1)
}
else {
y <- c(y,x**x)
}
}
Example
x <- seq(0,5,0.01)
y <- ifelse(x==0,1,x^x)
Example
dat <- read.table("file")
dat <- ifelse(dat==0,0.01,dat)
Example
x <- ifelse(is.na(x),0,x)

Indenting

Code should be properly indented!

Details

Functions, for-loops, and if-statements should always be indented.

Comments

All code should contain informative comments. Comments are separated out from code using the pound symbol (#).

Examples

Example
#################### #####SETUP DATA##### ####################

dat <- read.table(filename)
x <- log(dat$le) #log-transformation of length
y <- log(dat$wt) #log-transformation of weight

###################### #####THE ANALYSIS##### ######################
- + \ No newline at end of file diff --git a/Functions/Sequences and Series/index.html b/Functions/Sequences and Series/index.html index 922dbfa..659f123 100644 --- a/Functions/Sequences and Series/index.html +++ b/Functions/Sequences and Series/index.html @@ -4,13 +4,13 @@ Sequences and Series | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Sequences and Series/read/index.html b/Functions/Sequences and Series/read/index.html index a9f992e..9122611 100644 --- a/Functions/Sequences and Series/read/index.html +++ b/Functions/Sequences and Series/read/index.html @@ -4,7 +4,7 @@ Sequences and Series | Computing and Calculus for Applied Statistics - + @@ -13,7 +13,7 @@ We denote this sequence with (an)n1(a_n)_{n\geq1}.

Details

In a sequence the same number can appear several times in different places.

Examples

Example

(1n)n1 is the sequence 1,12,13,14,(\displaystyle\frac{1}{n})_{n\geq1} \text{ is the sequence } 1, \displaystyle\frac{1}{2}, \displaystyle\frac{1}{3}, \displaystyle\frac{1}{4}, \ldots

Example

(n)n1 is the sequence 1,2,3,4,5,(n)_{n\geq1} \text{ is the sequence } 1, 2, 3, 4, 5, \ldots

Example

(2nn)n1 is the sequence 2,8,24,64,(2^nn)_{n\geq1} \text{ is the sequence } 2, 8, 24, 64, \ldots

Convergent Sequences

A sequence ana_n is said to converge to the number bb if for every ε>0\varepsilon >0 we can find an NNN\in \mathbb{N} such that anb<ε|a_n-b| < \varepsilon for all nNn \geq N. We denote this with limnan=b\lim_{n\to\infty}a_n=b or anba_n\to b, as nn\to\infty.

Details

A sequence ana_n is said to converge to the number bb if for every ε>0\varepsilon >0 we can find an NNN\in \mathbb{N} such that anb<ε|a_n-b| < \varepsilon for all nNn \geq N. We denote this with limnan=b\lim_{n\to\infty}a_n=b or anba_n\to b, as nn\to\infty.

If xx is a number, then

(1+xn)nex as n(1 + \displaystyle\frac{x}{n})^n \rightarrow e^x \text{ as } n\rightarrow\infty

Examples

Example

The sequence (\displaystyle\frac{1}{n})_{n\geq 1} converges to 00 as nn\to\infty.

Example

If x is a number, then

(1+xn)nex as n(1 + \displaystyle\frac{x}{n})^n \rightarrow e^x \text{ as } n\rightarrow\infty

Infinite Sums (series)

We are interested in, whether infinite sums of sequences can be defined.

Details

Consider a sequence of numbers, (an)n(a_n)_{n\to\infty}.

Now define another sequence (sn)n,(s_n)_{n\to\infty}, where

sn=k=1naks_n=\displaystyle\sum_{k=1}^na_k

If (sn)n(s_n)_{n\to\infty} is convergent to S=limnsn,S=\lim_{n\to\infty}s_n, then we write

S=n=1anS=\displaystyle\sum_{n=1}^{\infty}a_n

Examples

Example

If

a_k = x^k, \qquad k=0,1,\dots

then

s_n=\displaystyle\sum_{k=0}^{n}x^k=x^0+x^1+\dots+x^n

Note also that

xs_n=x(x^0+x^1+\dots+x^n)= x + x^2 + \dots + x^{n+1}

We have

sn=1+x+x2++xns_n = 1 + x + x^2 + \dots + x^n

xsn=x+x2++xn+xn+1xs_n = x + x^2 + \dots +x^n + x^{n+1}

s_n - xs_n = 1 - x^{n+1}

i.e.

sn(1x)=1xn+1s_n(1-x) = 1-x^{n+1}

and we have

sn=1xn+11xs_n =\displaystyle\frac{1-x^{n+1}}{1-x}

if x1x\neq1.

If 0<x<10< x<1 then xn+10x^{n+1}\to 0 as nn\to\infty and we obtain sn11xs_n\to\displaystyle\frac{1}{1-x} so

n=0xn=11x\displaystyle\sum_{n=0}^{\infty}x^n=\displaystyle\frac{1}{1-x}

The Exponential Function and the Poisson Distribution

The exponential function can be written as a series (infinite sum):

ex=n=0xnn!e^x=\displaystyle\sum_{n=0}^{\infty}\displaystyle\frac{x^n}{n!}

The Poisson distribution is defined by the probabilities

p(x)=eλλxx! for x=0, 1, 2, p(x)=e^{-\lambda}\displaystyle\frac{\lambda^x}{x!}\textrm{ for } x=0,\ 1,\ 2,\ \ldots

Details

The exponential function can be written as a series (infinite sum):

ex=n=0xnn!e^x=\displaystyle\sum_{n=0}^{\infty}\displaystyle\frac{x^n}{n!}

Knowing this we can see why the Poisson probabilities

p(x)=eλλxx!p(x)=e^{-\lambda}\displaystyle\frac{\lambda^x}{x!}

add to one:

x=0p(x)=x=0eλλxx!=eλx=0λxx!=eλeλ=1\displaystyle\sum_{x=0}^{\infty}p(x)=\displaystyle\sum_{x=0}^{\infty}e^{-\lambda}\displaystyle\frac{\lambda^x}{x!}=e^{-\lambda}\displaystyle\sum_{x=0}^{\infty}\displaystyle\frac{\lambda^x}{x!}=e^{-\lambda}e^{\lambda}=1

Relation to Expected Values

The expected value for the Poisson is given by

x=0xp(x)=x=0xeλλxx!=λ\begin{aligned} \displaystyle\sum_{x=0}^\infty x p(x) &= \displaystyle\sum_{x=0}^\infty x e^{-\lambda} \displaystyle\frac{\lambda^x}{x!} \\ &= \lambda \end{aligned}

Details

The expected value for the Poisson is given by

x=0xp(x)=x=0xeλλxx!=eλx=1xλxx!=eλx=1λx(x1)!=eλλx=1λ(x1)(x1)!=eλλx=0λxx!=eλλeλ=λ\begin{aligned} \displaystyle\sum_{x=0}^\infty x p(x) &= \displaystyle\sum_{x=0}^\infty x e^{-\lambda} \displaystyle\frac{\lambda^x}{x!} \\ &= e^{-\lambda} \displaystyle\sum_{x=1}^\infty \displaystyle\frac{x\lambda^x}{x!} \\ &= e^{-\lambda} \displaystyle\sum_{x=1}^\infty \displaystyle\frac{\lambda^x}{(x-1)!} \\ &= e^{-\lambda} \lambda \displaystyle\sum_{x=1}^\infty \displaystyle\frac{\lambda^{(x-1)}}{(x-1)!} \\ &= e^{-\lambda} \lambda \displaystyle\sum_{x=0}^\infty \displaystyle\frac{\lambda^{x}}{x!} \\ &= e^{-\lambda} \lambda e^{\lambda} \\ &= \lambda \end{aligned}
- + \ No newline at end of file diff --git a/Functions/Slopes of Lines and Curves/index.html b/Functions/Slopes of Lines and Curves/index.html index b0edec7..f4a3968 100644 --- a/Functions/Slopes of Lines and Curves/index.html +++ b/Functions/Slopes of Lines and Curves/index.html @@ -4,13 +4,13 @@ Slopes of Lines and Curves | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/Slopes of Lines and Curves/read/index.html b/Functions/Slopes of Lines and Curves/read/index.html index 4e52e70..4cc0b77 100644 --- a/Functions/Slopes of Lines and Curves/read/index.html +++ b/Functions/Slopes of Lines and Curves/read/index.html @@ -4,7 +4,7 @@ Slopes of Lines and Curves | Computing and Calculus for Applied Statistics - + @@ -21,7 +21,7 @@ Here we want to find the slope of a line tangent to the curve at a specific point (x0)(x_0). The slope of the line segment is given by the equation f(x0+h)f(x0)h\displaystyle\frac{f (x_0 +h) - f(x_0)} {h}. Reducing hh towards zero, gives the slope of this curve if it exists.

- + \ No newline at end of file diff --git a/Functions/The Central Limit Theorem and Related Topics/index.html b/Functions/The Central Limit Theorem and Related Topics/index.html index fb96f53..156de20 100644 --- a/Functions/The Central Limit Theorem and Related Topics/index.html +++ b/Functions/The Central Limit Theorem and Related Topics/index.html @@ -4,13 +4,13 @@ The Central Limit Theorem and Related Topics | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Functions/The Central Limit Theorem and Related Topics/read/index.html b/Functions/The Central Limit Theorem and Related Topics/read/index.html index 8abb61f..f69c5b5 100644 --- a/Functions/The Central Limit Theorem and Related Topics/read/index.html +++ b/Functions/The Central Limit Theorem and Related Topics/read/index.html @@ -4,7 +4,7 @@ The Central Limit Theorem and Related Topics | Computing and Calculus for Applied Statistics - + @@ -86,7 +86,7 @@ c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">xμ

but we want to know the distribution of those when μ\mu is the true mean.

For instance, n=5n=5 and μ=1\mu = 1, we can simulate (repeatedly) x1,,x5x_1, \ldots, x_5 and compute a tt -value for each. The following R commands can be used for this:

library(MASS)
n <-5
mu <-1
lambda <-1
tvec <-NULL
for(sim in 1:10000) {
x <-rexp(n,lambda)
xbar <-mean(x)
s <-sd(x)
t <-(xbar-mu)/(s/sqrt(n))
tvec <- c(tvec,t)
}

truehist(tvec) # truehist gives a better histogram

Show values at certain positions in the vector by using:

> sort(tvec)[9750]
[1] 1.698656

> sort(tvec)[250]
[1] -6.775726
- + \ No newline at end of file diff --git a/Functions/index.html b/Functions/index.html index 3a70008..ccf6491 100644 --- a/Functions/index.html +++ b/Functions/index.html @@ -4,13 +4,13 @@ Functions | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Estimation, Estimates and Estimators/index.html b/Multivariate to Power/Estimation, Estimates and Estimators/index.html index 707f2c1..c46bdb6 100644 --- a/Multivariate to Power/Estimation, Estimates and Estimators/index.html +++ b/Multivariate to Power/Estimation, Estimates and Estimators/index.html @@ -4,13 +4,13 @@ Estimation, Estimates and Estimators | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Estimation, Estimates and Estimators/read/index.html b/Multivariate to Power/Estimation, Estimates and Estimators/read/index.html index 6253ce9..c251217 100644 --- a/Multivariate to Power/Estimation, Estimates and Estimators/read/index.html +++ b/Multivariate to Power/Estimation, Estimates and Estimators/read/index.html @@ -4,7 +4,7 @@ Estimation, Estimates and Estimators | Computing and Calculus for Applied Statistics - + @@ -57,7 +57,7 @@ s-225.272,467,-225.272,467s-235,486,-235,486c-2.7,4.7,-9,7,-19,7 c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">σ where n=4n= 4, 1616 or 6464.

Estimators and Estimates

In OLS regression, note that the values of aa and bb:

a=ybxa = \overline{y} - b \overline{x}

b=Σi=1n(xix)(yiy)Σi=1n(xix)2b = \displaystyle\frac{\Sigma_{i=1}^{n} (x_i - \overline{x}) (y_i - \overline{y})}{\Sigma_{i=1}^{n} (x_i - \overline{x})^2}

are outcomes of random variables e.g. bb is the outcome of

β^=Σi=1n(xix)(YiY)Σi=1n(xix)2\hat{\beta} = \displaystyle\frac{\Sigma_{i=1}^{n} (x_i - \overline{x}) (Y_i - \overline{Y})}{\Sigma_{i=1}^{n} (x_i - \overline{x})^2}

the estimator which has some distribution.

Fig. 37

Figure: Shows an example of the distribution of the estimator β^\hat{\beta}

Details

The following R commands can be used to generate a distribution for the estimator β^\hat{\beta}

library(MASS)
nsim <- 1000
betahat <- NULL
for (i in 1:nsim) {
n <- 20
x <- seq(1:n) # Fixed x vector
y <- 2 + 0.4*x + rnorm(n, 0, 1)
xbar <- mean(x)
ybar <- mean(y)
b <- sum((x-xbar)*(y-ybar))/sum((x-xbar)^2)
a <- ybar - b * xbar
betahat <- c(betahat, b)
}
truehist(betahat)
- + \ No newline at end of file diff --git a/Multivariate to Power/Multivariate Probability Distributions/index.html b/Multivariate to Power/Multivariate Probability Distributions/index.html index 72e7a9e..9bf6371 100644 --- a/Multivariate to Power/Multivariate Probability Distributions/index.html +++ b/Multivariate to Power/Multivariate Probability Distributions/index.html @@ -4,13 +4,13 @@ Multivariate Probability Distributions | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Multivariate Probability Distributions/read/index.html b/Multivariate to Power/Multivariate Probability Distributions/read/index.html index fd47f9e..dc4e4e7 100644 --- a/Multivariate to Power/Multivariate Probability Distributions/read/index.html +++ b/Multivariate to Power/Multivariate Probability Distributions/read/index.html @@ -4,7 +4,7 @@ Multivariate Probability Distributions | Computing and Calculus for Applied Statistics - + @@ -76,7 +76,7 @@ c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">σ21e2σ22(yμ2)2

and the joint density becomes:

12πσ1σ2e(xμ1)22σ12(yμ2)22σ22\displaystyle\frac{1}{2\pi\sigma_1\sigma_2} e^{-\displaystyle\frac{(x-\mu_1)^2}{2\sigma_1^2}-\displaystyle\frac{(y-\mu_2)^2}{2\sigma_2^2}}

Now, suppose X1,,XnN(μ,σ2)X_1,\ldots,X_n\sim N(\mu,\sigma^2) are independent and identically distributed, then

f(\underline{x})=\displaystyle\frac{1}{(2\pi)^{\displaystyle\frac{n}{2}}\sigma^n} e^{-\displaystyle\sum^{n}_{i=1} \displaystyle\frac{(x_i-\mu)^2}{2\sigma^2}}

is the multivariate normal density in the case of independent and identically distributed variables.

More General Multivariate Probability Density Functions

Examples

Example

Suppose XX and YY have the joint density

f(x,y)={2 0yx10 otherwisef(x,y) = \begin{cases} 2 & \text{ } 0\leq y \leq x \leq 1 \\ 0 & \text{ otherwise} \end{cases}

First notice that

\displaystyle\int_{\mathbb{R}}\displaystyle\int_{\mathbb{R}}f(x,y)dxdy = \displaystyle\int_{x=0}^{1}\displaystyle\int_{y=0}^x 2dydx = \displaystyle\int_0^1 2xdx = 1

so ff is indeed a density function.

Now, to find the density of XX, we first find the c.d.f. of XX. First note that for a<0a<0 we have P[Xa]=0P[X\leq a]=0, but, if a0a\geq 0, we obtain

F_X(a)=P[X\leq a]=\displaystyle\int_{x=0}^a \displaystyle\int_{y=0}^x 2dydx=[x^2]_0^a=a^2

The density of XX is therefore

f_X(x) = \displaystyle\frac{dF(x)}{dx} = \begin{cases} 2x & \text{if } 0\leq x \leq 1 \\ 0 & \text{ otherwise} \end{cases}

Handout

If f:RnRf: \mathbb{R}^n\rightarrow\mathbb{R} is such that P[XA]=Af(x1,,xn)dx1dxnP[X \in A] =\displaystyle\int_A\ldots\displaystyle\int f(x_1,\ldots, x_n)dx_1\cdots dx_n and f(x)0f(x)\geq 0 for all xRn\underline{x}\in \mathbb{R}^n, then ff is the joint density of

X=(X1Xn)\mathbf{X}= \left( \begin{array}{ccc} X_1 \\ \vdots \\ X_n \end{array} \right)

If we have the joint density of some multidimensional random variable X=(X1,,Xn)X=(X_1,\ldots,X_n) given in this manner, then we can find the individual density functions of the XiX_i 's by integrating the other variables.

- + \ No newline at end of file diff --git a/Multivariate to Power/Overview/index.html b/Multivariate to Power/Overview/index.html index 0d90d70..b2d2e73 100644 --- a/Multivariate to Power/Overview/index.html +++ b/Multivariate to Power/Overview/index.html @@ -4,13 +4,13 @@ Overview | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Overview/read/index.html b/Multivariate to Power/Overview/read/index.html index d7b12a5..a6dfa6a 100644 --- a/Multivariate to Power/Overview/read/index.html +++ b/Multivariate to Power/Overview/read/index.html @@ -4,13 +4,13 @@ Multivariate to Power | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Power and Sample Sizes/index.html b/Multivariate to Power/Power and Sample Sizes/index.html index 779679f..8770ed1 100644 --- a/Multivariate to Power/Power and Sample Sizes/index.html +++ b/Multivariate to Power/Power and Sample Sizes/index.html @@ -4,13 +4,13 @@ Power and Sample Sizes | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Power and Sample Sizes/read/index.html b/Multivariate to Power/Power and Sample Sizes/read/index.html index e2d1a2b..bbf0b31 100644 --- a/Multivariate to Power/Power and Sample Sizes/read/index.html +++ b/Multivariate to Power/Power and Sample Sizes/read/index.html @@ -4,7 +4,7 @@ Power and Sample Sizes | Computing and Calculus for Applied Statistics - + @@ -733,7 +733,7 @@ However, this is not the correct model in the present situation. Using the above value of β\beta. Taking this into account, the power is actually a bit lower or 77.5%.

- + \ No newline at end of file diff --git a/Multivariate to Power/Some Distributions Related to Normal/index.html b/Multivariate to Power/Some Distributions Related to Normal/index.html index 75b780a..41aedeb 100644 --- a/Multivariate to Power/Some Distributions Related to Normal/index.html +++ b/Multivariate to Power/Some Distributions Related to Normal/index.html @@ -4,13 +4,13 @@ Some Distributions Related to Normal | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Some Distributions Related to Normal/read/index.html b/Multivariate to Power/Some Distributions Related to Normal/read/index.html index a23073a..d5d9c6b 100644 --- a/Multivariate to Power/Some Distributions Related to Normal/read/index.html +++ b/Multivariate to Power/Some Distributions Related to Normal/read/index.html @@ -4,7 +4,7 @@ Some Distributions Related to the Normal | Computing and Calculus for Applied Statistics - + @@ -60,7 +60,7 @@ s-225.272,467,-225.272,467s-235,486,-235,486c-2.7,4.7,-9,7,-19,7 c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">XˉμN(0,1), (XiXˉ)2σ2χn12\displaystyle\sum \displaystyle\frac{(X_i-\bar{X})^2}{\sigma^2}\sim \chi_{n-1}^2.

- + \ No newline at end of file diff --git a/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/index.html b/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/index.html index 5486fdb..f66d2af 100644 --- a/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/index.html +++ b/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/index.html @@ -4,13 +4,13 @@ Test of Hypothesis, P Values and Related Concepts | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read/index.html b/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read/index.html index 937f666..5b0bc49 100644 --- a/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read/index.html +++ b/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read/index.html @@ -4,7 +4,7 @@ Test of Hypothesis, P Values and Related Concepts | Computing and Calculus for Applied Statistics - + @@ -191,7 +191,7 @@ c-8,0,-12,-0.7,-12,-2z M1001 80 h400000v40h-400000z">xy

and reject H0H_0 if z>z1αz>z_{1-\alpha}.

The P-value

The pp -value of a test is an evaluation of the probability of obtaining results which are as extreme as those observed in the context of the hypothesis.

Examples

Example

Consider a dataset and the following hypotheses

H0:μ=42H_0:\mu=42

vs.

H1:μ>42H_1:\mu>42

and suppose we obtain

z=2.3z=2.3

We reject H0H_0 since

2.3 > 1.645 = z_{0.95}

The pp -value is

P[Z>2.3]=1Φ(2.3)P[Z>2.3]= 1-\Phi(2.3)

obtained in R using

1-pnorm(2.3) [1] 0.01072411

If this had been a two tailed test, then

P=P[Z>2.3]=P[Z<2.3]+P[Z>2.3]=2P[Z>2.3]\begin{aligned} P &= P[|Z|>2.3] \\ &= P[Z<-2.3]+P[Z>2.3] \\ &= 2\cdot P[Z>2.3] \end{aligned}

The Concept of Significance

Details

Two sample means are statistically significantly different if the null hypothesis H0:μ1=μ2H_0:\mu_1 = \mu_2, can be rejected. In this case, one can make the following statements:

But one does not say:

Similarly, if the hypothesis H0:μ1=μ2H_0: \mu_1 = \mu_2 cannot be rejected, we can say:

But we cannot say:

- + \ No newline at end of file diff --git a/Multivariate to Power/index.html b/Multivariate to Power/index.html index efecb76..0176879 100644 --- a/Multivariate to Power/index.html +++ b/Multivariate to Power/index.html @@ -4,13 +4,13 @@ Multivariate to Power | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Data Vectors/index.html b/Numbers to Indices/Data Vectors/index.html index da6a383..bbe3ea9 100644 --- a/Numbers to Indices/Data Vectors/index.html +++ b/Numbers to Indices/Data Vectors/index.html @@ -4,13 +4,13 @@ Data Vectors | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Data Vectors/read/index.html b/Numbers to Indices/Data Vectors/read/index.html index 5b78e67..8bf5cf6 100644 --- a/Numbers to Indices/Data Vectors/read/index.html +++ b/Numbers to Indices/Data Vectors/read/index.html @@ -4,7 +4,7 @@ Data Vectors | Computing and Calculus for Applied Statistics - + @@ -72,7 +72,7 @@ The second and third points were labeled using the text function and a line was drawn between them using the lines function.

plot(c(2,3),c(3,4),xlim=c(2,6),ylim=c(1,5),xlab="x",ylab="y")
points(4,2)
text(3,4,"(3,4)",pos=4, cex=2)
text(4,2,"(4,2)",pos=4, cex=2)
lines(c(3,4), c(4,2))

Note: Note that if you are unsure of what format the arguments of an R function needs to be, you can call a help file by typing ? before the function name (e.g. ?lines).

Data

Data are usually a sequence of numbers, typically called a vector.

Details

When we collect data these are one or more sequences of numbers, collected into data vectors. We commonly think of these data vectors as columns in a table.

Examples

Example

In R, if the command

x <- c(4,5,3,7)

is given, then x contains a vector of numbers.

Example

Create a function in R, give it a name Myfunction which takes the sum of x and y:

Myfunction <- function(x,y) { sum(x,y) }

If you input the vectors 1:3 and 4:7 into the function it will calculate the sum of x <- (1+2+3) and y <- (4+5+6+7) as follows:

> Myfunction(1:3,4:7)
[1] 28

Indices for a Data Vector

If data are in a vector x, then we use indices to refer to individual elements.

Details

If i is an integer then xix_i denotes the ithi^{th} element of xx.

Note: Although we do not distinguish (much) between row- and column vectors, usually a vector is thought of as a column. If we need to specify the type of vector, row or column, then for vector xx, the column vector would be referred to as xx' and the row vector as xTx^T.

(the transpose of the original).

Examples

Example

If x=(4,5,3,7)x=(4,5,3,7) then x1=4x_1=4 and x4=7x_4=7

Example

How to remove all values below a certain threshold in R?

> x <- c(1,5,8,9,4,16,12,7,11)

> x
[1] 1 5 8 9 4 16 12 7 11

> y <- x[x>10]

> y
[1] 16 12 11
Example

Consider a function that takes two vectors

aRn,bNma \in \mathbb{R}^n, b \in \mathbb{N}^m

as arguments with:

nmn \ge m

and:

1b1,,bmn.1 \le b_1,\dots,b_m \le n.

The function returns the sum:

\sum_{i = 1}^m a_{b_i}

Long version:

> fn <- function(a,b) {
+ result <- sum(a[b])
+ return(result)
+ }

Short version:

fN <- function(a,b) sum(a[b])

Summation

We use the symbol Σ\Sigma to denote sums.

In R, the sum function adds numbers.

Examples

Example

If x=(4,5,3,7)x=(4,5,3,7)

then

i=14xi=x1+x2+x3+x4=4+5+3+7=19\sum_{i=1}^{4} x_i = x_1+x_2+x_3+x_4 = 4+5+3+7 = 19

and

i=24xi=x2+x3+x4=5+3+7=15.\sum_{i=2}^{4} x_i = x_2+x_3+x_4 = 5+3+7 = 15.

In R one can give the corresponding commands:

> x <- c(4,5,3,7)

> x
[1] 4 5 3 7

> sum(x)
[1] 19

> sum(x[2:4])
[1] 15
- + \ No newline at end of file diff --git a/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/index.html b/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/index.html index 924fc0d..28c2d28 100644 --- a/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/index.html +++ b/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/index.html @@ -4,13 +4,13 @@ Discrete Random Variables and the Binomial Distribution | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read/index.html b/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read/index.html index f6edf71..529e847 100644 --- a/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read/index.html +++ b/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read/index.html @@ -4,7 +4,7 @@ Discrete Random Variables and the Binomial Distribution | Computing and Calculus for Applied Statistics - + @@ -71,7 +71,7 @@ In this part, we are describing a different binomial distribution. It describes your expected grade. Therefore, the grade is the outcome nn, weighted by the probability of you choosing the particular alarm-clock setting procedure:

E[X] = 0 \cdot 6 + 0.9 \cdot 6 + 0.1 \cdot 7 = 6.1

E[X] = 0 \cdot 6 + 0.1 \cdot 6 + 0.9 \cdot 7 = 6.9

Note that the probabilities of these three choices (0 + 0.9 + 0.1) must equal 1, since these are the only three choices defined.

The Population Variance

The (population) variance, for a discrete distribution, is:

σ2=E[(Xμ)2]=(x1μ)2p1+(x2μ)2p2+\sigma^2 = E\left[ \left ( X-\mu \right ) ^2 \right ] = (x_1 - \mu)^2 p_1 + (x_2 - \mu)^2 p_2 + \dots

where it is understood that the random variable XX has this distribution and μ\mu is the expected value.

In the case of the binomial distribution, it turns out that:

σ2=np(1p)\sigma^2 = np(1 - p)

Details

Definition

If μ\mu is the expected value, then the variance of a discrete distribution is defined as

σ2=(x1μ)2p1+(x2μ)2p2+\sigma ^2=(x_1 - \mu)^2 p_1 + (x_2 - \mu)^2 p_2 + \ldots

If a random variable XX has associated probabilities, pi=P[X=xi]p_i=P[X=x_i], then one can equivalently write:

σ2=Var[X]=E[(Xμ)2]\sigma^2 = Var[X]=E\left [ \left ( X - \mu \right ) ^ 2\right ]

Examples

Example

In the case of the binomial distribution, it turns out that:

σ2=np(1p)\sigma^2 = np(1 - p)

- + \ No newline at end of file diff --git a/Numbers to Indices/Functions/index.html b/Numbers to Indices/Functions/index.html index 98eac23..332ea15 100644 --- a/Numbers to Indices/Functions/index.html +++ b/Numbers to Indices/Functions/index.html @@ -4,13 +4,13 @@ Functions | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Functions/read/index.html b/Numbers to Indices/Functions/read/index.html index f06cc4d..06bf5ff 100644 --- a/Numbers to Indices/Functions/read/index.html +++ b/Numbers to Indices/Functions/read/index.html @@ -4,7 +4,7 @@ Functions | Computing and Calculus for Applied Statistics - + @@ -16,7 +16,7 @@ If we write y=f(x)y=f(x), the outcome yy is usually called the response variable and xx is the explanatory variable. Function values are plotted on vertical axis while xx values are plotted on horizontal axis. This plots yy against xx.

Examples

Example

The following R commands can be used to generate a plot for function y=2+3xy= 2+3x:

x <- seq(0:10)
g <- function(x) {
+ yhat <- 2+3*x
+ return(yhat)
+ }

x <- seq(0,10,0.1)
y <- g(x)
plot(x,y,type="l", xlab="x",ylab="y")

Functions of Several Variables

Examples

Example

z=2x+3y+4v=t2+3xw=t2+3bx\begin{aligned} z &= 2x+3y+4\\ v &= t^2+3x\\ w &= t^2+3b \cdot x\end{aligned}

- + \ No newline at end of file diff --git a/Numbers to Indices/Indices and the apply Commands in R/index.html b/Numbers to Indices/Indices and the apply Commands in R/index.html index a165edb..ab7db82 100644 --- a/Numbers to Indices/Indices and the apply Commands in R/index.html +++ b/Numbers to Indices/Indices and the apply Commands in R/index.html @@ -4,13 +4,13 @@ Indices and the apply Commands in R | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Indices and the apply Commands in R/read/index.html b/Numbers to Indices/Indices and the apply Commands in R/read/index.html index c79e133..be0e95c 100644 --- a/Numbers to Indices/Indices and the apply Commands in R/read/index.html +++ b/Numbers to Indices/Indices and the apply Commands in R/read/index.html @@ -4,7 +4,7 @@ Indices and the Apply Commands in R | Computing and Calculus for Applied Statistics - + @@ -18,7 +18,7 @@ To do this we use the tapply command in R.

Examples

Example
> z <- c(5,7,2,9,3,4,8)
> i <- c("m","f","m","m","f","m","f")

A. Find the sum within each group

> tapply(z,i,sum)
f m
18 20

B. Find the sample sizes

> tapply(z,i,length)
f m
3 4

C. Store outputs and use names

> n <- tapply(z,i,length)

> n
f m
3 4

> n["m"]
m
4

Logical Indexing

A logical vector consists of TRUE\verb|TRUE| (1) or FALSE\verb|FALSE| (0) values. These can be used to index vectors or matrices.

Examples

Example
> i <- c("m","f","m","m","f","m","f")

> z <- c(5,7,2,9,3,4,8)

> i=="m"
[1] TRUE FALSE TRUE TRUE FALSE TRUE FALSE

> z[i=="m"]
[1] 5 2 9 4

> z[c(T,F,T,T,F,T,F)]
[1] 5 2 9 4

Lists, Indexing Lists

A list is a collection of objects. Thus, data frames are lists.

Examples

Example
> x <- list(y=2,z=c(2,3),w=c("a","b","c"))

> x[["z"]]
[1] 2 3

> names(x)
[1] "y" "z" "w"

> x["w"]
$w
[1] "a" "b" "c"

> x$w
[1] "a" "b" "c"
- + \ No newline at end of file diff --git a/Numbers to Indices/More on Algebra/index.html b/Numbers to Indices/More on Algebra/index.html index e98628e..7999f67 100644 --- a/Numbers to Indices/More on Algebra/index.html +++ b/Numbers to Indices/More on Algebra/index.html @@ -4,13 +4,13 @@ More on Algebra | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/More on Algebra/read/index.html b/Numbers to Indices/More on Algebra/read/index.html index a49c78e..6cdf044 100644 --- a/Numbers to Indices/More on Algebra/read/index.html +++ b/Numbers to Indices/More on Algebra/read/index.html @@ -4,7 +4,7 @@ More on Algebra | Computing and Calculus for Applied Statistics - + @@ -20,7 +20,7 @@ We can explicitly write the corresponding combinations of two tails as follows:

HHTT HTHT HTTH THTH TTHH THHT

(b) How many times you will end up with 1 tail? The answer is 4 times and the output can be written as:

HHHT HTHH THHH HHTH

The case of a single tail is easy: The single tail can come up in any one of four positions.

The Binomial Theorem

(a+b)n=x=0n(nx)axbnx(a+b)^n = \sum_{x=0}^n \displaystyle{n \choose x} a^xb^{n-x}

Details

If aa and bb are real numbers and nn is an integer then the expression (a+b)n(a+b)^n can be expanded as:

(a+b)^n = a^n+ \displaystyle{n \choose 1}a^{n-1}b + \displaystyle{n \choose 2}a^{n-2}b^2 + \ldots + \displaystyle{n \choose n-1}ab^{n-1}+b^n

(a+b)^n = \sum_{x=0}^n \displaystyle{n \choose x}a^xb^{n-x}

This can be seen by looking at (a+b)n(a+b)^n as a product of nn parentheses and multiply these by picking one item (aa or bb) from each. If we picked aa from xx parentheses and bb from (nx)(n-x), then the product is axbnxa^x b^{n-x}. We can choose the xx aa's in a total of (nx)\displaystyle\binom{n}{x} ways so the coefficient of axbnxa^x b^{n-x} is (nx)\displaystyle\binom{n}{x}.

Examples

Example

Since

(a+b)n=x=0n(nx)axbnx,(a+b)^n = \sum_{x=0}^n \displaystyle{n \choose x} a^xb^{n-x},

it follows that

2n=(1+1)n=x=0n(nx)2^n = (1+1)^n = \sum_{x=0}^n \displaystyle{n \choose x}

i.e.

2^n = \displaystyle{n \choose 0} + \displaystyle{n \choose 1} + \displaystyle{n \choose 2} + \ldots + \displaystyle{n \choose n}

- + \ No newline at end of file diff --git a/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/index.html b/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/index.html index 16e8e2a..7149ed3 100644 --- a/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/index.html +++ b/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/index.html @@ -4,13 +4,13 @@ Numbers, Arithmetic and Basic Algebra | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read/index.html b/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read/index.html index df452cc..a1e3550 100644 --- a/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read/index.html +++ b/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read/index.html @@ -4,7 +4,7 @@ Numbers, Arithmetic and Basic Algebra | Computing and Calculus for Applied Statistics - + @@ -198,7 +198,7 @@ c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z"> is an irrational number, and belongs thereby to the set of real numbers R\mathbb{R}. Real numbers can be imagined as points on an infinitely long line, which is also called the real line.

- + \ No newline at end of file diff --git a/Numbers to Indices/Overview/index.html b/Numbers to Indices/Overview/index.html index 7817565..019ff63 100644 --- a/Numbers to Indices/Overview/index.html +++ b/Numbers to Indices/Overview/index.html @@ -4,13 +4,13 @@ Overview | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Overview/read/index.html b/Numbers to Indices/Overview/read/index.html index ab7c714..58180e8 100644 --- a/Numbers to Indices/Overview/read/index.html +++ b/Numbers to Indices/Overview/read/index.html @@ -4,13 +4,13 @@ Numbers to Indices In R | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Polynomials/index.html b/Numbers to Indices/Polynomials/index.html index 452c77c..66afb63 100644 --- a/Numbers to Indices/Polynomials/index.html +++ b/Numbers to Indices/Polynomials/index.html @@ -4,13 +4,13 @@ Polynomials | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Polynomials/read/index.html b/Numbers to Indices/Polynomials/read/index.html index 495da11..9412fe8 100644 --- a/Numbers to Indices/Polynomials/read/index.html +++ b/Numbers to Indices/Polynomials/read/index.html @@ -4,7 +4,7 @@ Polynomials | Computing and Calculus for Applied Statistics - + @@ -110,7 +110,7 @@ s-225.272,467,-225.272,467s-235,486,-235,486c-2.7,4.7,-9,7,-19,7 c-6,0,-10,-1,-12,-3s-194,-422,-194,-422s-65,47,-65,47z M834 80h400000v40h-400000z">

- + \ No newline at end of file diff --git a/Numbers to Indices/Simple Data Analysis in R/index.html b/Numbers to Indices/Simple Data Analysis in R/index.html index 132feb4..be20023 100644 --- a/Numbers to Indices/Simple Data Analysis in R/index.html +++ b/Numbers to Indices/Simple Data Analysis in R/index.html @@ -4,13 +4,13 @@ Simple Data Analysis in R | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Numbers to Indices/Simple Data Analysis in R/read/index.html b/Numbers to Indices/Simple Data Analysis in R/read/index.html index 2d729c8..0332bd7 100644 --- a/Numbers to Indices/Simple Data Analysis in R/read/index.html +++ b/Numbers to Indices/Simple Data Analysis in R/read/index.html @@ -4,7 +4,7 @@ Simple Data Analysis in R | Computing and Calculus for Applied Statistics - + @@ -31,7 +31,7 @@ To do this, we first plot the data in a scatter plot.

Fig. 11

Figure: Scatter plot showing the length-weight relationship of fish species X.

Data source: Marine Resource Institution - Iceland.

Details

A first step in analyzing data is to prepare different plots. The type of variable will determine the type of plot. For example, when using a scatter plot both the explanatory and response data should be continuous variables

The equation for the Pearson correlation coefficient is:

r_{x,y} = \displaystyle\frac{\Sigma_{i=1}^{n}(x_i - \bar{x})(y_i - \bar{y})}{\sqrt{\Sigma_{i=1}^{n}(x_i - \bar{x})^2 \Sigma_{i=1}^{n}(y_i - \bar{y})^2}},

where xˉ\bar{x} and yˉ\bar{y} are the sample means of the x- and y-values.

The correlation is always between -1 and 1.

Examples

The following R commands can be used to generate a scatter plot for vectors x and y

Example
plot(x,y)
- + \ No newline at end of file diff --git a/Numbers to Indices/index.html b/Numbers to Indices/index.html index 41af8e2..2ba7d97 100644 --- a/Numbers to Indices/index.html +++ b/Numbers to Indices/index.html @@ -4,13 +4,13 @@ Numbers to Indices | Computing and Calculus for Applied Statistics - +
-
Skip to main content
- +
Skip to main content
+ \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/index.html b/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/index.html index f660666..2b0abb2 100644 --- a/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/index.html +++ b/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/index.html @@ -4,13 +4,13 @@ Independence, Expectations and the Moment-Generating Function | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read/index.html b/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read/index.html index 32b94d0..dfe47d2 100644 --- a/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read/index.html +++ b/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read/index.html @@ -4,7 +4,7 @@ Independence, Expectations and the Moment Generating Function | Computing and Calculus for Applied Statistics - + @@ -13,7 +13,7 @@ Then,

P[XA]=AfX(x)dx,P [X \in A] = \displaystyle\int_{A} f_X (x) dx,

P[YB]=BfY(y)dyP [Y \in B] = \displaystyle\int_{B} f_Y (y) dy

So XX and YY are independent if:

\begin{aligned} P [X \in A, Y \in B] &= \displaystyle\int_{A} f_X (x) dx \displaystyle\int_{B} f_Y (y) dy \\ &= \displaystyle\int_{A}f_X (x) \left(\displaystyle\int_{B} f_Y (y) dy\right) dx \\ &= \displaystyle\int_{A}\displaystyle\int_{B} f_X (x)f_Y (y) dydx \end{aligned}

But, if ff is the joint density of XX and YY then we know that

P [X \in A, Y \in B] = \displaystyle\int_{A}\displaystyle\int_{B} f (x,y) dydx

Hence XX and YY are independent if and only if we can write the joint density in the form of,

f(x,y)=fX(x)fY(y)f(x,y) = f_X (x)f_Y (y)

Independence and Expected Values

If XX and YY are independent random variables then E[XY]=E[X]E[Y]E[XY]=E[X]E[Y].

Further, if XX and YY are independent random variables then E[g(X)h(Y)]=E[g(X)]E[h(Y)]E[g(X)h(Y)]=E[g(X)]E[h(Y)] is true if gg and hh are functions in which expectations exist.

Details

If XX and YY are random variables with a joint distribution function f(x,y)f(x,y), then it is true that for h:R2Rh:\mathbb{R}^2\to\mathbb{R} we have

E[h(X,Y)]=h(x,y)f(x,y)dxdyE[h(X,Y)]=\displaystyle\int\displaystyle\int h(x,y)f(x,y)dxdy

for those hh such that the integral on the right exists

Suppose XX and YY are independent continuous random variables, then

f(x,y)=fX(x)fY(y)f(x,y) = f_X (x) f_Y (y)

Thus,

E[XY]=xyf(x,y)dxdy=xyfX(x)fY(y)dxdy=xfX(x)dxyfY(y)dy=E[X]E[Y]\begin{aligned} E[XY] &= \displaystyle\int\displaystyle\int xy f (x,y) dxdy \\ &= \displaystyle\int\displaystyle\int xy f_X (x) f_Y (y) dxdy \\ &= \displaystyle\int xf_X (x) dx \displaystyle\int yf_Y (y) dy \\ &= E[X] E[Y] \end{aligned}
Note

Note that if XX and YY are independent then E[h(X)g(Y)]=E[h(X)]E[g(Y)]E[h(X) g(Y)] = E [h(X)] E[g(Y)] is true whenever the functions hh and gg have expected values.

Examples

Example

Suppose X,YU(0,2)X,Y \in U (0,2) are independent identically distributed then,

fX(x)={12if 0x20otherwisef_X(x) = \begin{cases} \displaystyle\frac{1}{2} & \text{if } 0 \leq x \leq 2 \\ 0 & \text{otherwise} \end{cases}

and similarly for fYf_Y.

Next, note that,

f(x,y)=fX(x)fY(y)={14if 0x,y20otherwisef(x,y) = f_X(x) f_Y(y) = \begin{cases} \displaystyle\frac{1}{4} & \text{if } 0 \leq x,y \leq 2 \\ 0 & \text{otherwise} \end{cases}

Also note that f(x,y)0f(x,y) \geq 0 for all (x,y)R2(x,y) \in \mathbb{R}^2 and

\displaystyle\int\displaystyle\int f(x,y)dxdy = \displaystyle\int_{0}^{2}\displaystyle\int_{0}^{2} \displaystyle\frac {1}{4} dxdy = \displaystyle\frac {1}{4}\cdot 4 = 1

It follows that

E[XY]=f(x,y)xydxdy=020214xydxdy=1402y(02xdx)dy=1402yx2202dy=1402y(222022)dy=14022ydy=1202ydy=12y2202=12(222022)=122=1\begin{aligned} E[XY] &= \int\int f(x,y) xy dxdy \\ &= \int_{0}^{2}\int_{0}^2 \frac{1}{4} xy dxdy \\ &= \frac{1}{4} \int_{0}^{2} y (\int_{0}^{2} x dx) dy \\ &= \frac{1}{4} \int_{0}^{2} y \frac{x^{2}}{2} \vert_{0}^{2} dy \\ &= \frac{1}{4} \int_{0}^{2} y (\frac{2^{2}}{2} - \frac{0^{2}}{2}) dy \\ &= \frac{1}{4} \int_{0}^{2} 2 y dy \\ &= \frac{1}{2} \int_{0}^{2} y dy \\ &= \frac{1}{2} \frac{y^{2}}{2} \vert_{0}^{2} \\ &= \frac{1}{2} (\frac{2^{2}}{2} - \frac{0^{2}}{2}) \\ &= \frac{1}{2} 2 \\ &= 1 \end{aligned}

but

E[X]=E[Y]=02x12dx=1E[X] = E[Y] = \displaystyle\int_{0}^{2} x \displaystyle\frac{1}{2} dx = 1

so

E[XY]=E[X]E[Y]E[XY] = E[X] E[Y]

Independence and the Covariance

If XX and YY are independent then Cov(X,Y)=0Cov(X,Y)=0

In fact, if XX and YY are independent then Cov(h(X),g(Y))=0Cov(h(X),g(Y))=0 for any functions gg and hh in which expected values exist.

The Moment Generating Function

If XX is a random variable we define the moment generating function when tt exists as: M(t):=E[etX]M(t):=E[e^{tX}].

Examples

Example

If XBin(n,p)X\sim Bin(n,p) then M(t)=x=0netxp(x)=x=0netx(nx)p(1p)nxM(t)=\displaystyle\sum_{x=0}^{n} e^{tx}p(x) = \displaystyle\sum_{x=0}^{n} e^{tx} \displaystyle\binom{n}{x}p\cdot (1-p)^{n-x}

Moments and the Moment Generating Function

If MX(t)M_{X}(t) is the moment generating function (mgf) of XX, then MX(n)(0)=E[Xn]M_{X}^{(n)}(0)=E[X^n].

Details

Observe that M(t)=E[etX]=E[1+X+(tX)22!+(tX)33!+]M(t)=E[e^{tX}]=E[1+X+\displaystyle\frac{(tX)^2}{2!}+\displaystyle\frac{(tX)^3}{3!}+\dots] since ea=1+a+a22!+a33!+e^a=1+a+\displaystyle\frac{a^2}{2!}+\displaystyle\frac{a^3}{3!}+\dots. If the random variable etXe^{|tX|} has a finite expected value then we can switch the sum and the expected valued to obtain:

M(t)=E[n=0(tX)nn!]=n=0E[(tX)n]n!=n=0tnE[Xn]n!M(t)=E\left[\displaystyle\sum_{n=0}^{\infty}\displaystyle\frac{(tX)^n}{n!}\right]=\displaystyle\sum_{n=0}^{\infty}\displaystyle\frac{E[(tX)^n]}{n!}=\displaystyle\sum_{n=0}^{\infty}t^n\displaystyle\frac{E[X^n]}{n!}

This implies that the nthn^{th} derivative of M(t)M(t) evaluated at t=0t=0 is exactly E[Xn]E[X^n].

The Moment Generating Function of a Sum of Random Variables

MX+Y(t)=MX(t)MY(t)M_{X+Y}(t)=M_{X}(t)\cdot M_{Y}(t) if XX and YY are independent.

Details

Let XX and YY be independent random vaiables, then

MX+Y(t)=E[eXt+Yt]=E[eXteXt]=E[eXt]E[eXt]=MX(t)MY(t)M_{X+Y}(t)=E[e^{Xt+Yt}]=E[e^{Xt}e^{Xt}]=E[e^{Xt}]E[e^{Xt}]=M_{X}(t)M_{Y}(t)

Uniqueness of the Moment Generating Function

Moment generating functions (m.g.f.) uniquely determine the probability distribution function for random variables. Thus, if two random variables have the same moment-generating function, then they must also have the same distribution.

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Multivariate Calculus/index.html b/Vectors to Some Regression Topics/Multivariate Calculus/index.html index 75a0116..c2528c9 100644 --- a/Vectors to Some Regression Topics/Multivariate Calculus/index.html +++ b/Vectors to Some Regression Topics/Multivariate Calculus/index.html @@ -4,13 +4,13 @@ Multivariate Calculus | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Multivariate Calculus/read/index.html b/Vectors to Some Regression Topics/Multivariate Calculus/read/index.html index 13027d4..4711946 100644 --- a/Vectors to Some Regression Topics/Multivariate Calculus/read/index.html +++ b/Vectors to Some Regression Topics/Multivariate Calculus/read/index.html @@ -4,7 +4,7 @@ Multivariate Calculus | Computing and Calculus for Applied Statistics - + @@ -19,7 +19,7 @@ Then if URnU \subseteq \mathbb{R}^n is a subset,

g(U)f(x)dx=U(g(y))Jdy\displaystyle\int_{g(U)} f(\mathbf {x})d\mathbf {x} = \displaystyle\int_{U}({g}(\mathbf {y}))|J|d\mathbf {y}

where JJ is the Jacobian matrix and J|J| is the absolute value of it's determinant.

J=[g1y1g1y2g1yngny1gny2gnyn]=[g1gn]J = \left| \begin{bmatrix} \displaystyle\frac{\partial g_1}{\partial y_1} & \displaystyle\frac{\partial g_1}{\partial y_2} & \cdots &\displaystyle\frac{\partial g_1}{\partial y_n} \\ \vdots & \vdots & \cdots & \vdots \\ \displaystyle\frac{\partial g_n}{\partial y_1} & \displaystyle\frac{\partial g_n}{\partial y_2} & \cdots & \displaystyle\frac{\partial g_n}{\partial y_n} \end{bmatrix}\right| = \left|\begin{bmatrix} \nabla g_1 \\ \vdots \\ \nabla g_n \end{bmatrix} \right|

Details

Suppose ff is a continuous function f:RnRf: \mathbb{R}^n \rightarrow \mathbb{R} and g:RnRng: \mathbb{R}^n \rightarrow \mathbb{R}^n is a one-to-one function with continuous partial derivatives. Then if URnU \subseteq \mathbb{R}^n is a subset,

g(U)f(x)dx=U(g(y))Jdy\displaystyle\int_{g(U)} f(\mathbf {x})d\mathbf {x} = \displaystyle\int_{U}({g}(\mathbf {y}))|J|d\mathbf {y}

where JJ is the Jacobian determinant and |J| is its absolute value.

J=[g1y1g1y2g1yngny1gny2gnyn]=[g1gn]J = \left| \begin{bmatrix} \displaystyle\frac{\partial g_1}{\partial y_1} & \displaystyle\frac{\partial g_1}{\partial y_2} & \cdots &\displaystyle\frac{\partial g_1}{\partial y_n} \\ \vdots & \vdots & \cdots & \vdots \\ \displaystyle\frac{\partial g_n}{\partial y_1} & \displaystyle\frac{\partial g_n}{\partial y_2} & \cdots & \displaystyle\frac{\partial g_n}{\partial y_n} \end{bmatrix}\right| = \left|\begin{bmatrix} \nabla g_1 \\ \vdots \\ \nabla g_n \end{bmatrix} \right|

Similar calculations as in 28.4 give us that if XX is a continuous multivariate random variable, X=(X1,,Xn)X = (X_1, \ldots, X_n)^\prime with density ff and Y=h(X)\mathbf{Y} = \mathbf{h} (\mathbf{X}), where h\mathbf{h} is one-to-one with inverse g=h1\mathbf g= \mathbf{h}^{-1}. So, X=g(Y)\mathbf{X} = g(\mathbf{Y}), then the density of Y\mathbf{Y} is given by;

fY(y)=f(g(y))Jf_Y(\mathbf y) = f (g(\mathbf y)) |J|

Examples

Example

If Y=AX\mathbf{Y} = A \mathbf X where AA is an n×nn \times n matrix with det(A)0\det(A)\neq0 and X=(X1,,Xn)X = (X_1, \ldots, X_n)^\prime are independent and identically distributed random variables, then we have the following results.

The joint density of X1XnX_1 \cdots X_n is the product of the individual (marginal) densities,

fX(x)=f(x1)f(x2)f(xn)f_X(\mathbf x)= f(x_1) f(x_2) \cdots f(x_n)

The matrix of partial derivatives corresponds to gy\displaystyle\frac{\partial g}{\partial y} where X=g(Y)\mathbf X = \mathbf g(\mathbf{Y}), i.e. these are the derivatives of the transformation: X=g(Y)=A1Y\mathbf X = g (\mathbf{Y}) = A^{-1}\mathbf{Y}, or X=BY\mathbf X = B \mathbf{Y} where B=A1B = A^{-1}

But if X=BY\mathbf X = B \mathbf{Y}, then

Xi=bi1y1+bi2y2+bijyjbinynX_i = b_{i1}y_1 + b_{i2}y_2 + \cdots b_{ij}y_j\cdots b_{in}y_n

So, xiyj=bij\displaystyle\frac{\partial x_i}{\partial y_j} = b_{ij} and thus,

J=dxdy=B=A1=1AJ =\left|\displaystyle\frac{\partial d\mathbf x}{\partial d\mathbf y}\right| = |B| = |A^{-1}| = \displaystyle\frac {1}{|A|}

The density of Y\mathbf{Y} is therefore;

fY(y)=fX(g(y))J=fX(A1y)A1f_Y(\mathbf{y}) = f_X(g(\mathbf{y})) |J| = f_X(A^{-1}\mathbf{y}) |A^{-1}|

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/index.html b/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/index.html index f94b9ae..7646aef 100644 --- a/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/index.html +++ b/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/index.html @@ -4,13 +4,13 @@ Notes and Examples, The Linear Model | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read/index.html b/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read/index.html index 66b3566..2b9722b 100644 --- a/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read/index.html +++ b/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read/index.html @@ -4,7 +4,7 @@ Notes and Examples: The Linear Model | Computing and Calculus for Applied Statistics - + @@ -47,7 +47,7 @@ May also need u^\mathbf{\hat{u}}, this is normally done using BLUP.

Details

Recall that if WW is a random variable vector with EW=μEW = \mu and VW=ΣVW= \boldsymbol{\Sigma} then

E[AW]=AμE[AW] = A\mathbf{\mu}

Var[AW]=AΣAVar[AW]= A \boldsymbol{\Sigma} A'

In particular, if WN(μ,Σ)W \sim N(\mu, \boldsymbol{\Sigma}) then AWN(Aμ,AΣA)AW \sim N(A\mu, A \boldsymbol{\Sigma} A')

Now consider the lmm with

y=Xβ+Zu+ϵy = X \boldsymbol{\beta} + Zu + \boldsymbol{\epsilon}

where

u=(u1,,um)u = (u_1, \ldots, u_m)'

ϵ=(ϵ1,,ϵm)\boldsymbol{\epsilon} = (\epsilon_1, \ldots, \epsilon_m)'

and the random variables UiN(0,σA2)U_i \sim N(0, \sigma^2_A), ϵiN(0,σ2)\epsilon_i \sim N(0, \sigma^2) are all independent so that uN(0,σA2I)u \sim N(0, \sigma^2_A I) and ϵN(0,σ2I)\boldsymbol{\epsilon} \sim N(\mathbf{0}, \sigma^2 I).

Then Ey=XβEy = X\boldsymbol{\beta} and

Vy=Σy=Var[Zu+Var[ϵ]]=Z(σA2I)Z+σ2I=σA2ZZ+σ2I\begin{aligned} Vy &= \boldsymbol{\Sigma}_y \\ &= Var[Zu+Var[\boldsymbol{\epsilon}]] \\ &= Z(\sigma^2_A I) Z' + \sigma^2 I \\ &= \sigma^2_A Z Z' + \sigma^2 I \end{aligned}

and hence yN(Xβ,σA2ZZ+σ2I)y \sim N(X\boldsymbol{\beta},\sigma^2_A Z Z' + \sigma^2 I )

Therefore the likelihood function for the unknown parameters L(β,σA2,σ2)L(\boldsymbol{\beta},\sigma^2_A, \sigma^2) is

=1(2π)n/2Σyn/2e1/2(yXβ)Σy1(yXβ)= \displaystyle\frac{1}{(2\pi)^{n/2} \left| \boldsymbol{\Sigma}_y \right| ^{n/2}} e^{-1/2 (\mathbf{y}-X\boldsymbol{\beta})' \boldsymbol{\Sigma}^{-1}_y (y-X\boldsymbol{\beta})}

where Σy=σA2ZZ+σ2I\boldsymbol{\Sigma}_y = \sigma^2_A Z Z' + \sigma^2 I. Maximizing LL over β,σA2,σ2\boldsymbol{\beta},\sigma^2_A, \sigma^2 gives the variance components and the fixed effects. May also need u^\hat{u}, which is normally done using BLUP.

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Overview/index.html b/Vectors to Some Regression Topics/Overview/index.html index 645b12f..8615243 100644 --- a/Vectors to Some Regression Topics/Overview/index.html +++ b/Vectors to Some Regression Topics/Overview/index.html @@ -4,13 +4,13 @@ Overview | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Overview/read/index.html b/Vectors to Some Regression Topics/Overview/read/index.html index 66ceb0f..6b2c381 100644 --- a/Vectors to Some Regression Topics/Overview/read/index.html +++ b/Vectors to Some Regression Topics/Overview/read/index.html @@ -4,13 +4,13 @@ Vectors to Some Regression Topics | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Ranks and Determinants/index.html b/Vectors to Some Regression Topics/Ranks and Determinants/index.html index babce9b..1e95988 100644 --- a/Vectors to Some Regression Topics/Ranks and Determinants/index.html +++ b/Vectors to Some Regression Topics/Ranks and Determinants/index.html @@ -4,13 +4,13 @@ Ranks and Determinants | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Ranks and Determinants/read/index.html b/Vectors to Some Regression Topics/Ranks and Determinants/read/index.html index a5a2ad6..2bef4ae 100644 --- a/Vectors to Some Regression Topics/Ranks and Determinants/read/index.html +++ b/Vectors to Some Regression Topics/Ranks and Determinants/read/index.html @@ -4,7 +4,7 @@ Ranks and Determinants | Computing and Calculus for Applied Statistics - + @@ -14,7 +14,7 @@ Each such term can be written in the form a1j1a2j2a3j3anjna_{1j_1} \cdot a_{2j_2} \cdot a_{3j_3} \cdot \ldots \cdot a_{nj_n} where j1,,jnj_1, \ldots, j_n is a permutation of the integers 1,2,,n1,2, \ldots, n. Each permutation σ\sigma of the integers 1,2,,n1,2,\ldots,n can be performed by repeatedly interchanging two numbers.

Definition

A signed elementary product is an elementary product with a positive sign if the number of interchanges in the permutation is even but negative otherwise.

The determinant of AA, det(A)\det(A) or A\vert A \vert, is the sum of all signed elementary products.

Examples

Example
A=[a11a12a21a22]A = \begin{bmatrix} a_{11} & a_{12} \\ a_{21} & a_{22} \end{bmatrix}

then

A=a11a22a12a21\vert A \vert = a_{1\underline{1}} a_{2\underline{2}} - a_{1\underline{2}}a_{2\underline{1}}.

Example

If

A=[a11a12a13a21a22a23a31a32a33]A = \begin{bmatrix} a_{11} & a_{12} & a_{13} \\ a_{21} & a_{22} & a_{23} \\ a_{31} & a_{32} & a_{33} \end{bmatrix}

Then A\vert A \vert

= a11a22a33a_{11} a_{22} a_{33} This is the identity permutation and has positive sign

a11a23a32-a_{11} a_{23} a_{32} This is the permutation that only interchanges 22 and 33

a12a21a33-a_{12} a_{21} a_{33} Only one interchange

+a12a23a31+a_{12} a_{23} a_{31} Two interchanges

+a13a21a32+a_{13} a_{21} a_{32} Two interchanges

a13a22a31-a_{13} a_{22} a_{31} Three interchanges

Example
A=[1110]A = \begin{bmatrix} 1 & 1 \\ 1 & 0 \end{bmatrix}

A=1\vert A \vert = -1

Example
A=[100020003]A = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 2 & 0 \\ 0 & 0 & 3 \end{bmatrix}

A=123=6\vert A \vert = 1 \cdot 2 \cdot 3 = 6

Example
A=[100020030]A = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 2 & 0 \\ 0 & 3 & 0 \end{bmatrix}

A=0\vert A \vert = 0

Example
A=[100002030]A = \begin{bmatrix} 1 & 0 & 0 \\ 0 & 0 & 2 \\ 0 & 3 & 0 \end{bmatrix}

A=6\vert A \vert = -6

Example
A=[2121]A = \begin{bmatrix} 2 & 1 \\ 2 & 1 \end{bmatrix}

A=0\vert A \vert = 0

Example
A=[101011112]A = \begin{bmatrix} 1 & 0 & 1 \\ 0 & 1 & 1 \\ 1 & 1 & 2 \end{bmatrix}

A=0\vert A \vert = 0

Ranks, Inverses and Determinants

The following statements are true for an n×nn\times n matrix AA :

Details

Suppose AA is an n×nn\times n matrix. Then the following are truths:

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/index.html b/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/index.html index 275c6d8..48b5f3f 100644 --- a/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/index.html +++ b/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/index.html @@ -4,13 +4,13 @@ Some Notes on Matrices and Linear Operators | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read/index.html b/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read/index.html index 126410e..356319b 100644 --- a/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read/index.html +++ b/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read/index.html @@ -4,7 +4,7 @@ Some Notes on Matrices and Linear Operators | Computing and Calculus for Applied Statistics - + @@ -26,7 +26,7 @@ Writing

Y=(Y1Yn)Y = \left( \begin{array}{ccc} Y_1 \\ \vdots \\ Y_n \end{array} \right)

consider the linear combination aYa'Y and bYb'Y.

Details

The covariance between random variables UU and WW is defined by

Cov(U,W)=E[(Uμu)(Wμw)]Cov(U,W)= E[(U-\mu_u)(W-\mu_w)]

where μu=E[U]\mu_u=E[U] and μw=E[W]\mu_w=E[W]. Now, let U=aY=YiaiU=a'Y=\displaystyle\sum Y_ia_i and W=bY=YibiW=b'Y=\displaystyle\sum Y_ib_i, where Y1,,YnY_1,\ldots,Y_n are independent identically distributed with mean μ\mu and variance σ2\sigma^2, then we get

Cov(U,W)=E[(aYΣaμ)(bYΣbμ)]Cov(U,W)= E[(a'Y-\Sigma a_\mu)(b'Y-\Sigma b\mu)]

=E[(ΣaiYiΣaiμ)(ΣbjYjΣbjμ)]= E[(\Sigma a_iY_i -\Sigma a_i\mu)(\Sigma b_jY_j -\Sigma b_j\mu )]

and after some tedious (but basic) calculations we obtain

Cov(U,W)=σ2abCov(U,W)=\sigma^2a\cdot b

Examples

Example

If Y1Y_1 and Y2Y_2 are independent identically distributed, then

Cov(Y1+Y2,Y1Y2)=Cov((1,1)(Y1Y2),(1,1)(Y1Y2))Cov(Y_1+Y_2, Y_1-Y_2) = Cov \left( (1,1) \begin{pmatrix} Y_1 \\ Y_2 \end{pmatrix}, (1,-1) \begin{pmatrix} Y_1 \\ Y_2 \end{pmatrix} \right)
=(1,1)(11)σ2=0= (1,1) \begin{pmatrix} 1 \\ -1 \end{pmatrix} \sigma^2 = 0

and in general, Cov(aY,bY)=0Cov(\underline{a}'\underline{Y}, \underline{b}'\underline{Y})=0 if ab\underline{a}\bot \underline{b} and Y1,,YnY_1,\ldots,Y_n are independent.

Random Vectors

Y=(Y1,,Yn)Y= (Y_1, \ldots, Y_n) is a random vector if Y1,,YnY_1, \ldots, Y_n are random variables.

Details

Definition

If E[Yi]=μiE[Y_i] = \mu_i then we typically write

E[Y]=(μ1μn)=μE[Y] = \left( \begin{array}{ccc} \mu_1 \\ \vdots \\ \mu_n \end{array} \right) = \mu

If Cov(Yi,Yj)=σijCov(Y_i, Y_j) = \sigma_{ij} and Var[Yi]=σii=σi2Var[Y_i]=\sigma_{ii} = \sigma_i^2, then we define the matrix

Σ=(σij)\boldsymbol{\Sigma} = (\sigma_{ij})

containing the variances and covariances. We call this matrix the covariance matrix of YY, typically denoted Var[Y]=ΣVar[Y] = \boldsymbol{\Sigma} or CoVar[Y]=ΣCoVar[Y] = \boldsymbol{\Sigma}.

Examples

Example

If Yi,,YnY_i, \ldots, Y_n are independent identically distributed, EYi=μEY_i = \mu, VYi=σ2VY_i = \sigma^2, a,bRna,b\in\mathbb{R}^n and U=aYU=a'Y, W=bYW=b'Y, and

T=[UW]T = \begin{bmatrix} U \\ W \end{bmatrix}

then

ET=[ΣaiμΣbiμ]ET = \begin{bmatrix} \Sigma a_i \mu \\ \Sigma b_i \mu \end{bmatrix}
VT=Σ=σ2[Σai2ΣaibiΣaibiΣbi2]VT = \boldsymbol{\Sigma} = \sigma^2 \begin{bmatrix} \Sigma a_i^2 & \Sigma a_i b_i \\ \Sigma a_ib_i & \Sigma b_i^2 \end{bmatrix}
Example

If Y\underline{Y} is a random vector with mean μ\boldsymbol{\mu} and variance-covariance matrix Σ\boldsymbol{\Sigma}, then

E[aY]=aμE[a'Y] = a'\mu

and

Var[aY]=aΣaVar[a'Y] = a' \boldsymbol{\Sigma} a

Transforming Random Vectors

Suppose

Y=(Y1Yn)\mathbf{Y} = \left( \begin{array}{c} Y_1 \\ \vdots \\ Y_n \end{array} \right)

is a random vector with E[Y]=μE[\mathbf{Y}] = \mu and Var[Y]=ΣVar[\mathbf{Y}] = \boldsymbol{\Sigma} where the variance-covariance matrix

Σ=σ2I\boldsymbol{\Sigma} = \sigma^2 I

Details

Note that if Y1,,YnY_1, \ldots, Y_n are independent with common variance σ2\sigma^2 then

Σ=[σ12σ12σ13σ1nσ21σ22σ23σ2nσ31σ32σ32σ3nσn1σn2σn3σn2]=[σ12000σ220σ320000σn2]=σ2[100010100001]=σ2I\boldsymbol{\Sigma} = \left[ \begin{array}{ccccc} \sigma_{1}^{2} & \sigma_{12} & \sigma_{13} & \ldots & \sigma_{1n} \\ \sigma_{21} & \sigma_2^{2} & \sigma_{23} & \ldots & \sigma_{2n} \\ \sigma_{31} &\sigma_{32} &\sigma_3^{2} & \ldots & \sigma_{3n} \\ \vdots & \vdots & \vdots & \ddots & \vdots \\ \sigma_{n1} & \sigma_{n2} & \sigma_{n3} & \ldots & \sigma_n^{2} \end{array} \right] = \left[ \begin{array}{ccccc} \sigma_{1}^{2} & 0 & \ldots & \ldots & 0 \\ 0 & \sigma_2^{2} & \ddots & 0 & \vdots \\ \vdots & \ddots &\sigma_3^{2} & \ddots & \vdots \\ \vdots & 0 & \ddots & \ddots & 0 \\ 0 & \ldots & \ldots & 0 & \sigma_n^{2} \end{array} \right] = \sigma^2 \left[ \begin{array}{ccccc} 1 & 0 & \ldots & \ldots & 0 \\ 0 & 1 & \ddots & 0 & \vdots \\ \vdots & \ddots & 1 & \ddots & \vdots \\ \vdots & 0 & \ddots & \ddots & 0 \\ 0 & \ldots & \ldots & 0 & 1 \end{array} \right] = \sigma^2 I

If AA is an m×nm \times n matrix, then

E[AY]=AμE[A\mathbf{Y}] = A \mathbf{\mu}

and

Var[AY]=AΣAVar[A\mathbf{Y}] = A \boldsymbol{\Sigma} A'

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Some Regression Topics/index.html b/Vectors to Some Regression Topics/Some Regression Topics/index.html index cf34189..b6b7db4 100644 --- a/Vectors to Some Regression Topics/Some Regression Topics/index.html +++ b/Vectors to Some Regression Topics/Some Regression Topics/index.html @@ -4,13 +4,13 @@ Some Regression Topics | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Some Regression Topics/read/index.html b/Vectors to Some Regression Topics/Some Regression Topics/read/index.html index cbd0823..7c42ce6 100644 --- a/Vectors to Some Regression Topics/Some Regression Topics/read/index.html +++ b/Vectors to Some Regression Topics/Some Regression Topics/read/index.html @@ -4,7 +4,7 @@ Some Regression Topics | Computing and Calculus for Applied Statistics - + @@ -12,7 +12,7 @@
Skip to main content

Some Regression Topics

Poisson Regression

Data yiy_i are from a Poisson distribution with mean μi\mu_i and lnμi=β1+β2xi\ln{\mu_i}=\beta_1+\beta_2 x_i. A likelihood function can be written and the parameters can be estimated using maximum likelihood.

The Generalized Linear Model (GLM)

Data yiy_i are from a distribution within the exponential family, with mean μi\mu_i and g(μi)=xiβg(\mu_i)=\textbf{x}'_i\boldsymbol{\beta} for some link function, gg. A likelihood function can now be written and the parameters can be estimated using maximum likelihood.

Details

Data yiy_i are from a distribution within the exponential family, with mean μi\mu_i and g(μi)=xiβg(\mu_i)=\textbf{x}'_i\boldsymbol{\beta} for some link function, gg.

The exponential family includes distributions such as the Gaussian, binomial, Poisson, and gamma (and thus exponential and chi-squared)

The link functions are typically

  • identity (with the Gaussian)

  • log (with the Poisson and the gamma)

  • logistic (with the binomial)

A likelihood function can be set up for each of these models and the parameters can be estimated using maximum likelihood.

The glm package in R has options to estimate parameters in these models.

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/The Gamma Distribution/index.html b/Vectors to Some Regression Topics/The Gamma Distribution/index.html index 28a8287..4198a7f 100644 --- a/Vectors to Some Regression Topics/The Gamma Distribution/index.html +++ b/Vectors to Some Regression Topics/The Gamma Distribution/index.html @@ -4,13 +4,13 @@ The Gamma Distribution | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/The Gamma Distribution/read/index.html b/Vectors to Some Regression Topics/The Gamma Distribution/read/index.html index 768d06d..81035d3 100644 --- a/Vectors to Some Regression Topics/The Gamma Distribution/read/index.html +++ b/Vectors to Some Regression Topics/The Gamma Distribution/read/index.html @@ -4,7 +4,7 @@ The Gamma Distribution | Computing and Calculus for Applied Statistics - + @@ -126,7 +126,7 @@ M834 80h400000v40h-400000z">.

Hence we have shown the χ2\chi^2 distribution on 1 df to be G(α=v2,β=2)G (\alpha = \displaystyle\frac {v}{2}, \beta = 2) when v=1v = 1.

The Sum of Gamma Variables

In the general case if X1XnG(α,β)X_1 \ldots X_n \sim G (\alpha, \beta) are independent identically distributed then X1+X2+XnG(nα,β)X_1 + X_2 + \ldots X_n \sim G (n\alpha, \beta). In particular, if X1,X2,,Xvχ2X_1, X_2, \ldots, X_v \sim \chi^2 independent identically distributed then Σi=1vXiχv2\Sigma_{i=1}^v X_i \sim \chi^2_{v}.

Details

If XX and YY are independent identically distributed G(α,β)G (\alpha, \beta), then

MX(t)=MY(t)=1(1βt)αM_X (t) = M_Y (t) = \displaystyle\frac {1} {(1- \beta t)^\alpha}

and

MX+Y(t)=MX(t)MY(t)=1(1βt)2αM_{X+Y} (t) = M_X (t) M_Y (t) = \displaystyle\frac {1} {(1- \beta t)^{2 \alpha}}

So

X+YG(2α,β)X + Y \sim G (2\alpha, \beta)

In the general case, if X1XnG(α,β)X_1 \ldots X_n \sim G (\alpha, \beta) are independent identically distributed then X1+X2+XnG(nα,β)X_1 + X_2 + \ldots X_n \sim G (n\alpha, \beta). In particular, if X1,X2,,Xvχ2X_1, X_2, \ldots, X_v \sim \chi^2 independent identically distributed, then i=1vXiχv2\displaystyle\sum_{i=1}^v X_i \sim \chi^2_{v}.

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/index.html b/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/index.html index a09b5b8..20cef74 100644 --- a/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/index.html +++ b/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/index.html @@ -4,13 +4,13 @@ The Multivariate Normal Distribution and Related Topics | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read/index.html b/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read/index.html index a916011..db920de 100644 --- a/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read/index.html +++ b/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read/index.html @@ -4,7 +4,7 @@ The Multivariate Normal Distribution and Related Topics | Computing and Calculus for Applied Statistics - + @@ -69,7 +69,7 @@ Then it is easy to derive the density of CYCY which also factors nicely into a product, only one of which contains AYAY, which gives the density for AYAY.

The OLS Estimator

Suppose YN(Xβ,σ2I)Y \sim N(X \beta,\sigma^2 I). The ordinary least squares estimator, when the n×pn \times p matrix is of full rank, pp, where pnp\leq n, is:

β^=(XX)1XY\hat{\beta} = (X'X)^{-1}X'Y

The random variable which describes the process giving the data and estimate is:

b=(XX)1XYb = (X'X)^{-1}X'Y

It follows that

β^N(β,σ2(XX)1)\hat{\beta} \sim N(\beta,\sigma^{2}(X'X)^{-1})

Details

Suppose YN(Xβ,σ2I)Y \sim N(X \beta,\sigma^2I). The ordinary least squares estimator, when the n×pn \times p matrix is of full rank, pp, is:

β^=(XX)1XY\hat{\beta} = (X'X)^{-1}X'Y

The equation below is the random variable which describes the process giving the data and estimate:

b=(XX)1XYb = (X'X)^{-1}X'Y

If B=(XX)1XB = (X'X)^{-1}X', then we know that

BYN(BXβ,B(σ2I)B)BY \sim N(B X \beta, B(\sigma^{2}I)B')

Note that

BXβ=(XX)1XXβ=βBX\beta = (X'X)^{-1}X'X\beta=\beta

and

B(σ2I)B=σ(XX)1X[(XX)1X]=σ2(XX)1XX(XX)1=σ2(XX)1\begin{aligned} B(\sigma^{2}I)B' &= \sigma^{}(X'X)^{-1}X'[(X'X)^{-1}X']' \\ &= \sigma^{2}(X'X)^{-1}X'X(X'X)^{-1} \\ &= \sigma^{2}(X'X)^{-1} \end{aligned}

It follows that

β^N(β,σ2(XX)1)\hat{\beta} \sim N(\beta,\sigma^{2}(X'X)^{-1})

Note

The earlier results regarding the multivariate Gaussian distribution also show that the vector of parameter estimates will be Gaussian even if the original YY-variables are not independent.

- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Vector and Matrix Operations/index.html b/Vectors to Some Regression Topics/Vector and Matrix Operations/index.html index 8ea3012..5792e85 100644 --- a/Vectors to Some Regression Topics/Vector and Matrix Operations/index.html +++ b/Vectors to Some Regression Topics/Vector and Matrix Operations/index.html @@ -4,13 +4,13 @@ Vector and Matrix Operations | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/Vector and Matrix Operations/read/index.html b/Vectors to Some Regression Topics/Vector and Matrix Operations/read/index.html index 0ebcc9c..3047aa2 100644 --- a/Vectors to Some Regression Topics/Vector and Matrix Operations/read/index.html +++ b/Vectors to Some Regression Topics/Vector and Matrix Operations/read/index.html @@ -4,7 +4,7 @@ Vectors and Matrix Operations | Computing and Calculus for Applied Statistics - + @@ -17,7 +17,7 @@ Given the general element cijc_{ij} of n×mn \times m matrix, C=ABC=AB is found by pairing the ithi^{th} row of CC with the jthj^{th} column of BB, and computing the sum of products of the paired terms.

Examples

Example: Matrices in R
> A <- matrix(c(1,3,5,2,4,6),3,2)

> A
[,1] [,2]
[1,] 1 2
[2,] 3 4
[3,] 5 6

> B <- matrix(c(1,1,2,3),2,2)

> B
[,1] [,2]
[1,] 1 2
[2,] 1 3

> A%*%B
[,1] [,2]
[1,] 3 8
[2,] 7 18
[3,] 11 28

More on Matrix Multiplication

Let AA, BB, and CC be m×nm\times n, n×ln\times l, and l×pl\times p matrices, respectively. Then we have

(AB)C=A(BC)(AB)C=A(BC)

In general, matrix multiplication is not commutative, that is ABBAAB\neq BA.

We also have

(AB)=BA(AB)'=B'A'

In particular, (Av)(Av)=vAAv(Av)'(Av)=v'A'Av, when vv is a n×1n\times1 column vector

More obvious are the rules

  1. A+(B+C)=(A+B)+CA+(B+C)=(A+B)+C

  2. k(A+B)=kA+kBk(A+B)=kA+kB

  3. A(B+C)=AB+ACA(B+C)=AB+AC

where kRk\in\mathbb{R} and when the dimensions of the matrices fit.

Linear Equations

Details

General linear equations can be written in the form Ax=bAx=b.

Examples

Example

The set of equations

2x+3y=42x+3y=4

3x+y=23x+y=2

can be written in matrix formulation as

[2331][xy]=[42]\begin{bmatrix} 2 & 3 \\ 3 & 1 \end{bmatrix} \begin{bmatrix} x \\ y \end{bmatrix} = \begin{bmatrix} 4 \\ 2 \end{bmatrix}

i.e. Ax=bA\underline{x} = \underline{b} for an appropriate choice of A,xA, \underline{x} and b\underline{b}.

The Unit Matrix

The n×nn\times n matrix

I=[10001000001]I = \left[ \begin{array}{cccc} 1 & 0 & \ldots & 0 \\ 0 & 1 & 0 & \vdots \\ \vdots & 0 & \dots & 0 \\ 0 & \ldots & 0 & 1 \end{array} \right]

is the identity matrix. This is because if a matrix AA is n×nn\times n

then AI=AA I = A and IA=AI A = A

The Inverse of a Matrix

If AA is an n×nn \times n matrix and BB is a matrix such that

BA=AB=IBA = AB = I

then BB is said to be the inverse of AA, written

B=A1B = A ^{-1}

Note that if AA is an n×nn \times n matrix for which an inverse exists, then the equation Ax=bAx = b can be solved and the solution is x=A1bx = A^{-1} b.

Examples

Example

If matrix AA is:

[2331]\begin{bmatrix} 2 & 3 \\ 3 & 1 \end{bmatrix}

then A1A ^{-1} is:

[17373727]\begin{bmatrix} \displaystyle\frac{-1}{7} & \displaystyle\frac{3}{7} \\ \displaystyle\frac{3}{7} & \displaystyle\frac{-2}{7} \end{bmatrix}
- + \ No newline at end of file diff --git a/Vectors to Some Regression Topics/index.html b/Vectors to Some Regression Topics/index.html index a2788e8..3a3a698 100644 --- a/Vectors to Some Regression Topics/index.html +++ b/Vectors to Some Regression Topics/index.html @@ -4,13 +4,13 @@ Vectors to Some Regression Topics | Computing and Calculus for Applied Statistics - +
Skip to main content
- + \ No newline at end of file diff --git a/assets/js/23374ca6.95001b49.js b/assets/js/23374ca6.95001b49.js new file mode 100644 index 0000000..92ba740 --- /dev/null +++ b/assets/js/23374ca6.95001b49.js @@ -0,0 +1 @@ +"use strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[8421],{3905:(e,t,n)=>{n.d(t,{Zo:()=>p,kt:()=>f});var r=n(7294);function a(e,t,n){return t in e?Object.defineProperty(e,t,{value:n,enumerable:!0,configurable:!0,writable:!0}):e[t]=n,e}function o(e,t){var n=Object.keys(e);if(Object.getOwnPropertySymbols){var r=Object.getOwnPropertySymbols(e);t&&(r=r.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),n.push.apply(n,r)}return n}function i(e){for(var t=1;t=0||(a[n]=e[n]);return a}(e,t);if(Object.getOwnPropertySymbols){var o=Object.getOwnPropertySymbols(e);for(r=0;r=0||Object.prototype.propertyIsEnumerable.call(e,n)&&(a[n]=e[n])}return a}var s=r.createContext({}),u=function(e){var t=r.useContext(s),n=t;return e&&(n="function"==typeof e?e(t):i(i({},t),e)),n},p=function(e){var t=u(e.components);return r.createElement(s.Provider,{value:t},e.children)},l="mdxType",d={inlineCode:"code",wrapper:function(e){var t=e.children;return r.createElement(r.Fragment,{},t)}},m=r.forwardRef((function(e,t){var n=e.components,a=e.mdxType,o=e.originalType,s=e.parentName,p=c(e,["components","mdxType","originalType","parentName"]),l=u(n),m=a,f=l["".concat(s,".").concat(m)]||l[m]||d[m]||o;return n?r.createElement(f,i(i({ref:t},p),{},{components:n})):r.createElement(f,i({ref:t},p))}));function f(e,t){var n=arguments,a=t&&t.mdxType;if("string"==typeof e||a){var o=n.length,i=new Array(o);i[0]=m;var c={};for(var s in t)hasOwnProperty.call(t,s)&&(c[s]=t[s]);c.originalType=e,c[l]="string"==typeof e?e:a,i[1]=c;for(var u=2;u{n.r(t),n.d(t,{assets:()=>s,contentTitle:()=>i,default:()=>d,frontMatter:()=>o,metadata:()=>c,toc:()=>u});var r=n(7462),a=(n(7294),n(3905));const 
o={slug:"/"},i="Introduction",c={unversionedId:"README",id:"README",title:"Introduction",description:"This is a landing page for the CCAS (Computing and Calculus for Advanced Statistics) course.",source:"@site/docs/README.md",sourceDirName:".",slug:"/",permalink:"/ccas/",draft:!1,tags:[],version:"current",frontMatter:{slug:"/"},sidebar:"sidebar",next:{title:"Numbers to Indices",permalink:"/ccas/Numbers to Indices/"}},s={},u=[{value:"Licensing and Contributing",id:"licensing-and-contributing",level:2}],p={toc:u},l="wrapper";function d(e){let{components:t,...n}=e;return(0,a.kt)(l,(0,r.Z)({},p,n,{components:t,mdxType:"MDXLayout"}),(0,a.kt)("h1",{id:"introduction"},"Introduction"),(0,a.kt)("p",null,"This is a landing page for the ",(0,a.kt)("inlineCode",{parentName:"p"},"CCAS")," (Computing and Calculus for Advanced Statistics) course.\nHere you will find all the documentation needed for this course.\nIt is meant to be used by teachers, trainers, students and hobbyists who want to learn about topics on calculus and statistics."),(0,a.kt)("p",null,"The course is structured in chapters, each with their own sections.\nEach section presents a particular topic, rich in examples.\nThere is a sizeable focus on the use of the ",(0,a.kt)("a",{parentName:"p",href:"https://www.r-project.org/"},"R programming language for statistical computing")," for demonstrating the topics in a practical manner."),(0,a.kt)("p",null,"Chapters are:"),(0,a.kt)("ul",null,(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("a",{parentName:"li",href:"Numbers%20to%20Indices/Overview/read"},"Numbers to Indices")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("a",{parentName:"li",href:"/ccas/Functions/Overview/read"},"Functions")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("a",{parentName:"li",href:"Multivariate%20to%20Power/Overview/read"},"Multivariate to Power")),(0,a.kt)("li",{parentName:"ul"},(0,a.kt)("a",{parentName:"li",href:"Vectors%20to%20Some%20Regression%20Topics/Overview/read"},"Vectors to Some Regression 
Topics"))),(0,a.kt)("h2",{id:"licensing-and-contributing"},"Licensing and Contributing"),(0,a.kt)("p",null,"The ",(0,a.kt)("inlineCode",{parentName:"p"},"CCAS")," contents are open educational resources (",(0,a.kt)("a",{parentName:"p",href:"https://en.wikipedia.org/wiki/Open_educational_resources"},"OER"),"), part of the ",(0,a.kt)("a",{parentName:"p",href:"https://open-education-hub.github.io/"},"Open Education Hub project"),";\nthey are hosted on ",(0,a.kt)("a",{parentName:"p",href:"https://github.com/open-education-hub/ccas"},"GitHub"),", licensed under ",(0,a.kt)("a",{parentName:"p",href:"https://creativecommons.org/licenses/by-sa/4.0/"},"CC BY-SA 4.0")," and ",(0,a.kt)("a",{parentName:"p",href:"https://opensource.org/licenses/BSD-3-Clause"},"BSD 3-Clause"),"."),(0,a.kt)("p",null,"If you find an issue or want to contribute, follow the ",(0,a.kt)("a",{parentName:"p",href:"https://github.com/open-education-hub/ccas/blob/main/CONTRIBUTING.md"},"contribution guidelines on GitHub"),"."))}d.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/23374ca6.ddbb2705.js b/assets/js/23374ca6.ddbb2705.js deleted file mode 100644 index c125803..0000000 --- a/assets/js/23374ca6.ddbb2705.js +++ /dev/null @@ -1 +0,0 @@ -"use strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[8421],{3905:(e,t,r)=>{r.d(t,{Zo:()=>u,kt:()=>m});var n=r(7294);function o(e,t,r){return t in e?Object.defineProperty(e,t,{value:r,enumerable:!0,configurable:!0,writable:!0}):e[t]=r,e}function a(e,t){var r=Object.keys(e);if(Object.getOwnPropertySymbols){var n=Object.getOwnPropertySymbols(e);t&&(n=n.filter((function(t){return Object.getOwnPropertyDescriptor(e,t).enumerable}))),r.push.apply(r,n)}return r}function c(e){for(var t=1;t=0||(o[r]=e[r]);return o}(e,t);if(Object.getOwnPropertySymbols){var a=Object.getOwnPropertySymbols(e);for(n=0;n=0||Object.prototype.propertyIsEnumerable.call(e,r)&&(o[r]=e[r])}return o}var l=n.createContext({}),s=function(e){var 
t=n.useContext(l),r=t;return e&&(r="function"==typeof e?e(t):c(c({},t),e)),r},u=function(e){var t=s(e.components);return n.createElement(l.Provider,{value:t},e.children)},p="mdxType",f={inlineCode:"code",wrapper:function(e){var t=e.children;return n.createElement(n.Fragment,{},t)}},d=n.forwardRef((function(e,t){var r=e.components,o=e.mdxType,a=e.originalType,l=e.parentName,u=i(e,["components","mdxType","originalType","parentName"]),p=s(r),d=o,m=p["".concat(l,".").concat(d)]||p[d]||f[d]||a;return r?n.createElement(m,c(c({ref:t},u),{},{components:r})):n.createElement(m,c({ref:t},u))}));function m(e,t){var r=arguments,o=t&&t.mdxType;if("string"==typeof e||o){var a=r.length,c=new Array(a);c[0]=d;var i={};for(var l in t)hasOwnProperty.call(t,l)&&(i[l]=t[l]);i.originalType=e,i[p]="string"==typeof e?e:o,c[1]=i;for(var s=2;s{r.r(t),r.d(t,{assets:()=>l,contentTitle:()=>c,default:()=>f,frontMatter:()=>a,metadata:()=>i,toc:()=>s});var n=r(7462),o=(r(7294),r(3905));const a={slug:"/"},c="Intro",i={unversionedId:"README",id:"README",title:"Intro",description:"This is a landing page for the CCAS course.",source:"@site/docs/README.md",sourceDirName:".",slug:"/",permalink:"/ccas/",draft:!1,tags:[],version:"current",frontMatter:{slug:"/"},sidebar:"sidebar",next:{title:"Numbers to Indices",permalink:"/ccas/Numbers to Indices/"}},l={},s=[],u={toc:s},p="wrapper";function f(e){let{components:t,...r}=e;return(0,o.kt)(p,(0,n.Z)({},u,r,{components:t,mdxType:"MDXLayout"}),(0,o.kt)("h1",{id:"intro"},"Intro"),(0,o.kt)("p",null,"This is a landing page for the CCAS course.\nHere you will find all the documentation needed for this course."),(0,o.kt)("p",null,"Best of luck!"))}f.isMDXComponent=!0}}]); \ No newline at end of file diff --git a/assets/js/935f2afb.ad395550.js b/assets/js/935f2afb.64cbbc6c.js similarity index 87% rename from assets/js/935f2afb.ad395550.js rename to assets/js/935f2afb.64cbbc6c.js index 1f8897e..41a1e39 100644 --- a/assets/js/935f2afb.ad395550.js +++ 
b/assets/js/935f2afb.64cbbc6c.js @@ -1 +1 @@ -"use strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"sidebar":[{"type":"link","label":"Introduction","href":"/ccas/","docId":"README"},{"type":"category","label":"Numbers to Indices","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Overview/read","docId":"Numbers to Indices/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Overview/"},{"type":"category","label":"Numbers, Arithmetic and Basic Algebra","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read","docId":"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/"},{"type":"category","label":"Data Vectors","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Data Vectors/read","docId":"Numbers to Indices/Data Vectors/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Data Vectors/"},{"type":"category","label":"More on Algebra","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/More on Algebra/read","docId":"Numbers to Indices/More on Algebra/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/More on Algebra/"},{"type":"category","label":"Discrete Random Variables and the Binomial Distribution","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read","docId":"Numbers to Indices/Discrete Random Variables and the Binomial 
Distribution/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/"},{"type":"category","label":"Functions","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Functions/read","docId":"Numbers to Indices/Functions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Functions/"},{"type":"category","label":"Polynomials","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Polynomials/read","docId":"Numbers to Indices/Polynomials/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Polynomials/"},{"type":"category","label":"Simple Data Analysis in R","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Simple Data Analysis in R/read","docId":"Numbers to Indices/Simple Data Analysis in R/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Simple Data Analysis in R/"},{"type":"category","label":"Indices and the apply Commands in R","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Indices and the apply Commands in R/read","docId":"Numbers to Indices/Indices and the apply Commands in R/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Indices and the apply Commands in R/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/"},{"type":"category","label":"Functions","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Overview/read","docId":"Functions/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Overview/"},{"type":"category","label":"Functions of Functions and the Exponential Function","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Functions of Functions and the Exponential Function/read","docId":"Functions/Functions of Functions and the Exponential 
Function/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Functions of Functions and the Exponential Function/"},{"type":"category","label":"Inverse Functions and the Logarithm","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Inverse Functions and the Logarithm/read","docId":"Functions/Inverse Functions and the Logarithm/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Inverse Functions and the Logarithm/"},{"type":"category","label":"Continuity and Limits","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Continuity and Limits/read","docId":"Functions/Continuity and Limits/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Continuity and Limits/"},{"type":"category","label":"Sequences and Series","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Sequences and Series/read","docId":"Functions/Sequences and Series/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Sequences and Series/"},{"type":"category","label":"Slopes of Lines and Curves","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Slopes of Lines and Curves/read","docId":"Functions/Slopes of Lines and Curves/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Slopes of Lines and Curves/"},{"type":"category","label":"Derivatives","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Derivatives/read","docId":"Functions/Derivatives/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Derivatives/"},{"type":"category","label":"Applications of Differentiation","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Applications of Differentiation/read","docId":"Functions/Applications of Differentiation/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Applications of Differentiation/"},{"type":"category","label":"Integrals and Probability Density 
Functions","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Integrals and Probability Density Functions/read","docId":"Functions/Integrals and Probability Density Functions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Integrals and Probability Density Functions/"},{"type":"category","label":"Principles of Programming","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Principles of Programming/read","docId":"Functions/Principles of Programming/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Principles of Programming/"},{"type":"category","label":"The Central Limit Theorem and Related Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/The Central Limit Theorem and Related Topics/read","docId":"Functions/The Central Limit Theorem and Related Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/The Central Limit Theorem and Related Topics/"},{"type":"category","label":"Miscellanea","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Miscellanea/read","docId":"Functions/Miscellanea/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Miscellanea/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/"},{"type":"category","label":"Multivariate to Power","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Overview/read","docId":"Multivariate to Power/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Overview/"},{"type":"category","label":"Multivariate Probability Distributions","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Multivariate Probability Distributions/read","docId":"Multivariate to Power/Multivariate Probability Distributions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Multivariate Probability 
Distributions/"},{"type":"category","label":"Some Distributions Related to Normal","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Some Distributions Related to Normal/read","docId":"Multivariate to Power/Some Distributions Related to Normal/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Some Distributions Related to Normal/"},{"type":"category","label":"Estimation, Estimates and Estimators","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Estimation, Estimates and Estimators/read","docId":"Multivariate to Power/Estimation, Estimates and Estimators/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Estimation, Estimates and Estimators/"},{"type":"category","label":"Test of Hypothesis, P Values and Related Concepts","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read","docId":"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/"},{"type":"category","label":"Power and Sample Sizes","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Power and Sample Sizes/read","docId":"Multivariate to Power/Power and Sample Sizes/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Power and Sample Sizes/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/"},{"type":"category","label":"Vectors to Some Regression Topics","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Overview/read","docId":"Vectors to Some Regression Topics/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Overview/"},{"type":"category","label":"Vector and 
Matrix Operations","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Vector and Matrix Operations/read","docId":"Vectors to Some Regression Topics/Vector and Matrix Operations/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Vector and Matrix Operations/"},{"type":"category","label":"Some Notes on Matrices and Linear Operators","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read","docId":"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/"},{"type":"category","label":"Ranks and Determinants","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Ranks and Determinants/read","docId":"Vectors to Some Regression Topics/Ranks and Determinants/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Ranks and Determinants/"},{"type":"category","label":"Multivariate Calculus","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Multivariate Calculus/read","docId":"Vectors to Some Regression Topics/Multivariate Calculus/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Multivariate Calculus/"},{"type":"category","label":"The Multivariate Normal Distribution and Related Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read","docId":"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related 
Topics/"},{"type":"category","label":"Independence, Expectations and the Moment-Generating Function","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read","docId":"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/"},{"type":"category","label":"The Gamma Distribution","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/The Gamma Distribution/read","docId":"Vectors to Some Regression Topics/The Gamma Distribution/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/The Gamma Distribution/"},{"type":"category","label":"Notes and Examples, The Linear Model","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read","docId":"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/"},{"type":"category","label":"Some Regression Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Some Regression Topics/read","docId":"Vectors to Some Regression Topics/Some Regression Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Some Regression Topics/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/"}]},"docs":{"Functions/Applications of Differentiation/read":{"id":"Functions/Applications of Differentiation/read","title":"Applications of Differentiation","description":"Tracking the Sign of the Derivative","sidebar":"sidebar"},"Functions/Continuity and 
Limits/read":{"id":"Functions/Continuity and Limits/read","title":"Continuity and Limits","description":"The Concept of Continuity","sidebar":"sidebar"},"Functions/Derivatives/read":{"id":"Functions/Derivatives/read","title":"Derivatives","description":"The Derivative As a Limit","sidebar":"sidebar"},"Functions/Functions of Functions and the Exponential Function/read":{"id":"Functions/Functions of Functions and the Exponential Function/read","title":"Functions of Functions and the Exponential Function","description":"Exponential Growth and Decline","sidebar":"sidebar"},"Functions/Integrals and Probability Density Functions/read":{"id":"Functions/Integrals and Probability Density Functions/read","title":"Integrals and Probability Density Functions","description":"Area Under a Curve","sidebar":"sidebar"},"Functions/Inverse Functions and the Logarithm/read":{"id":"Functions/Inverse Functions and the Logarithm/read","title":"Inverse Functions and the Logarithm","description":"Inverse Function","sidebar":"sidebar"},"Functions/Miscellanea/read":{"id":"Functions/Miscellanea/read","title":"Miscellanea","description":"Simple Probabilities In R","sidebar":"sidebar"},"Functions/Overview/read":{"id":"Functions/Overview/read","title":"Functions of Functions to Miscellanea","description":"1. 
Functions of Functions and the Exponential Function","sidebar":"sidebar"},"Functions/Principles of Programming/read":{"id":"Functions/Principles of Programming/read","title":"Principles of Programming","description":"Modularity","sidebar":"sidebar"},"Functions/Sequences and Series/read":{"id":"Functions/Sequences and Series/read","title":"Sequences and Series","description":"Sequences","sidebar":"sidebar"},"Functions/Slopes of Lines and Curves/read":{"id":"Functions/Slopes of Lines and Curves/read","title":"Slopes of Lines and Curves","description":"The Slope of a Line","sidebar":"sidebar"},"Functions/The Central Limit Theorem and Related Topics/read":{"id":"Functions/The Central Limit Theorem and Related Topics/read","title":"The Central Limit Theorem and Related Topics","description":"The Central Limit Theorem","sidebar":"sidebar"},"Multivariate to Power/Estimation, Estimates and Estimators/read":{"id":"Multivariate to Power/Estimation, Estimates and Estimators/read","title":"Estimation, Estimates and Estimators","description":"Ordinary Least Squares for a Single Mean","sidebar":"sidebar"},"Multivariate to Power/Multivariate Probability Distributions/read":{"id":"Multivariate to Power/Multivariate Probability Distributions/read","title":"Multivariate Probability Distributions","description":"Joint Probability Distribution","sidebar":"sidebar"},"Multivariate to Power/Overview/read":{"id":"Multivariate to Power/Overview/read","title":"Multivariate to Power","description":"1. 
Multivariate Probability Distributions","sidebar":"sidebar"},"Multivariate to Power/Power and Sample Sizes/read":{"id":"Multivariate to Power/Power and Sample Sizes/read","title":"Power and Sample Sizes","description":"The Power of a Test","sidebar":"sidebar"},"Multivariate to Power/Some Distributions Related to Normal/read":{"id":"Multivariate to Power/Some Distributions Related to Normal/read","title":"Some Distributions Related to the Normal","description":"The Normal and Sums of Normals","sidebar":"sidebar"},"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read":{"id":"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read","title":"Test of Hypothesis, P Values and Related Concepts","description":"The Principle of the Hypothesis Test","sidebar":"sidebar"},"Numbers to Indices/Data Vectors/read":{"id":"Numbers to Indices/Data Vectors/read","title":"Data Vectors","description":"The Plane","sidebar":"sidebar"},"Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read":{"id":"Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read","title":"Discrete Random Variables and the Binomial Distribution","description":"Simple Probabilities","sidebar":"sidebar"},"Numbers to Indices/Functions/read":{"id":"Numbers to Indices/Functions/read","title":"Functions","description":"Functions of a Single Variable","sidebar":"sidebar"},"Numbers to Indices/Indices and the apply Commands in R/read":{"id":"Numbers to Indices/Indices and the apply Commands in R/read","title":"Indices and the Apply Commands in R","description":"Giving Names to Elements","sidebar":"sidebar"},"Numbers to Indices/More on Algebra/read":{"id":"Numbers to Indices/More on Algebra/read","title":"More on Algebra","description":"Some Squares","sidebar":"sidebar"},"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read":{"id":"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read","title":"Numbers, Arithmetic 
and Basic Algebra","description":"Natural Numbers","sidebar":"sidebar"},"Numbers to Indices/Overview/read":{"id":"Numbers to Indices/Overview/read","title":"Numbers to Indices In R","description":"1. Numbers, Arithmetic and Basic Algebra","sidebar":"sidebar"},"Numbers to Indices/Polynomials/read":{"id":"Numbers to Indices/Polynomials/read","title":"Polynomials","description":"The General Polynomial","sidebar":"sidebar"},"Numbers to Indices/Simple Data Analysis in R/read":{"id":"Numbers to Indices/Simple Data Analysis in R/read","title":"Simple Data Analysis in R","description":"Entering Data. Data Frames","sidebar":"sidebar"},"README":{"id":"README","title":"Intro","description":"This is a landing page for the CCAS course.","sidebar":"sidebar"},"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read":{"id":"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read","title":"Independence, Expectations and the Moment Generating Function","description":"Independent Random Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Multivariate Calculus/read":{"id":"Vectors to Some Regression Topics/Multivariate Calculus/read","title":"Multivariate Calculus","description":"Vector Functions of Several Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read":{"id":"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read","title":"Notes and Examples: The Linear Model","description":"Simple Linear Regression In R","sidebar":"sidebar"},"Vectors to Some Regression Topics/Overview/read":{"id":"Vectors to Some Regression Topics/Overview/read","title":"Vectors to Some Regression Topics","description":"1. 
Vectors and Matrix Operations","sidebar":"sidebar"},"Vectors to Some Regression Topics/Ranks and Determinants/read":{"id":"Vectors to Some Regression Topics/Ranks and Determinants/read","title":"Ranks and Determinants","description":"The Rank of a Matrix","sidebar":"sidebar"},"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read":{"id":"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read","title":"Some Notes on Matrices and Linear Operators","description":"The Matrix As a Linear Operator","sidebar":"sidebar"},"Vectors to Some Regression Topics/Some Regression Topics/read":{"id":"Vectors to Some Regression Topics/Some Regression Topics/read","title":"Some Regression Topics","description":"Poisson Regression","sidebar":"sidebar"},"Vectors to Some Regression Topics/The Gamma Distribution/read":{"id":"Vectors to Some Regression Topics/The Gamma Distribution/read","title":"The Gamma Distribution","description":"The Gamma Distribution","sidebar":"sidebar"},"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read":{"id":"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read","title":"The Multivariate Normal Distribution and Related Topics","description":"Transformations of Random Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Vector and Matrix Operations/read":{"id":"Vectors to Some Regression Topics/Vector and Matrix Operations/read","title":"Vectors and Matrix Operations","description":"Numbers, Vectors, Matrices","sidebar":"sidebar"}}}')}}]); \ No newline at end of file +"use 
strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[53],{1109:e=>{e.exports=JSON.parse('{"pluginId":"default","version":"current","label":"Next","banner":null,"badge":false,"noIndex":false,"className":"docs-version-current","isLast":true,"docsSidebars":{"sidebar":[{"type":"link","label":"Introduction","href":"/ccas/","docId":"README"},{"type":"category","label":"Numbers to Indices","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Overview/read","docId":"Numbers to Indices/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Overview/"},{"type":"category","label":"Numbers, Arithmetic and Basic Algebra","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read","docId":"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Numbers, Arithmetic and Basic Algebra/"},{"type":"category","label":"Data Vectors","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Data Vectors/read","docId":"Numbers to Indices/Data Vectors/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Data Vectors/"},{"type":"category","label":"More on Algebra","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/More on Algebra/read","docId":"Numbers to Indices/More on Algebra/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/More on Algebra/"},{"type":"category","label":"Discrete Random Variables and the Binomial Distribution","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read","docId":"Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Discrete Random Variables and the 
Binomial Distribution/"},{"type":"category","label":"Functions","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Functions/read","docId":"Numbers to Indices/Functions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Functions/"},{"type":"category","label":"Polynomials","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Polynomials/read","docId":"Numbers to Indices/Polynomials/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Polynomials/"},{"type":"category","label":"Simple Data Analysis in R","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Simple Data Analysis in R/read","docId":"Numbers to Indices/Simple Data Analysis in R/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Simple Data Analysis in R/"},{"type":"category","label":"Indices and the apply Commands in R","items":[{"type":"link","label":"Reading","href":"/ccas/Numbers to Indices/Indices and the apply Commands in R/read","docId":"Numbers to Indices/Indices and the apply Commands in R/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/Indices and the apply Commands in R/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Numbers to Indices/"},{"type":"category","label":"Functions","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Overview/read","docId":"Functions/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Overview/"},{"type":"category","label":"Functions of Functions and the Exponential Function","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Functions of Functions and the Exponential Function/read","docId":"Functions/Functions of Functions and the Exponential Function/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Functions of Functions and the Exponential 
Function/"},{"type":"category","label":"Inverse Functions and the Logarithm","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Inverse Functions and the Logarithm/read","docId":"Functions/Inverse Functions and the Logarithm/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Inverse Functions and the Logarithm/"},{"type":"category","label":"Continuity and Limits","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Continuity and Limits/read","docId":"Functions/Continuity and Limits/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Continuity and Limits/"},{"type":"category","label":"Sequences and Series","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Sequences and Series/read","docId":"Functions/Sequences and Series/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Sequences and Series/"},{"type":"category","label":"Slopes of Lines and Curves","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Slopes of Lines and Curves/read","docId":"Functions/Slopes of Lines and Curves/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Slopes of Lines and Curves/"},{"type":"category","label":"Derivatives","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Derivatives/read","docId":"Functions/Derivatives/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Derivatives/"},{"type":"category","label":"Applications of Differentiation","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Applications of Differentiation/read","docId":"Functions/Applications of Differentiation/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Applications of Differentiation/"},{"type":"category","label":"Integrals and Probability Density Functions","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Integrals and Probability Density Functions/read","docId":"Functions/Integrals and Probability Density 
Functions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Integrals and Probability Density Functions/"},{"type":"category","label":"Principles of Programming","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Principles of Programming/read","docId":"Functions/Principles of Programming/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Principles of Programming/"},{"type":"category","label":"The Central Limit Theorem and Related Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/The Central Limit Theorem and Related Topics/read","docId":"Functions/The Central Limit Theorem and Related Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/The Central Limit Theorem and Related Topics/"},{"type":"category","label":"Miscellanea","items":[{"type":"link","label":"Reading","href":"/ccas/Functions/Miscellanea/read","docId":"Functions/Miscellanea/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/Miscellanea/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Functions/"},{"type":"category","label":"Multivariate to Power","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Overview/read","docId":"Multivariate to Power/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Overview/"},{"type":"category","label":"Multivariate Probability Distributions","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Multivariate Probability Distributions/read","docId":"Multivariate to Power/Multivariate Probability Distributions/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Multivariate Probability Distributions/"},{"type":"category","label":"Some Distributions Related to Normal","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Some Distributions Related to 
Normal/read","docId":"Multivariate to Power/Some Distributions Related to Normal/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Some Distributions Related to Normal/"},{"type":"category","label":"Estimation, Estimates and Estimators","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Estimation, Estimates and Estimators/read","docId":"Multivariate to Power/Estimation, Estimates and Estimators/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Estimation, Estimates and Estimators/"},{"type":"category","label":"Test of Hypothesis, P Values and Related Concepts","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read","docId":"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/"},{"type":"category","label":"Power and Sample Sizes","items":[{"type":"link","label":"Reading","href":"/ccas/Multivariate to Power/Power and Sample Sizes/read","docId":"Multivariate to Power/Power and Sample Sizes/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/Power and Sample Sizes/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Multivariate to Power/"},{"type":"category","label":"Vectors to Some Regression Topics","items":[{"type":"category","label":"Overview","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Overview/read","docId":"Vectors to Some Regression Topics/Overview/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Overview/"},{"type":"category","label":"Vector and Matrix Operations","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Vector and Matrix Operations/read","docId":"Vectors to Some Regression 
Topics/Vector and Matrix Operations/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Vector and Matrix Operations/"},{"type":"category","label":"Some Notes on Matrices and Linear Operators","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read","docId":"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/"},{"type":"category","label":"Ranks and Determinants","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Ranks and Determinants/read","docId":"Vectors to Some Regression Topics/Ranks and Determinants/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Ranks and Determinants/"},{"type":"category","label":"Multivariate Calculus","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Multivariate Calculus/read","docId":"Vectors to Some Regression Topics/Multivariate Calculus/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Multivariate Calculus/"},{"type":"category","label":"The Multivariate Normal Distribution and Related Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read","docId":"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/"},{"type":"category","label":"Independence, Expectations and the Moment-Generating Function","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Independence, Expectations 
and the Moment-Generating Function/read","docId":"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/"},{"type":"category","label":"The Gamma Distribution","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/The Gamma Distribution/read","docId":"Vectors to Some Regression Topics/The Gamma Distribution/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/The Gamma Distribution/"},{"type":"category","label":"Notes and Examples, The Linear Model","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read","docId":"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Notes and Examples, The Linear Model/"},{"type":"category","label":"Some Regression Topics","items":[{"type":"link","label":"Reading","href":"/ccas/Vectors to Some Regression Topics/Some Regression Topics/read","docId":"Vectors to Some Regression Topics/Some Regression Topics/read"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/Some Regression Topics/"}],"collapsed":true,"collapsible":true,"href":"/ccas/Vectors to Some Regression Topics/"}]},"docs":{"Functions/Applications of Differentiation/read":{"id":"Functions/Applications of Differentiation/read","title":"Applications of Differentiation","description":"Tracking the Sign of the Derivative","sidebar":"sidebar"},"Functions/Continuity and Limits/read":{"id":"Functions/Continuity and Limits/read","title":"Continuity and Limits","description":"The Concept of 
Continuity","sidebar":"sidebar"},"Functions/Derivatives/read":{"id":"Functions/Derivatives/read","title":"Derivatives","description":"The Derivative As a Limit","sidebar":"sidebar"},"Functions/Functions of Functions and the Exponential Function/read":{"id":"Functions/Functions of Functions and the Exponential Function/read","title":"Functions of Functions and the Exponential Function","description":"Exponential Growth and Decline","sidebar":"sidebar"},"Functions/Integrals and Probability Density Functions/read":{"id":"Functions/Integrals and Probability Density Functions/read","title":"Integrals and Probability Density Functions","description":"Area Under a Curve","sidebar":"sidebar"},"Functions/Inverse Functions and the Logarithm/read":{"id":"Functions/Inverse Functions and the Logarithm/read","title":"Inverse Functions and the Logarithm","description":"Inverse Function","sidebar":"sidebar"},"Functions/Miscellanea/read":{"id":"Functions/Miscellanea/read","title":"Miscellanea","description":"Simple Probabilities In R","sidebar":"sidebar"},"Functions/Overview/read":{"id":"Functions/Overview/read","title":"Functions of Functions to Miscellanea","description":"1. 
Functions of Functions and the Exponential Function","sidebar":"sidebar"},"Functions/Principles of Programming/read":{"id":"Functions/Principles of Programming/read","title":"Principles of Programming","description":"Modularity","sidebar":"sidebar"},"Functions/Sequences and Series/read":{"id":"Functions/Sequences and Series/read","title":"Sequences and Series","description":"Sequences","sidebar":"sidebar"},"Functions/Slopes of Lines and Curves/read":{"id":"Functions/Slopes of Lines and Curves/read","title":"Slopes of Lines and Curves","description":"The Slope of a Line","sidebar":"sidebar"},"Functions/The Central Limit Theorem and Related Topics/read":{"id":"Functions/The Central Limit Theorem and Related Topics/read","title":"The Central Limit Theorem and Related Topics","description":"The Central Limit Theorem","sidebar":"sidebar"},"Multivariate to Power/Estimation, Estimates and Estimators/read":{"id":"Multivariate to Power/Estimation, Estimates and Estimators/read","title":"Estimation, Estimates and Estimators","description":"Ordinary Least Squares for a Single Mean","sidebar":"sidebar"},"Multivariate to Power/Multivariate Probability Distributions/read":{"id":"Multivariate to Power/Multivariate Probability Distributions/read","title":"Multivariate Probability Distributions","description":"Joint Probability Distribution","sidebar":"sidebar"},"Multivariate to Power/Overview/read":{"id":"Multivariate to Power/Overview/read","title":"Multivariate to Power","description":"1. 
Multivariate Probability Distributions","sidebar":"sidebar"},"Multivariate to Power/Power and Sample Sizes/read":{"id":"Multivariate to Power/Power and Sample Sizes/read","title":"Power and Sample Sizes","description":"The Power of a Test","sidebar":"sidebar"},"Multivariate to Power/Some Distributions Related to Normal/read":{"id":"Multivariate to Power/Some Distributions Related to Normal/read","title":"Some Distributions Related to the Normal","description":"The Normal and Sums of Normals","sidebar":"sidebar"},"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read":{"id":"Multivariate to Power/Test of Hypothesis, P Values and Related Concepts/read","title":"Test of Hypothesis, P Values and Related Concepts","description":"The Principle of the Hypothesis Test","sidebar":"sidebar"},"Numbers to Indices/Data Vectors/read":{"id":"Numbers to Indices/Data Vectors/read","title":"Data Vectors","description":"The Plane","sidebar":"sidebar"},"Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read":{"id":"Numbers to Indices/Discrete Random Variables and the Binomial Distribution/read","title":"Discrete Random Variables and the Binomial Distribution","description":"Simple Probabilities","sidebar":"sidebar"},"Numbers to Indices/Functions/read":{"id":"Numbers to Indices/Functions/read","title":"Functions","description":"Functions of a Single Variable","sidebar":"sidebar"},"Numbers to Indices/Indices and the apply Commands in R/read":{"id":"Numbers to Indices/Indices and the apply Commands in R/read","title":"Indices and the Apply Commands in R","description":"Giving Names to Elements","sidebar":"sidebar"},"Numbers to Indices/More on Algebra/read":{"id":"Numbers to Indices/More on Algebra/read","title":"More on Algebra","description":"Some Squares","sidebar":"sidebar"},"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read":{"id":"Numbers to Indices/Numbers, Arithmetic and Basic Algebra/read","title":"Numbers, Arithmetic 
and Basic Algebra","description":"Natural Numbers","sidebar":"sidebar"},"Numbers to Indices/Overview/read":{"id":"Numbers to Indices/Overview/read","title":"Numbers to Indices In R","description":"1. Numbers, Arithmetic and Basic Algebra","sidebar":"sidebar"},"Numbers to Indices/Polynomials/read":{"id":"Numbers to Indices/Polynomials/read","title":"Polynomials","description":"The General Polynomial","sidebar":"sidebar"},"Numbers to Indices/Simple Data Analysis in R/read":{"id":"Numbers to Indices/Simple Data Analysis in R/read","title":"Simple Data Analysis in R","description":"Entering Data. Data Frames","sidebar":"sidebar"},"README":{"id":"README","title":"Introduction","description":"This is a landing page for the CCAS (Computing and Calculus for Advanced Statistics) course.","sidebar":"sidebar"},"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read":{"id":"Vectors to Some Regression Topics/Independence, Expectations and the Moment-Generating Function/read","title":"Independence, Expectations and the Moment Generating Function","description":"Independent Random Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Multivariate Calculus/read":{"id":"Vectors to Some Regression Topics/Multivariate Calculus/read","title":"Multivariate Calculus","description":"Vector Functions of Several Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read":{"id":"Vectors to Some Regression Topics/Notes and Examples, The Linear Model/read","title":"Notes and Examples: The Linear Model","description":"Simple Linear Regression In R","sidebar":"sidebar"},"Vectors to Some Regression Topics/Overview/read":{"id":"Vectors to Some Regression Topics/Overview/read","title":"Vectors to Some Regression Topics","description":"1. 
Vectors and Matrix Operations","sidebar":"sidebar"},"Vectors to Some Regression Topics/Ranks and Determinants/read":{"id":"Vectors to Some Regression Topics/Ranks and Determinants/read","title":"Ranks and Determinants","description":"The Rank of a Matrix","sidebar":"sidebar"},"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read":{"id":"Vectors to Some Regression Topics/Some Notes on Matrices and Linear Operators/read","title":"Some Notes on Matrices and Linear Operators","description":"The Matrix As a Linear Operator","sidebar":"sidebar"},"Vectors to Some Regression Topics/Some Regression Topics/read":{"id":"Vectors to Some Regression Topics/Some Regression Topics/read","title":"Some Regression Topics","description":"Poisson Regression","sidebar":"sidebar"},"Vectors to Some Regression Topics/The Gamma Distribution/read":{"id":"Vectors to Some Regression Topics/The Gamma Distribution/read","title":"The Gamma Distribution","description":"The Gamma Distribution","sidebar":"sidebar"},"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read":{"id":"Vectors to Some Regression Topics/The Multivariate Normal Distribution and Related Topics/read","title":"The Multivariate Normal Distribution and Related Topics","description":"Transformations of Random Variables","sidebar":"sidebar"},"Vectors to Some Regression Topics/Vector and Matrix Operations/read":{"id":"Vectors to Some Regression Topics/Vector and Matrix Operations/read","title":"Vectors and Matrix Operations","description":"Numbers, Vectors, Matrices","sidebar":"sidebar"}}}')}}]); \ No newline at end of file diff --git a/assets/js/f71aa3b8.3145a67c.js b/assets/js/f71aa3b8.1f4d454d.js similarity index 52% rename from assets/js/f71aa3b8.3145a67c.js rename to assets/js/f71aa3b8.1f4d454d.js index 4c75718..45f8085 100644 --- a/assets/js/f71aa3b8.3145a67c.js +++ b/assets/js/f71aa3b8.1f4d454d.js @@ -1 +1 @@ -"use 
strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[7189],{3454:e=>{e.exports=JSON.parse('{"title":"Numbers to Indices","slug":"/Numbers to Indices/","permalink":"/ccas/Numbers to Indices/","navigation":{"previous":{"title":"Intro","permalink":"/ccas/"},"next":{"title":"Overview","permalink":"/ccas/Numbers to Indices/Overview/"}}}')}}]); \ No newline at end of file +"use strict";(self.webpackChunkccas=self.webpackChunkccas||[]).push([[7189],{3454:e=>{e.exports=JSON.parse('{"title":"Numbers to Indices","slug":"/Numbers to Indices/","permalink":"/ccas/Numbers to Indices/","navigation":{"previous":{"title":"Introduction","permalink":"/ccas/"},"next":{"title":"Overview","permalink":"/ccas/Numbers to Indices/Overview/"}}}')}}]); \ No newline at end of file diff --git a/assets/js/runtime~main.78c8f560.js b/assets/js/runtime~main.e3e1199c.js similarity index 96% rename from assets/js/runtime~main.78c8f560.js rename to assets/js/runtime~main.e3e1199c.js index f0f0c8f..4180d7f 100644 --- a/assets/js/runtime~main.78c8f560.js +++ b/assets/js/runtime~main.e3e1199c.js @@ -1 +1 @@ -(()=>{"use strict";var e,a,c,f,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return 
b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",313:"15ce277c",540:"d9e71704",566:"fc34a830",624:"0e5755f3",816:"a3dcb897",833:"b391e50b",835:"493a711e",1080:"ce2c12ce",1119:"c5a8eb9a",1121:"dca5d7be",1301:"a9d6b5d2",1379:"bc30c50d",1642:"0ea4b05a",1677:"6c394818",1690:"bf443929",1835:"d3ec60a9",2293:"2c1d9db2",2329:"608c6e40",2464:"69ca0283",2853:"8fcc7fa3",3085:"1f391b9e",3110:"cecf2df2",3178:"40ed9358",3207:"e9850750",3319:"16710d03",3520:"1751ff68",3697:"3ce087a1",3844:"8c2bbba6",3857:"c099e56d",3919:"72a5c412",3934:"26679b5a",4210:"967d7cfe",4297:"3e3e1c77",4352:"76b59476",4659:"04e30ed3",4702:"1eb8f356",4780:"d8abe429",4833:"8911e95e",4880:"7744abf7",5080:"d7b6806e",5083:"276a1ecb",5194:"d0feb616",5237:"1313f455",5257:"d5a903c9",5283:"c7620130",5384:"29be4871",5423:"e971bc6e",5442:"d3865350",5789:"edd66f99",5839:"d2bca00e",5881:"1906d06b",6147:"81e00fb6",6187:"d2bb9242",6204:"6b22358e",6451:"2b7bc223",6491:"160a0ee4",6634:"0680fa3d",6736:"56852e44",7189:"f71aa3b8",7215:"98a3b28e",7288:"118be430",7326:"d0cfa8e6",7414:"393be207",7636:"d790878d",7717:"585aaffd",7861:"67d8ef5c",7918:"17896441",7944:"068a9793",7952:"e14e26b4",7977:"c25502f7",8228:"831e22d4",8288:"91691405",8313:"f4a2e6fe",8421:"23374ca6",8537:"79f7fde1",8644:"0e78eef0",9149:"653b7531",9444:"fd008b27",9493:"19f7b218",9508:"6ba876f5",9514:"1be78505",9533:"0bef421d",9569:"30999462",9596:"7a38d409",9817:"14eb3368",9934:"424a8fa5"}[e]||e)+"."+{53:"ad395550",313:"f82eebd6",540:"be2d27bc",566:"d0a4a28c",624:"2a9143a5",816:"ce7d859f",833:"04eba05b",835:"4c2d4fbe",1080:"b3b69c86",1119:"16fea8c1",1121:"587605c3",1301:"95179a9a",1379:"41352fde",1642:"2cec99f9",1677:"57967da3",1690:"e58c779e",1835:"4bcd2de5",2293:"0c81193a",2329:"bd0f466a",2464:"36bc00ba",2666:"871383e8",2853:"f22295d3",3085:"c506c24d",3110:"804fd9e7
",3178:"5dcdba7b",3207:"88b92bb1",3319:"94097216",3520:"6caa833d",3697:"4e48dec3",3844:"b5f8e998",3857:"347d0a41",3919:"4179b789",3934:"f00c066b",4210:"3a13781a",4297:"bea25426",4352:"d367daf4",4659:"cb4734a1",4702:"71c210ee",4780:"3a0a31f6",4833:"e065afd2",4880:"f90b2971",4972:"c4dc30ed",5080:"d668f19a",5083:"779140e3",5194:"c05f95af",5237:"b1322ab8",5257:"8e282119",5283:"60dce99e",5384:"f4b58a5a",5423:"f04b850c",5442:"90b167ac",5789:"fe3f6317",5839:"f3292a8d",5881:"5a42c609",6147:"39da2d52",6187:"6c70cc0e",6204:"3f1bf443",6451:"db159706",6491:"12ba337d",6634:"bc973d92",6736:"7565180f",7189:"3145a67c",7215:"f89df8c7",7288:"6d2f22eb",7326:"eeb66ec5",7414:"129286a7",7636:"bf825341",7717:"360ede5a",7861:"62ddf154",7918:"04f346c1",7944:"0d19bdde",7952:"2fe6b02d",7977:"3e09c920",8228:"99a6d4ea",8288:"ac8e208d",8313:"995fa231",8421:"ddbb2705",8537:"e78880e1",8644:"0209032d",9149:"7cfac8b9",9444:"4e38ead1",9493:"8f9cd97d",9508:"d15bf303",9514:"8cf05b94",9533:"24fe7000",9569:"f8ac2c97",9596:"d4ca15b6",9817:"58c1e76c",9934:"e33d756d"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="ccas:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/ccas/",r.gca=function(e){return 
e={17896441:"7918",30999462:"9569",91691405:"8288","935f2afb":"53","15ce277c":"313",d9e71704:"540",fc34a830:"566","0e5755f3":"624",a3dcb897:"816",b391e50b:"833","493a711e":"835",ce2c12ce:"1080",c5a8eb9a:"1119",dca5d7be:"1121",a9d6b5d2:"1301",bc30c50d:"1379","0ea4b05a":"1642","6c394818":"1677",bf443929:"1690",d3ec60a9:"1835","2c1d9db2":"2293","608c6e40":"2329","69ca0283":"2464","8fcc7fa3":"2853","1f391b9e":"3085",cecf2df2:"3110","40ed9358":"3178",e9850750:"3207","16710d03":"3319","1751ff68":"3520","3ce087a1":"3697","8c2bbba6":"3844",c099e56d:"3857","72a5c412":"3919","26679b5a":"3934","967d7cfe":"4210","3e3e1c77":"4297","76b59476":"4352","04e30ed3":"4659","1eb8f356":"4702",d8abe429:"4780","8911e95e":"4833","7744abf7":"4880",d7b6806e:"5080","276a1ecb":"5083",d0feb616:"5194","1313f455":"5237",d5a903c9:"5257",c7620130:"5283","29be4871":"5384",e971bc6e:"5423",d3865350:"5442",edd66f99:"5789",d2bca00e:"5839","1906d06b":"5881","81e00fb6":"6147",d2bb9242:"6187","6b22358e":"6204","2b7bc223":"6451","160a0ee4":"6491","0680fa3d":"6634","56852e44":"6736",f71aa3b8:"7189","98a3b28e":"7215","118be430":"7288",d0cfa8e6:"7326","393be207":"7414",d790878d:"7636","585aaffd":"7717","67d8ef5c":"7861","068a9793":"7944",e14e26b4:"7952",c25502f7:"7977","831e22d4":"8228",f4a2e6fe:"8313","23374ca6":"8421","79f7fde1":"8537","0e78eef0":"8644","653b7531":"9149",fd008b27:"9444","19f7b218":"9493","6ba876f5":"9508","1be78505":"9514","0bef421d":"9533","7a38d409":"9596","14eb3368":"9817","424a8fa5":"9934"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": 
"+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n{"use strict";var e,a,c,f,d,b={},t={};function r(e){var a=t[e];if(void 0!==a)return a.exports;var c=t[e]={id:e,loaded:!1,exports:{}};return b[e].call(c.exports,c,c.exports,r),c.loaded=!0,c.exports}r.m=b,r.c=t,e=[],r.O=(a,c,f,d)=>{if(!c){var b=1/0;for(i=0;i=d)&&Object.keys(r.O).every((e=>r.O[e](c[o])))?c.splice(o--,1):(t=!1,d0&&e[i-1][2]>d;i--)e[i]=e[i-1];e[i]=[c,f,d]},r.n=e=>{var a=e&&e.__esModule?()=>e.default:()=>e;return r.d(a,{a:a}),a},c=Object.getPrototypeOf?e=>Object.getPrototypeOf(e):e=>e.__proto__,r.t=function(e,f){if(1&f&&(e=this(e)),8&f)return e;if("object"==typeof e&&e){if(4&f&&e.__esModule)return e;if(16&f&&"function"==typeof e.then)return e}var d=Object.create(null);r.r(d);var b={};a=a||[null,c({}),c([]),c(c)];for(var t=2&f&&e;"object"==typeof t&&!~a.indexOf(t);t=c(t))Object.getOwnPropertyNames(t).forEach((a=>b[a]=()=>e[a]));return b.default=()=>e,r.d(d,b),d},r.d=(e,a)=>{for(var c in 
a)r.o(a,c)&&!r.o(e,c)&&Object.defineProperty(e,c,{enumerable:!0,get:a[c]})},r.f={},r.e=e=>Promise.all(Object.keys(r.f).reduce(((a,c)=>(r.f[c](e,a),a)),[])),r.u=e=>"assets/js/"+({53:"935f2afb",313:"15ce277c",540:"d9e71704",566:"fc34a830",624:"0e5755f3",816:"a3dcb897",833:"b391e50b",835:"493a711e",1080:"ce2c12ce",1119:"c5a8eb9a",1121:"dca5d7be",1301:"a9d6b5d2",1379:"bc30c50d",1642:"0ea4b05a",1677:"6c394818",1690:"bf443929",1835:"d3ec60a9",2293:"2c1d9db2",2329:"608c6e40",2464:"69ca0283",2853:"8fcc7fa3",3085:"1f391b9e",3110:"cecf2df2",3178:"40ed9358",3207:"e9850750",3319:"16710d03",3520:"1751ff68",3697:"3ce087a1",3844:"8c2bbba6",3857:"c099e56d",3919:"72a5c412",3934:"26679b5a",4210:"967d7cfe",4297:"3e3e1c77",4352:"76b59476",4659:"04e30ed3",4702:"1eb8f356",4780:"d8abe429",4833:"8911e95e",4880:"7744abf7",5080:"d7b6806e",5083:"276a1ecb",5194:"d0feb616",5237:"1313f455",5257:"d5a903c9",5283:"c7620130",5384:"29be4871",5423:"e971bc6e",5442:"d3865350",5789:"edd66f99",5839:"d2bca00e",5881:"1906d06b",6147:"81e00fb6",6187:"d2bb9242",6204:"6b22358e",6451:"2b7bc223",6491:"160a0ee4",6634:"0680fa3d",6736:"56852e44",7189:"f71aa3b8",7215:"98a3b28e",7288:"118be430",7326:"d0cfa8e6",7414:"393be207",7636:"d790878d",7717:"585aaffd",7861:"67d8ef5c",7918:"17896441",7944:"068a9793",7952:"e14e26b4",7977:"c25502f7",8228:"831e22d4",8288:"91691405",8313:"f4a2e6fe",8421:"23374ca6",8537:"79f7fde1",8644:"0e78eef0",9149:"653b7531",9444:"fd008b27",9493:"19f7b218",9508:"6ba876f5",9514:"1be78505",9533:"0bef421d",9569:"30999462",9596:"7a38d409",9817:"14eb3368",9934:"424a8fa5"}[e]||e)+"."+{53:"64cbbc6c",313:"f82eebd6",540:"be2d27bc",566:"d0a4a28c",624:"2a9143a5",816:"ce7d859f",833:"04eba05b",835:"4c2d4fbe",1080:"b3b69c86",1119:"16fea8c1",1121:"587605c3",1301:"95179a9a",1379:"41352fde",1642:"2cec99f9",1677:"57967da3",1690:"e58c779e",1835:"4bcd2de5",2293:"0c81193a",2329:"bd0f466a",2464:"36bc00ba",2666:"871383e8",2853:"f22295d3",3085:"c506c24d",3110:"804fd9e7",3178:"5dcdba7b",3207:"88b92bb1",3319:"94097216",352
0:"6caa833d",3697:"4e48dec3",3844:"b5f8e998",3857:"347d0a41",3919:"4179b789",3934:"f00c066b",4210:"3a13781a",4297:"bea25426",4352:"d367daf4",4659:"cb4734a1",4702:"71c210ee",4780:"3a0a31f6",4833:"e065afd2",4880:"f90b2971",4972:"c4dc30ed",5080:"d668f19a",5083:"779140e3",5194:"c05f95af",5237:"b1322ab8",5257:"8e282119",5283:"60dce99e",5384:"f4b58a5a",5423:"f04b850c",5442:"90b167ac",5789:"fe3f6317",5839:"f3292a8d",5881:"5a42c609",6147:"39da2d52",6187:"6c70cc0e",6204:"3f1bf443",6451:"db159706",6491:"12ba337d",6634:"bc973d92",6736:"7565180f",7189:"1f4d454d",7215:"f89df8c7",7288:"6d2f22eb",7326:"eeb66ec5",7414:"129286a7",7636:"bf825341",7717:"360ede5a",7861:"62ddf154",7918:"04f346c1",7944:"0d19bdde",7952:"2fe6b02d",7977:"3e09c920",8228:"99a6d4ea",8288:"ac8e208d",8313:"995fa231",8421:"95001b49",8537:"e78880e1",8644:"0209032d",9149:"7cfac8b9",9444:"4e38ead1",9493:"8f9cd97d",9508:"d15bf303",9514:"8cf05b94",9533:"24fe7000",9569:"f8ac2c97",9596:"d4ca15b6",9817:"58c1e76c",9934:"e33d756d"}[e]+".js",r.miniCssF=e=>{},r.g=function(){if("object"==typeof globalThis)return globalThis;try{return this||new Function("return this")()}catch(e){if("object"==typeof window)return window}}(),r.o=(e,a)=>Object.prototype.hasOwnProperty.call(e,a),f={},d="ccas:",r.l=(e,a,c,b)=>{if(f[e])f[e].push(a);else{var t,o;if(void 0!==c)for(var n=document.getElementsByTagName("script"),i=0;i{t.onerror=t.onload=null,clearTimeout(s);var d=f[e];if(delete f[e],t.parentNode&&t.parentNode.removeChild(t),d&&d.forEach((e=>e(c))),a)return a(c)},s=setTimeout(l.bind(null,void 0,{type:"timeout",target:t}),12e4);t.onerror=l.bind(null,t.onerror),t.onload=l.bind(null,t.onload),o&&document.head.appendChild(t)}},r.r=e=>{"undefined"!=typeof Symbol&&Symbol.toStringTag&&Object.defineProperty(e,Symbol.toStringTag,{value:"Module"}),Object.defineProperty(e,"__esModule",{value:!0})},r.p="/ccas/",r.gca=function(e){return 
e={17896441:"7918",30999462:"9569",91691405:"8288","935f2afb":"53","15ce277c":"313",d9e71704:"540",fc34a830:"566","0e5755f3":"624",a3dcb897:"816",b391e50b:"833","493a711e":"835",ce2c12ce:"1080",c5a8eb9a:"1119",dca5d7be:"1121",a9d6b5d2:"1301",bc30c50d:"1379","0ea4b05a":"1642","6c394818":"1677",bf443929:"1690",d3ec60a9:"1835","2c1d9db2":"2293","608c6e40":"2329","69ca0283":"2464","8fcc7fa3":"2853","1f391b9e":"3085",cecf2df2:"3110","40ed9358":"3178",e9850750:"3207","16710d03":"3319","1751ff68":"3520","3ce087a1":"3697","8c2bbba6":"3844",c099e56d:"3857","72a5c412":"3919","26679b5a":"3934","967d7cfe":"4210","3e3e1c77":"4297","76b59476":"4352","04e30ed3":"4659","1eb8f356":"4702",d8abe429:"4780","8911e95e":"4833","7744abf7":"4880",d7b6806e:"5080","276a1ecb":"5083",d0feb616:"5194","1313f455":"5237",d5a903c9:"5257",c7620130:"5283","29be4871":"5384",e971bc6e:"5423",d3865350:"5442",edd66f99:"5789",d2bca00e:"5839","1906d06b":"5881","81e00fb6":"6147",d2bb9242:"6187","6b22358e":"6204","2b7bc223":"6451","160a0ee4":"6491","0680fa3d":"6634","56852e44":"6736",f71aa3b8:"7189","98a3b28e":"7215","118be430":"7288",d0cfa8e6:"7326","393be207":"7414",d790878d:"7636","585aaffd":"7717","67d8ef5c":"7861","068a9793":"7944",e14e26b4:"7952",c25502f7:"7977","831e22d4":"8228",f4a2e6fe:"8313","23374ca6":"8421","79f7fde1":"8537","0e78eef0":"8644","653b7531":"9149",fd008b27:"9444","19f7b218":"9493","6ba876f5":"9508","1be78505":"9514","0bef421d":"9533","7a38d409":"9596","14eb3368":"9817","424a8fa5":"9934"}[e]||e,r.p+r.u(e)},(()=>{var e={1303:0,532:0};r.f.j=(a,c)=>{var f=r.o(e,a)?e[a]:void 0;if(0!==f)if(f)c.push(f[2]);else if(/^(1303|532)$/.test(a))e[a]=0;else{var d=new Promise(((c,d)=>f=e[a]=[c,d]));c.push(f[2]=d);var b=r.p+r.u(a),t=new Error;r.l(b,(c=>{if(r.o(e,a)&&(0!==(f=e[a])&&(e[a]=void 0),f)){var d=c&&("load"===c.type?"missing":c.type),b=c&&c.target&&c.target.src;t.message="Loading chunk "+a+" failed.\n("+d+": 
"+b+")",t.name="ChunkLoadError",t.type=d,t.request=b,f[1](t)}}),"chunk-"+a,a)}},r.O.j=a=>0===e[a];var a=(a,c)=>{var f,d,b=c[0],t=c[1],o=c[2],n=0;if(b.some((a=>0!==e[a]))){for(f in t)r.o(t,f)&&(r.m[f]=t[f]);if(o)var i=o(r)}for(a&&a(c);n -Intro | Computing and Calculus for Applied Statistics - +Introduction | Computing and Calculus for Applied Statistics +
-

Intro

This is a landing page for the CCAS course. -Here you will find all the documentation needed for this course.

Best of luck!

- +

Introduction

This is a landing page for the CCAS (Computing and Calculus for Advanced Statistics) course. +Here you will find all the documentation needed for this course. +It is meant to be used by teachers, trainers, students and hobbyists who want to learn about topics on calculus and statistics.

The course is structured in chapters, each with their own sections. +Each section presents a particular topic, rich in examples. +There is a sizeable focus on the use of the R programming language for statistical computing for demonstrating the topics in a practical manner.

Chapters are:

Licensing and Contributing

The CCAS contents are open educational resources (OER), part of the Open Education Hub project; +they are hosted on GitHub, licensed under CC BY-SA 4.0 and BSD 3-Clause.

If you find an issue or want to contribute, follow the contribution guidelines on GitHub.

+ \ No newline at end of file diff --git a/markdown-page/index.html b/markdown-page/index.html index 16e589e..78c6632 100644 --- a/markdown-page/index.html +++ b/markdown-page/index.html @@ -4,13 +4,13 @@ Markdown page example | Computing and Calculus for Applied Statistics - +

Markdown page example

You don't need React to write simple standalone pages.

- + \ No newline at end of file