Commit 644fe9b

Merge pull request #53 from fmeynadier/doc_update

Documentation update

aewallin authored Nov 18, 2016
2 parents 2e0c830 + 9772ae8

Showing 6 changed files with 126 additions and 47 deletions.
20 changes: 20 additions & 0 deletions README.rst
@@ -101,6 +101,26 @@ or download from pypi::
Usage
=====

New in 2016.11: a simple top-level API, using dedicated classes for data handling and plotting.

::

import allantools # https://github.com/aewallin/allantools/
import numpy as np

# Compute a deviation using the Dataset class
a = allantools.Dataset(data=np.random.rand(1000))
a.compute("mdev")

# Plot it using the Plot class
b = allantools.Plot()
b.plot(a, errorbars=True, grid=True)
# You can override defaults before "show" if needed
b.ax.set_xlabel("Tau (s)")
b.show()

Lower-level access to the algorithms is still possible:

::

import allantools # https://github.com/aewallin/allantools/
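
For illustration, a minimal low-level call might look like this sketch
(assuming the ``oadev()`` signature and return tuple listed in the API
section)::

    import allantools  # https://github.com/aewallin/allantools/
    import numpy as np

    # 1000 phase points, sampled at 1 Hz (illustrative random data)
    phase = np.random.rand(1000)

    # returns the taus actually used, the deviations, their estimated
    # errors, and the number of samples used at each tau
    (taus_used, dev, dev_err, dev_n) = allantools.oadev(
        phase, rate=1.0, data_type="phase", taus="octave")
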
48 changes: 24 additions & 24 deletions allantools/allantools.py
@@ -661,7 +661,7 @@ def mtotdev(data, rate=1.0, data_type="phase", taus=None):
Better confidence at long averages for modified Allan
FIXME: bias-correction http://www.wriley.com/CI2.pdf page 6
The variance is scaled up (divided by this number) based on the
noise-type identified.
WPM 0.94
@@ -708,7 +708,7 @@ def calc_mtotdev_phase(phase, rate, m):
tau0 = 1.0/rate
N = len(phase) # phase data, N points
m = int(m)

n = 0 # number of terms in the sum, for error estimation
dev = 0.0 # the deviation we are computing
err = 0.0 # the error in the deviation
@@ -1366,19 +1366,19 @@ def tau_generator(data, rate, taus=None, v=False, even=False, maximum_m=-1):

def tau_reduction(ms, rate, n_per_decade):
"""Reduce the number of taus to maximum of n per decade (Helper function)
takes in a tau list and reduces the number of taus to a maximum amount per
decade. This is only usefull if more that the "decade" and "octave" but
less than the "all" taus are wanted. E.g. in to show certain features of
the data one might want 100 points per decade.
NOTE: The algorithm is slightly inaccurate for ms under n_per_decade, and
will also remove some points in this range, which is usually fine.
Typical use would be something like:
(data,m,taus)=tau_generator(data,rate,taus="all")
(m,taus)=tau_reduction(m,rate,n_per_decade)
Parameters
----------
ms: array of integers
@@ -1397,7 +1397,7 @@ def tau_reduction(ms, rate, n_per_decade):
Reduced list of tau values
"""
ms=np.int64(ms)
keep=np.bool8(np.rint(n_per_decade*np.log10(ms[1:])) -
np.rint(n_per_decade*np.log10(ms[:-1])))

ms=ms[keep]
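
Following the docstring's typical-use note, a runnable sketch could read as
follows (assuming both helpers can be imported from the
``allantools.allantools`` module)::

    import numpy as np
    from allantools.allantools import tau_generator, tau_reduction

    phase = np.random.rand(10000)   # illustrative phase data
    rate = 1.0                      # 1 Hz sampling rate

    # generate every possible averaging factor, then thin out the list
    # to roughly 100 points per decade
    (phase, ms, taus) = tau_generator(phase, rate, taus="all")
    (ms, taus) = tau_reduction(ms, rate, n_per_decade=100)
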
@@ -1570,7 +1570,7 @@ def edf_greenhall(alpha, d, m, N, overlapping = False, modified = False, verbose
hdev()
ohdev()
"""

if modified:
F=1 # F filter factor, 1 modified variance, m unmodified variance
else:
@@ -1611,7 +1611,7 @@ def edf_greenhall(alpha, d, m, N, overlapping = False, modified = False, verbose
else:
m_prime = float('inf')
variant = "b"

inv_edf = (1.0/(pow( greenhall_sz(0,m_prime,alpha,d),2)*M))*greenhall_BasicSum(J, M, S, m_prime, alpha, d)
if verbose:
print( "case 2.1%s edf= %3f" % (variant, float(1.0/inv_edf)) )
@@ -1659,7 +1659,7 @@ def edf_greenhall(alpha, d, m, N, overlapping = False, modified = False, verbose
if verbose:
print( "case 4.2 edf= %3f" % float(1.0/inv_edf) )
return 1.0/inv_edf

print("greenhall_edf() no matching case!")
assert(0) # ERROR

@@ -1695,17 +1695,17 @@ def greenhall_sz(t, F, alpha, d):
f = greenhall_sx(t-3.0, F, alpha)
g = greenhall_sx(t+3.0, F, alpha)
return a-b-c+dd+e-f-g

assert( 0 ) # ERROR

# this is Eqn (8) from Greenhall2004
def greenhall_sx(t, F, alpha):
if F==float('inf'):
return greenhall_sw(t,alpha+2)
a = 2*greenhall_sw(t, alpha)
b = greenhall_sw( t-1.0/float(F), alpha)
c = greenhall_sw( t+1.0/float(F), alpha)

return pow(F,2)*(a-b-c)

# this is Eqn (7) from Greenhall2004
@@ -1734,15 +1734,15 @@ def greenhall_sw(t, alpha):
return pow(t,6)*np.log( np.abs(t) )
elif alpha==-4:
return np.abs( pow(t,7) )

assert( 0 ) # ERROR

def greenhall_table3(alpha, d):
assert(alpha==1)
idx = d-1
table3=[ (6.0,4.0), (15.23,12.0), (47.8,40.0) ]
return table3[idx]

def greenhall_table2(alpha, d):
row_idx = int(-alpha+2) # map 2-> row0 and -4-> row6
assert( row_idx in [0,1,2,3,4,5] )
@@ -1800,11 +1800,11 @@ def edf_mtotdev(N,m,alpha):
def edf_simple(N, m, alpha):
"""Equivalent degrees of freedom.
Simple approximate formulae.
Parameters
----------
N : int
the number of phase samples
m : int
averaging factor, tau = m * tau0
alpha: int
@@ -1869,19 +1869,19 @@ def confidence_intervals(dev, ci, edf):
# for 1-sigma standard error set
# ci = scipy.special.erf(1/math.sqrt(2))
# = 0.68268949213708585

ci_l = min(np.abs(ci), np.abs((ci-1))) / 2
ci_h = 1 - ci_l

# function from scipy, works OK, but scipy is large and slow to build
chi2_l = scipy.stats.chi2.ppf(ci_l, edf)
chi2_h = scipy.stats.chi2.ppf(ci_h, edf)

variance = dev*dev
var_l = float(edf) * variance / chi2_h # NIST SP1065 eqn (45)
var_h = float(edf) * variance / chi2_l
return (np.sqrt(var_l), np.sqrt(var_h))
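
As a sketch of how the EDF and confidence-interval helpers combine (the
parameter values are illustrative; ``alpha=0`` denotes white FM noise and
``d=2`` selects the Allan variance in Greenhall's notation)::

    from allantools.allantools import edf_greenhall, confidence_intervals

    # say an overlapping ADEV of 1.2e-11 was measured at m=8,
    # from N=1024 phase points
    edf = edf_greenhall(alpha=0, d=2, m=8, N=1024, overlapping=True)

    # 1-sigma confidence interval, ci = erf(1/sqrt(2)) ~ 0.6827
    (dev_lo, dev_hi) = confidence_intervals(dev=1.2e-11,
                                            ci=0.68268949213708585, edf=edf)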




@@ -1951,7 +1951,7 @@ def phase2frequency(phase, rate):

def frequency2fractional(frequency, mean_frequency=-1):
""" Convert frequency in Hz to fractional frequency
Parameters
----------
frequency: np.array
Expand All @@ -1971,5 +1971,5 @@ def frequency2fractional(frequency, mean_frequency=-1):
mu = mean_frequency
y = [(x-mu)/mu for x in frequency]
return y
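
For example (illustrative numbers for a nominal 10 MHz source)::

    from allantools.allantools import frequency2fractional

    freqs = [10e6 + 0.3, 10e6 - 0.1, 10e6 + 0.2]   # counter readings in Hz
    y = frequency2fractional(freqs, mean_frequency=10e6)
    # y is now [3e-8, -1e-8, 2e-8] (dimensionless fractional frequency)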


45 changes: 34 additions & 11 deletions allantools/dataset.py
@@ -34,7 +34,34 @@
class Dataset():
""" Dataset class for Allantools
:Example:
::
import allantools
import numpy as np
# Load random data
a = allantools.Dataset(data=np.random.rand(1000))
# compute mdev
a.compute("mdev")
print(a.out["stat"])
compute() returns the result of the computation and also stores it in the
object's ``out`` member.
"""
#: input data Dict, will be initialized by __init__()
inp = {"data": None,
"rate": None,
"data_type": None,
"taus": None}

#: output data Dict, to be populated by compute()
out = {"taus": None,
"stat": None,
"stat_err": None,
"stat_n": None,
"stat_unc": None,
"stat_id": None}

def __init__(self, data=None, rate=1.0, data_type="phase", taus=None):
""" Initialize object with input data
@@ -50,24 +77,18 @@ def __init__(self, data=None, rate=1.0, data_type="phase", taus=None):
taus: np.array
Array of tau values, in seconds, for which to compute statistic.
Optionally set taus=["all"|"octave"|"decade"] for automatic
calculation of the taus list
Returns
-------
Dataset()
A Dataset() instance
"""

self.inp = {"data": data,
"rate": rate,
"data_type": data_type,
"taus": taus}
self.out = {"taus": None,
"stat": None,
"stat_err": None,
"stat_n": None,
"stat_unc": None,
"stat_id": None}
self.inp["data"] = data
self.inp["rate"] = rate
self.inp["data_type"] = data_type
self.inp["taus"] = taus

def set_input(self, data,
rate=1.0, data_type="phase", taus=None):
@@ -94,6 +115,8 @@ def compute(self, function):
def compute(self, function):
"""Evaluate the passed function with the supplied data.
Stores result in self.out.
Parameters
----------
function: str
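
Putting the class together, a round-trip might look like the following
sketch, where ``"oadev"`` stands in for any implemented statistic::

    import numpy as np
    import allantools

    d = allantools.Dataset(data=np.random.rand(1000), rate=1.0,
                           data_type="phase", taus="decade")
    result = d.compute("oadev")   # returned, and also stored in d.out
    print(d.out["taus"], d.out["stat"], d.out["stat_err"])
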
11 changes: 8 additions & 3 deletions allantools/plot.py
@@ -32,7 +32,9 @@
class Plot():
""" A class for plotting data once computed by Allantools
example :
:Example:
::
import allantools
import numpy as np
a = allantools.Dataset(data=np.random.rand(1000))
@@ -46,10 +48,13 @@ class Plot():
can change them by using standard matplotlib methods on self.fig and self.ax
"""
def __init__(self, no_display=False):
""" set ``no_display`` to ``True`` when we don't have an X-window
(e.g. for tests)
"""
try:
import matplotlib
if no_display:
matplotlib.use('Agg') # when we don't want an X-window displayed (e.g. from tests)
matplotlib.use('Agg')
import matplotlib.pyplot as plt
self.plt = plt
except ImportError:
@@ -89,7 +94,7 @@ def plot(self, atDataset,
def show(self):
"""Calls matplotlib.pyplot.show()
Keeping this separated from ``plot()`` allows tweaking the display before
rendering
"""
self.plt.show()
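
A headless round-trip using this class might look like the following sketch
(``fig.savefig()`` is standard matplotlib, not an allantools method)::

    import numpy as np
    import allantools

    ds = allantools.Dataset(data=np.random.rand(1000))
    ds.compute("mdev")

    p = allantools.Plot(no_display=True)   # 'Agg' backend, no X-window needed
    p.plot(ds, errorbars=True, grid=True)
    p.ax.set_xlabel("Tau (s)")             # tweak the axes before rendering
    p.fig.savefig("mdev.png")              # write to file instead of show()
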
44 changes: 36 additions & 8 deletions docs/api.rst
@@ -5,8 +5,34 @@
API
===

Implemented functions
---------------------
The Dataset() class
-------------------

**New in version 2016.11**

This class allows simple data handling.

.. autoclass:: Dataset
:members:

.. automethod:: __init__


The Plot() class
-------------------

**New in version 2016.11**

This class allows simple data plotting.


.. autoclass:: Plot
:members:

.. automethod:: __init__

Implemented statistics functions
--------------------------------

===================================== ====================================================
Function Description
@@ -21,16 +47,14 @@ Function Description
``mtotdev()`` Modified total deviation
``ttotdev()`` Time total deviation
``htotdev()`` Hadamard total deviation
``theo1()`` Theo1 deviation
``mtie()`` Maximum Time Interval Error
``tierms()`` Time Interval Error RMS
``gradev()`` Gap resistant overlapping Allan deviation
``uncertainty_estimate()`` Determine the uncertainty of a given two-sample variance estimate
``three_cornered_hat_phase()`` Three Cornered Hat Method
===================================== ====================================================

General usage
--------------
Low-level access to the statistics functions
--------------------------------------------

The deviation functions are generally of the form::
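
    # an illustrative sketch of the general form; the return names are
    # assumed for illustration, not taken from the committed file
    (taus_used, dev, dev_err, dev_n) = allantools.adev(
        data, rate=1.0, data_type="phase", taus=None)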

@@ -53,6 +77,7 @@ The deviation functions are generally of the form::
Function listing
================


Statistics
----------

Expand Down Expand Up @@ -84,5 +109,8 @@ Utilities
.. autofunction:: frequency2phase
.. autofunction:: phase2frequency
.. autofunction:: phase2radians
.. autofunction:: uncertainty_estimate
.. autofunction:: edf_simple
.. autofunction:: edf_greenhall
.. autofunction:: edf_totdev
.. autofunction:: edf_mtotdev
.. autofunction:: three_cornered_hat_phase
5 changes: 4 additions & 1 deletion docs/conf.py
@@ -58,8 +58,9 @@
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.pngmath',
'sphinx.ext.imgmath',
'numpydoc'
]

@@ -320,3 +321,5 @@

# If true, do not generate a @detailmenu in the "Top" node's menu.
#texinfo_no_detailmenu = False

autodoc_member_order = 'bysource'
