+
+ Read the Docs
+ v: ${config.versions.current.slug}
+
+
+
+
+ ${renderLanguages(config)}
+ ${renderVersions(config)}
+ ${renderDownloads(config)}
+
+ On Read the Docs
+
+ Project Home
+
+
+ Builds
+
+
+ Downloads
+
+
+
+ Search
+
+
+
+
+
+
+ Hosted by Read the Docs
+
+
+
+ `;
+
+ // Inject the generated flyout into the body HTML element.
+ document.body.insertAdjacentHTML("beforeend", flyout);
+
+ // Trigger the Read the Docs Addons Search modal when clicking on the "Search docs" input from inside the flyout.
+ document
+ .querySelector("#flyout-search-form")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+ })
+}
+
+if (themeLanguageSelector || themeVersionSelector) {
+ function onSelectorSwitch(event) {
+ const option = event.target.selectedIndex;
+ const item = event.target.options[option];
+ window.location.href = item.dataset.url;
+ }
+
+ document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ const config = event.detail.data();
+
+ const versionSwitch = document.querySelector(
+ "div.switch-menus > div.version-switch",
+ );
+ if (themeVersionSelector) {
+ let versions = config.versions.active;
+ if (config.versions.current.hidden || config.versions.current.type === "external") {
+ versions.unshift(config.versions.current);
+ }
+      const versionSelect = `
+      <select>
+        ${versions
+          .map(
+            (version) => `
+        <option
+          value="${version.slug}"
+          ${config.versions.current.slug === version.slug ? 'selected="selected"' : ""}
+          data-url="${version.urls.documentation}">
+          ${version.slug}
+        </option>`,
+          )
+          .join("\n")}
+      </select>
+      `;
+
+ versionSwitch.innerHTML = versionSelect;
+ versionSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+
+ const languageSwitch = document.querySelector(
+ "div.switch-menus > div.language-switch",
+ );
+
+ if (themeLanguageSelector) {
+ if (config.projects.translations.length) {
+ // Add the current language to the options on the selector
+ let languages = config.projects.translations.concat(
+ config.projects.current,
+ );
+ languages = languages.sort((a, b) =>
+ a.language.name.localeCompare(b.language.name),
+ );
+
+      const languageSelect = `
+      <select>
+        ${languages
+          .map(
+            (language) => `
+        <option
+          value="${language.language.code}"
+          ${config.projects.current.slug === language.slug ? 'selected="selected"' : ""}
+          data-url="${language.urls.documentation}">
+          ${language.language.name}
+        </option>`,
+          )
+          .join("\n")}
+      </select>
+      `;
+
+ languageSwitch.innerHTML = languageSelect;
+ languageSwitch.firstElementChild.addEventListener("change", onSelectorSwitch);
+ }
+ else {
+ languageSwitch.remove();
+ }
+ }
+ });
+}
+
+document.addEventListener("readthedocs-addons-data-ready", function (event) {
+ // Trigger the Read the Docs Addons Search modal when clicking on "Search docs" input from the topnav.
+ document
+ .querySelector("[role='search'] input")
+ .addEventListener("focusin", () => {
+ const event = new CustomEvent("readthedocs-search-show");
+ document.dispatchEvent(event);
+ });
+});
\ No newline at end of file
diff --git a/_static/language_data.js b/_static/language_data.js
new file mode 100644
index 0000000..c7fe6c6
--- /dev/null
+++ b/_static/language_data.js
@@ -0,0 +1,192 @@
+/*
+ * This script contains the language-specific data used by searchtools.js,
+ * namely the list of stopwords, stemmer, scorer and splitter.
+ */
+
+var stopwords = ["a", "and", "are", "as", "at", "be", "but", "by", "for", "if", "in", "into", "is", "it", "near", "no", "not", "of", "on", "or", "such", "that", "the", "their", "then", "there", "these", "they", "this", "to", "was", "will", "with"];
+
+
+/* Non-minified version is copied as a separate JS file, if available */
+
+/**
+ * Porter Stemmer
+ */
+var Stemmer = function() {
+
+ var step2list = {
+ ational: 'ate',
+ tional: 'tion',
+ enci: 'ence',
+ anci: 'ance',
+ izer: 'ize',
+ bli: 'ble',
+ alli: 'al',
+ entli: 'ent',
+ eli: 'e',
+ ousli: 'ous',
+ ization: 'ize',
+ ation: 'ate',
+ ator: 'ate',
+ alism: 'al',
+ iveness: 'ive',
+ fulness: 'ful',
+ ousness: 'ous',
+ aliti: 'al',
+ iviti: 'ive',
+ biliti: 'ble',
+ logi: 'log'
+ };
+
+ var step3list = {
+ icate: 'ic',
+ ative: '',
+ alize: 'al',
+ iciti: 'ic',
+ ical: 'ic',
+ ful: '',
+ ness: ''
+ };
+
+ var c = "[^aeiou]"; // consonant
+ var v = "[aeiouy]"; // vowel
+ var C = c + "[^aeiouy]*"; // consonant sequence
+ var V = v + "[aeiou]*"; // vowel sequence
+
+ var mgr0 = "^(" + C + ")?" + V + C; // [C]VC... is m>0
+ var meq1 = "^(" + C + ")?" + V + C + "(" + V + ")?$"; // [C]VC[V] is m=1
+ var mgr1 = "^(" + C + ")?" + V + C + V + C; // [C]VCVC... is m>1
+ var s_v = "^(" + C + ")?" + v; // vowel in stem
+
+ this.stemWord = function (w) {
+ var stem;
+ var suffix;
+ var firstch;
+ var origword = w;
+
+ if (w.length < 3)
+ return w;
+
+ var re;
+ var re2;
+ var re3;
+ var re4;
+
+ firstch = w.substr(0,1);
+ if (firstch == "y")
+ w = firstch.toUpperCase() + w.substr(1);
+
+ // Step 1a
+ re = /^(.+?)(ss|i)es$/;
+ re2 = /^(.+?)([^s])s$/;
+
+ if (re.test(w))
+ w = w.replace(re,"$1$2");
+ else if (re2.test(w))
+ w = w.replace(re2,"$1$2");
+
+ // Step 1b
+ re = /^(.+?)eed$/;
+ re2 = /^(.+?)(ed|ing)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ re = new RegExp(mgr0);
+ if (re.test(fp[1])) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1];
+ re2 = new RegExp(s_v);
+ if (re2.test(stem)) {
+ w = stem;
+ re2 = /(at|bl|iz)$/;
+ re3 = new RegExp("([^aeiouylsz])\\1$");
+ re4 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re2.test(w))
+ w = w + "e";
+ else if (re3.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+ else if (re4.test(w))
+ w = w + "e";
+ }
+ }
+
+ // Step 1c
+ re = /^(.+?)y$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(s_v);
+ if (re.test(stem))
+ w = stem + "i";
+ }
+
+ // Step 2
+ re = /^(.+?)(ational|tional|enci|anci|izer|bli|alli|entli|eli|ousli|ization|ation|ator|alism|iveness|fulness|ousness|aliti|iviti|biliti|logi)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step2list[suffix];
+ }
+
+ // Step 3
+ re = /^(.+?)(icate|ative|alize|iciti|ical|ful|ness)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ suffix = fp[2];
+ re = new RegExp(mgr0);
+ if (re.test(stem))
+ w = stem + step3list[suffix];
+ }
+
+ // Step 4
+ re = /^(.+?)(al|ance|ence|er|ic|able|ible|ant|ement|ment|ent|ou|ism|ate|iti|ous|ive|ize)$/;
+ re2 = /^(.+?)(s|t)(ion)$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ if (re.test(stem))
+ w = stem;
+ }
+ else if (re2.test(w)) {
+ var fp = re2.exec(w);
+ stem = fp[1] + fp[2];
+ re2 = new RegExp(mgr1);
+ if (re2.test(stem))
+ w = stem;
+ }
+
+ // Step 5
+ re = /^(.+?)e$/;
+ if (re.test(w)) {
+ var fp = re.exec(w);
+ stem = fp[1];
+ re = new RegExp(mgr1);
+ re2 = new RegExp(meq1);
+ re3 = new RegExp("^" + C + v + "[^aeiouwxy]$");
+ if (re.test(stem) || (re2.test(stem) && !(re3.test(stem))))
+ w = stem;
+ }
+ re = /ll$/;
+ re2 = new RegExp(mgr1);
+ if (re.test(w) && re2.test(w)) {
+ re = /.$/;
+ w = w.replace(re,"");
+ }
+
+ // and turn initial Y back to y
+ if (firstch == "y")
+ w = firstch.toLowerCase() + w.substr(1);
+ return w;
+ }
+}
+
diff --git a/_static/minus.png b/_static/minus.png
new file mode 100644
index 0000000..d96755f
Binary files /dev/null and b/_static/minus.png differ
diff --git a/_static/nbsphinx-broken-thumbnail.svg b/_static/nbsphinx-broken-thumbnail.svg
new file mode 100644
index 0000000..4919ca8
--- /dev/null
+++ b/_static/nbsphinx-broken-thumbnail.svg
@@ -0,0 +1,9 @@
+
+
+
+
diff --git a/_static/nbsphinx-code-cells.css b/_static/nbsphinx-code-cells.css
new file mode 100644
index 0000000..a3fb27c
--- /dev/null
+++ b/_static/nbsphinx-code-cells.css
@@ -0,0 +1,259 @@
+/* remove conflicting styling from Sphinx themes */
+div.nbinput.container div.prompt *,
+div.nboutput.container div.prompt *,
+div.nbinput.container div.input_area pre,
+div.nboutput.container div.output_area pre,
+div.nbinput.container div.input_area .highlight,
+div.nboutput.container div.output_area .highlight {
+ border: none;
+ padding: 0;
+ margin: 0;
+ box-shadow: none;
+}
+
+div.nbinput.container > div[class*=highlight],
+div.nboutput.container > div[class*=highlight] {
+ margin: 0;
+}
+
+div.nbinput.container div.prompt *,
+div.nboutput.container div.prompt * {
+ background: none;
+}
+
+div.nboutput.container div.output_area .highlight,
+div.nboutput.container div.output_area pre {
+ background: unset;
+}
+
+div.nboutput.container div.output_area div.highlight {
+ color: unset; /* override Pygments text color */
+}
+
+/* avoid gaps between output lines */
+div.nboutput.container div[class*=highlight] pre {
+ line-height: normal;
+}
+
+/* input/output containers */
+div.nbinput.container,
+div.nboutput.container {
+ display: -webkit-flex;
+ display: flex;
+ align-items: flex-start;
+ margin: 0;
+ width: 100%;
+}
+@media (max-width: 540px) {
+ div.nbinput.container,
+ div.nboutput.container {
+ flex-direction: column;
+ }
+}
+
+/* input container */
+div.nbinput.container {
+ padding-top: 5px;
+}
+
+/* last container */
+div.nblast.container {
+ padding-bottom: 5px;
+}
+
+/* input prompt */
+div.nbinput.container div.prompt pre,
+/* for sphinx_immaterial theme: */
+div.nbinput.container div.prompt pre > code {
+ color: #307FC1;
+}
+
+/* output prompt */
+div.nboutput.container div.prompt pre,
+/* for sphinx_immaterial theme: */
+div.nboutput.container div.prompt pre > code {
+ color: #BF5B3D;
+}
+
+/* all prompts */
+div.nbinput.container div.prompt,
+div.nboutput.container div.prompt {
+ width: 4.5ex;
+ padding-top: 5px;
+ position: relative;
+ user-select: none;
+}
+
+div.nbinput.container div.prompt > div,
+div.nboutput.container div.prompt > div {
+ position: absolute;
+ right: 0;
+ margin-right: 0.3ex;
+}
+
+@media (max-width: 540px) {
+ div.nbinput.container div.prompt,
+ div.nboutput.container div.prompt {
+ width: unset;
+ text-align: left;
+ padding: 0.4em;
+ }
+ div.nboutput.container div.prompt.empty {
+ padding: 0;
+ }
+
+ div.nbinput.container div.prompt > div,
+ div.nboutput.container div.prompt > div {
+ position: unset;
+ }
+}
+
+/* disable scrollbars and line breaks on prompts */
+div.nbinput.container div.prompt pre,
+div.nboutput.container div.prompt pre {
+ overflow: hidden;
+ white-space: pre;
+}
+
+/* input/output area */
+div.nbinput.container div.input_area,
+div.nboutput.container div.output_area {
+ -webkit-flex: 1;
+ flex: 1;
+ overflow: auto;
+}
+@media (max-width: 540px) {
+ div.nbinput.container div.input_area,
+ div.nboutput.container div.output_area {
+ width: 100%;
+ }
+}
+
+/* input area */
+div.nbinput.container div.input_area {
+ border: 1px solid #e0e0e0;
+ border-radius: 2px;
+ /*background: #f5f5f5;*/
+}
+
+/* override MathJax center alignment in output cells */
+div.nboutput.container div[class*=MathJax] {
+ text-align: left !important;
+}
+
+/* override sphinx.ext.imgmath center alignment in output cells */
+div.nboutput.container div.math p {
+ text-align: left;
+}
+
+/* standard error */
+div.nboutput.container div.output_area.stderr {
+ background: #fdd;
+}
+
+/* ANSI colors */
+.ansi-black-fg { color: #3E424D; }
+.ansi-black-bg { background-color: #3E424D; }
+.ansi-black-intense-fg { color: #282C36; }
+.ansi-black-intense-bg { background-color: #282C36; }
+.ansi-red-fg { color: #E75C58; }
+.ansi-red-bg { background-color: #E75C58; }
+.ansi-red-intense-fg { color: #B22B31; }
+.ansi-red-intense-bg { background-color: #B22B31; }
+.ansi-green-fg { color: #00A250; }
+.ansi-green-bg { background-color: #00A250; }
+.ansi-green-intense-fg { color: #007427; }
+.ansi-green-intense-bg { background-color: #007427; }
+.ansi-yellow-fg { color: #DDB62B; }
+.ansi-yellow-bg { background-color: #DDB62B; }
+.ansi-yellow-intense-fg { color: #B27D12; }
+.ansi-yellow-intense-bg { background-color: #B27D12; }
+.ansi-blue-fg { color: #208FFB; }
+.ansi-blue-bg { background-color: #208FFB; }
+.ansi-blue-intense-fg { color: #0065CA; }
+.ansi-blue-intense-bg { background-color: #0065CA; }
+.ansi-magenta-fg { color: #D160C4; }
+.ansi-magenta-bg { background-color: #D160C4; }
+.ansi-magenta-intense-fg { color: #A03196; }
+.ansi-magenta-intense-bg { background-color: #A03196; }
+.ansi-cyan-fg { color: #60C6C8; }
+.ansi-cyan-bg { background-color: #60C6C8; }
+.ansi-cyan-intense-fg { color: #258F8F; }
+.ansi-cyan-intense-bg { background-color: #258F8F; }
+.ansi-white-fg { color: #C5C1B4; }
+.ansi-white-bg { background-color: #C5C1B4; }
+.ansi-white-intense-fg { color: #A1A6B2; }
+.ansi-white-intense-bg { background-color: #A1A6B2; }
+
+.ansi-default-inverse-fg { color: #FFFFFF; }
+.ansi-default-inverse-bg { background-color: #000000; }
+
+.ansi-bold { font-weight: bold; }
+.ansi-underline { text-decoration: underline; }
+
+
+div.nbinput.container div.input_area div[class*=highlight] > pre,
+div.nboutput.container div.output_area div[class*=highlight] > pre,
+div.nboutput.container div.output_area div[class*=highlight].math,
+div.nboutput.container div.output_area.rendered_html,
+div.nboutput.container div.output_area > div.output_javascript,
+div.nboutput.container div.output_area:not(.rendered_html) > img{
+ padding: 5px;
+ margin: 0;
+}
+
+/* fix copybtn overflow problem in chromium (needed for 'sphinx_copybutton') */
+div.nbinput.container div.input_area > div[class^='highlight'],
+div.nboutput.container div.output_area > div[class^='highlight']{
+ overflow-y: hidden;
+}
+
+/* hide copy button on prompts for 'sphinx_copybutton' extension ... */
+.prompt .copybtn,
+/* ... and 'sphinx_immaterial' theme */
+.prompt .md-clipboard.md-icon {
+ display: none;
+}
+
+/* Some additional styling taken from the Jupyter notebook CSS */
+.jp-RenderedHTMLCommon table,
+div.rendered_html table {
+ border: none;
+ border-collapse: collapse;
+ border-spacing: 0;
+ color: black;
+ font-size: 12px;
+ table-layout: fixed;
+}
+.jp-RenderedHTMLCommon thead,
+div.rendered_html thead {
+ border-bottom: 1px solid black;
+ vertical-align: bottom;
+}
+.jp-RenderedHTMLCommon tr,
+.jp-RenderedHTMLCommon th,
+.jp-RenderedHTMLCommon td,
+div.rendered_html tr,
+div.rendered_html th,
+div.rendered_html td {
+ text-align: right;
+ vertical-align: middle;
+ padding: 0.5em 0.5em;
+ line-height: normal;
+ white-space: normal;
+ max-width: none;
+ border: none;
+}
+.jp-RenderedHTMLCommon th,
+div.rendered_html th {
+ font-weight: bold;
+}
+.jp-RenderedHTMLCommon tbody tr:nth-child(odd),
+div.rendered_html tbody tr:nth-child(odd) {
+ background: #f5f5f5;
+}
+.jp-RenderedHTMLCommon tbody tr:hover,
+div.rendered_html tbody tr:hover {
+ background: rgba(66, 165, 245, 0.2);
+}
+
diff --git a/_static/nbsphinx-gallery.css b/_static/nbsphinx-gallery.css
new file mode 100644
index 0000000..365c27a
--- /dev/null
+++ b/_static/nbsphinx-gallery.css
@@ -0,0 +1,31 @@
+.nbsphinx-gallery {
+ display: grid;
+ grid-template-columns: repeat(auto-fill, minmax(160px, 1fr));
+ gap: 5px;
+ margin-top: 1em;
+ margin-bottom: 1em;
+}
+
+.nbsphinx-gallery > a {
+ padding: 5px;
+ border: 1px dotted currentColor;
+ border-radius: 2px;
+ text-align: center;
+}
+
+.nbsphinx-gallery > a:hover {
+ border-style: solid;
+}
+
+.nbsphinx-gallery img {
+ max-width: 100%;
+ max-height: 100%;
+}
+
+.nbsphinx-gallery > a > div:first-child {
+ display: flex;
+ align-items: start;
+ justify-content: center;
+ height: 120px;
+ margin-bottom: 5px;
+}
diff --git a/_static/nbsphinx-no-thumbnail.svg b/_static/nbsphinx-no-thumbnail.svg
new file mode 100644
index 0000000..9dca758
--- /dev/null
+++ b/_static/nbsphinx-no-thumbnail.svg
@@ -0,0 +1,9 @@
+
+
+
+
diff --git a/_static/plus.png b/_static/plus.png
new file mode 100644
index 0000000..7107cec
Binary files /dev/null and b/_static/plus.png differ
diff --git a/_static/pygments.css b/_static/pygments.css
new file mode 100644
index 0000000..84ab303
--- /dev/null
+++ b/_static/pygments.css
@@ -0,0 +1,75 @@
+pre { line-height: 125%; }
+td.linenos .normal { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+span.linenos { color: inherit; background-color: transparent; padding-left: 5px; padding-right: 5px; }
+td.linenos .special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+span.linenos.special { color: #000000; background-color: #ffffc0; padding-left: 5px; padding-right: 5px; }
+.highlight .hll { background-color: #ffffcc }
+.highlight { background: #f8f8f8; }
+.highlight .c { color: #3D7B7B; font-style: italic } /* Comment */
+.highlight .err { border: 1px solid #FF0000 } /* Error */
+.highlight .k { color: #008000; font-weight: bold } /* Keyword */
+.highlight .o { color: #666666 } /* Operator */
+.highlight .ch { color: #3D7B7B; font-style: italic } /* Comment.Hashbang */
+.highlight .cm { color: #3D7B7B; font-style: italic } /* Comment.Multiline */
+.highlight .cp { color: #9C6500 } /* Comment.Preproc */
+.highlight .cpf { color: #3D7B7B; font-style: italic } /* Comment.PreprocFile */
+.highlight .c1 { color: #3D7B7B; font-style: italic } /* Comment.Single */
+.highlight .cs { color: #3D7B7B; font-style: italic } /* Comment.Special */
+.highlight .gd { color: #A00000 } /* Generic.Deleted */
+.highlight .ge { font-style: italic } /* Generic.Emph */
+.highlight .ges { font-weight: bold; font-style: italic } /* Generic.EmphStrong */
+.highlight .gr { color: #E40000 } /* Generic.Error */
+.highlight .gh { color: #000080; font-weight: bold } /* Generic.Heading */
+.highlight .gi { color: #008400 } /* Generic.Inserted */
+.highlight .go { color: #717171 } /* Generic.Output */
+.highlight .gp { color: #000080; font-weight: bold } /* Generic.Prompt */
+.highlight .gs { font-weight: bold } /* Generic.Strong */
+.highlight .gu { color: #800080; font-weight: bold } /* Generic.Subheading */
+.highlight .gt { color: #0044DD } /* Generic.Traceback */
+.highlight .kc { color: #008000; font-weight: bold } /* Keyword.Constant */
+.highlight .kd { color: #008000; font-weight: bold } /* Keyword.Declaration */
+.highlight .kn { color: #008000; font-weight: bold } /* Keyword.Namespace */
+.highlight .kp { color: #008000 } /* Keyword.Pseudo */
+.highlight .kr { color: #008000; font-weight: bold } /* Keyword.Reserved */
+.highlight .kt { color: #B00040 } /* Keyword.Type */
+.highlight .m { color: #666666 } /* Literal.Number */
+.highlight .s { color: #BA2121 } /* Literal.String */
+.highlight .na { color: #687822 } /* Name.Attribute */
+.highlight .nb { color: #008000 } /* Name.Builtin */
+.highlight .nc { color: #0000FF; font-weight: bold } /* Name.Class */
+.highlight .no { color: #880000 } /* Name.Constant */
+.highlight .nd { color: #AA22FF } /* Name.Decorator */
+.highlight .ni { color: #717171; font-weight: bold } /* Name.Entity */
+.highlight .ne { color: #CB3F38; font-weight: bold } /* Name.Exception */
+.highlight .nf { color: #0000FF } /* Name.Function */
+.highlight .nl { color: #767600 } /* Name.Label */
+.highlight .nn { color: #0000FF; font-weight: bold } /* Name.Namespace */
+.highlight .nt { color: #008000; font-weight: bold } /* Name.Tag */
+.highlight .nv { color: #19177C } /* Name.Variable */
+.highlight .ow { color: #AA22FF; font-weight: bold } /* Operator.Word */
+.highlight .w { color: #bbbbbb } /* Text.Whitespace */
+.highlight .mb { color: #666666 } /* Literal.Number.Bin */
+.highlight .mf { color: #666666 } /* Literal.Number.Float */
+.highlight .mh { color: #666666 } /* Literal.Number.Hex */
+.highlight .mi { color: #666666 } /* Literal.Number.Integer */
+.highlight .mo { color: #666666 } /* Literal.Number.Oct */
+.highlight .sa { color: #BA2121 } /* Literal.String.Affix */
+.highlight .sb { color: #BA2121 } /* Literal.String.Backtick */
+.highlight .sc { color: #BA2121 } /* Literal.String.Char */
+.highlight .dl { color: #BA2121 } /* Literal.String.Delimiter */
+.highlight .sd { color: #BA2121; font-style: italic } /* Literal.String.Doc */
+.highlight .s2 { color: #BA2121 } /* Literal.String.Double */
+.highlight .se { color: #AA5D1F; font-weight: bold } /* Literal.String.Escape */
+.highlight .sh { color: #BA2121 } /* Literal.String.Heredoc */
+.highlight .si { color: #A45A77; font-weight: bold } /* Literal.String.Interpol */
+.highlight .sx { color: #008000 } /* Literal.String.Other */
+.highlight .sr { color: #A45A77 } /* Literal.String.Regex */
+.highlight .s1 { color: #BA2121 } /* Literal.String.Single */
+.highlight .ss { color: #19177C } /* Literal.String.Symbol */
+.highlight .bp { color: #008000 } /* Name.Builtin.Pseudo */
+.highlight .fm { color: #0000FF } /* Name.Function.Magic */
+.highlight .vc { color: #19177C } /* Name.Variable.Class */
+.highlight .vg { color: #19177C } /* Name.Variable.Global */
+.highlight .vi { color: #19177C } /* Name.Variable.Instance */
+.highlight .vm { color: #19177C } /* Name.Variable.Magic */
+.highlight .il { color: #666666 } /* Literal.Number.Integer.Long */
\ No newline at end of file
diff --git a/_static/searchtools.js b/_static/searchtools.js
new file mode 100644
index 0000000..2c774d1
--- /dev/null
+++ b/_static/searchtools.js
@@ -0,0 +1,632 @@
+/*
+ * Sphinx JavaScript utilities for the full-text search.
+ */
+"use strict";
+
+/**
+ * Simple result scoring code.
+ */
+if (typeof Scorer === "undefined") {
+ var Scorer = {
+ // Implement the following function to further tweak the score for each result
+ // The function takes a result array [docname, title, anchor, descr, score, filename]
+ // and returns the new score.
+ /*
+ score: result => {
+ const [docname, title, anchor, descr, score, filename, kind] = result
+ return score
+ },
+ */
+
+ // query matches the full name of an object
+ objNameMatch: 11,
+ // or matches in the last dotted part of the object name
+ objPartialMatch: 6,
+ // Additive scores depending on the priority of the object
+ objPrio: {
+ 0: 15, // used to be importantResults
+ 1: 5, // used to be objectResults
+ 2: -5, // used to be unimportantResults
+ },
+ // Used when the priority is not in the mapping.
+ objPrioDefault: 0,
+
+ // query found in title
+ title: 15,
+ partialTitle: 7,
+ // query found in terms
+ term: 5,
+ partialTerm: 2,
+ };
+}
+
+// Global search result kind enum, used by themes to style search results.
+class SearchResultKind {
+ static get index() { return "index"; }
+ static get object() { return "object"; }
+ static get text() { return "text"; }
+ static get title() { return "title"; }
+}
+
+const _removeChildren = (element) => {
+ while (element && element.lastChild) element.removeChild(element.lastChild);
+};
+
+/**
+ * See https://developer.mozilla.org/en-US/docs/Web/JavaScript/Guide/Regular_Expressions#escaping
+ */
+const _escapeRegExp = (string) =>
+ string.replace(/[.*+\-?^${}()|[\]\\]/g, "\\$&"); // $& means the whole matched string
+
+const _displayItem = (item, searchTerms, highlightTerms) => {
+ const docBuilder = DOCUMENTATION_OPTIONS.BUILDER;
+ const docFileSuffix = DOCUMENTATION_OPTIONS.FILE_SUFFIX;
+ const docLinkSuffix = DOCUMENTATION_OPTIONS.LINK_SUFFIX;
+ const showSearchSummary = DOCUMENTATION_OPTIONS.SHOW_SEARCH_SUMMARY;
+ const contentRoot = document.documentElement.dataset.content_root;
+
+ const [docName, title, anchor, descr, score, _filename, kind] = item;
+
+ let listItem = document.createElement("li");
+ // Add a class representing the item's type:
+ // can be used by a theme's CSS selector for styling
+ // See SearchResultKind for the class names.
+ listItem.classList.add(`kind-${kind}`);
+ let requestUrl;
+ let linkUrl;
+ if (docBuilder === "dirhtml") {
+ // dirhtml builder
+ let dirname = docName + "/";
+ if (dirname.match(/\/index\/$/))
+ dirname = dirname.substring(0, dirname.length - 6);
+ else if (dirname === "index/") dirname = "";
+ requestUrl = contentRoot + dirname;
+ linkUrl = requestUrl;
+ } else {
+ // normal html builders
+ requestUrl = contentRoot + docName + docFileSuffix;
+ linkUrl = docName + docLinkSuffix;
+ }
+ let linkEl = listItem.appendChild(document.createElement("a"));
+ linkEl.href = linkUrl + anchor;
+ linkEl.dataset.score = score;
+ linkEl.innerHTML = title;
+ if (descr) {
+ listItem.appendChild(document.createElement("span")).innerHTML =
+ " (" + descr + ")";
+ // highlight search terms in the description
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ }
+ else if (showSearchSummary)
+ fetch(requestUrl)
+ .then((responseData) => responseData.text())
+ .then((data) => {
+ if (data)
+ listItem.appendChild(
+ Search.makeSearchSummary(data, searchTerms, anchor)
+ );
+ // highlight search terms in the summary
+ if (SPHINX_HIGHLIGHT_ENABLED) // set in sphinx_highlight.js
+ highlightTerms.forEach((term) => _highlightText(listItem, term, "highlighted"));
+ });
+ Search.output.appendChild(listItem);
+};
+const _finishSearch = (resultCount) => {
+ Search.stopPulse();
+ Search.title.innerText = _("Search Results");
+ if (!resultCount)
+ Search.status.innerText = Documentation.gettext(
+ "Your search did not match any documents. Please make sure that all words are spelled correctly and that you've selected enough categories."
+ );
+ else
+ Search.status.innerText = Documentation.ngettext(
+ "Search finished, found one page matching the search query.",
+ "Search finished, found ${resultCount} pages matching the search query.",
+ resultCount,
+ ).replace('${resultCount}', resultCount);
+};
+const _displayNextItem = (
+ results,
+ resultCount,
+ searchTerms,
+ highlightTerms,
+) => {
+ // results left, load the summary and display it
+ // this is intended to be dynamic (don't sub resultsCount)
+ if (results.length) {
+ _displayItem(results.pop(), searchTerms, highlightTerms);
+ setTimeout(
+ () => _displayNextItem(results, resultCount, searchTerms, highlightTerms),
+ 5
+ );
+ }
+ // search finished, update title and status message
+ else _finishSearch(resultCount);
+};
+// Helper function used by query() to order search results.
+// Each input is an array of [docname, title, anchor, descr, score, filename, kind].
+// Order the results by score (in opposite order of appearance, since the
+// `_displayNextItem` function uses pop() to retrieve items) and then alphabetically.
+const _orderResultsByScoreThenName = (a, b) => {
+ const leftScore = a[4];
+ const rightScore = b[4];
+ if (leftScore === rightScore) {
+ // same score: sort alphabetically
+ const leftTitle = a[1].toLowerCase();
+ const rightTitle = b[1].toLowerCase();
+ if (leftTitle === rightTitle) return 0;
+ return leftTitle > rightTitle ? -1 : 1; // inverted is intentional
+ }
+ return leftScore > rightScore ? 1 : -1;
+};
+
+/**
+ * Default splitQuery function. Can be overridden in ``sphinx.search`` with a
+ * custom function per language.
+ *
+ * The regular expression works by splitting the string on consecutive characters
+ * that are not Unicode letters, numbers, underscores, or emoji characters.
+ * This is the same as ``\W+`` in Python, preserving the surrogate pair area.
+ */
+if (typeof splitQuery === "undefined") {
+ var splitQuery = (query) => query
+ .split(/[^\p{Letter}\p{Number}_\p{Emoji_Presentation}]+/gu)
+ .filter(term => term) // remove remaining empty strings
+}
+
+/**
+ * Search Module
+ */
+const Search = {
+ _index: null,
+ _queued_query: null,
+ _pulse_status: -1,
+
+ htmlToText: (htmlString, anchor) => {
+ const htmlElement = new DOMParser().parseFromString(htmlString, 'text/html');
+ for (const removalQuery of [".headerlink", "script", "style"]) {
+ htmlElement.querySelectorAll(removalQuery).forEach((el) => { el.remove() });
+ }
+ if (anchor) {
+ const anchorContent = htmlElement.querySelector(`[role="main"] ${anchor}`);
+ if (anchorContent) return anchorContent.textContent;
+
+ console.warn(
+ `Anchored content block not found. Sphinx search tries to obtain it via DOM query '[role=main] ${anchor}'. Check your theme or template.`
+ );
+ }
+
+ // if anchor not specified or not found, fall back to main content
+ const docContent = htmlElement.querySelector('[role="main"]');
+ if (docContent) return docContent.textContent;
+
+ console.warn(
+ "Content block not found. Sphinx search tries to obtain it via DOM query '[role=main]'. Check your theme or template."
+ );
+ return "";
+ },
+
+ init: () => {
+ const query = new URLSearchParams(window.location.search).get("q");
+ document
+ .querySelectorAll('input[name="q"]')
+ .forEach((el) => (el.value = query));
+ if (query) Search.performSearch(query);
+ },
+
+ loadIndex: (url) =>
+ (document.body.appendChild(document.createElement("script")).src = url),
+
+ setIndex: (index) => {
+ Search._index = index;
+ if (Search._queued_query !== null) {
+ const query = Search._queued_query;
+ Search._queued_query = null;
+ Search.query(query);
+ }
+ },
+
+ hasIndex: () => Search._index !== null,
+
+ deferQuery: (query) => (Search._queued_query = query),
+
+ stopPulse: () => (Search._pulse_status = -1),
+
+ startPulse: () => {
+ if (Search._pulse_status >= 0) return;
+
+ const pulse = () => {
+ Search._pulse_status = (Search._pulse_status + 1) % 4;
+ Search.dots.innerText = ".".repeat(Search._pulse_status);
+ if (Search._pulse_status >= 0) window.setTimeout(pulse, 500);
+ };
+ pulse();
+ },
+
+ /**
+ * perform a search for something (or wait until index is loaded)
+ */
+ performSearch: (query) => {
+ // create the required interface elements
+ const searchText = document.createElement("h2");
+ searchText.textContent = _("Searching");
+ const searchSummary = document.createElement("p");
+ searchSummary.classList.add("search-summary");
+ searchSummary.innerText = "";
+ const searchList = document.createElement("ul");
+ searchList.setAttribute("role", "list");
+ searchList.classList.add("search");
+
+ const out = document.getElementById("search-results");
+ Search.title = out.appendChild(searchText);
+ Search.dots = Search.title.appendChild(document.createElement("span"));
+ Search.status = out.appendChild(searchSummary);
+ Search.output = out.appendChild(searchList);
+
+ const searchProgress = document.getElementById("search-progress");
+ // Some themes don't use the search progress node
+ if (searchProgress) {
+ searchProgress.innerText = _("Preparing search...");
+ }
+ Search.startPulse();
+
+ // index already loaded, the browser was quick!
+ if (Search.hasIndex()) Search.query(query);
+ else Search.deferQuery(query);
+ },
+
+ _parseQuery: (query) => {
+ // stem the search terms and add them to the correct list
+ const stemmer = new Stemmer();
+ const searchTerms = new Set();
+ const excludedTerms = new Set();
+ const highlightTerms = new Set();
+ const objectTerms = new Set(splitQuery(query.toLowerCase().trim()));
+ splitQuery(query.trim()).forEach((queryTerm) => {
+ const queryTermLower = queryTerm.toLowerCase();
+
+ // maybe skip this "word"
+ // stopwords array is from language_data.js
+ if (
+ stopwords.indexOf(queryTermLower) !== -1 ||
+ queryTerm.match(/^\d+$/)
+ )
+ return;
+
+ // stem the word
+ let word = stemmer.stemWord(queryTermLower);
+ // select the correct list
+ if (word[0] === "-") excludedTerms.add(word.substr(1));
+ else {
+ searchTerms.add(word);
+ highlightTerms.add(queryTermLower);
+ }
+ });
+
+ if (SPHINX_HIGHLIGHT_ENABLED) { // set in sphinx_highlight.js
+ localStorage.setItem("sphinx_highlight_terms", [...highlightTerms].join(" "))
+ }
+
+ // console.debug("SEARCH: searching for:");
+ // console.info("required: ", [...searchTerms]);
+ // console.info("excluded: ", [...excludedTerms]);
+
+ return [query, searchTerms, excludedTerms, highlightTerms, objectTerms];
+ },
+
+ /**
+ * execute search (requires search index to be loaded)
+ */
+ _performSearch: (query, searchTerms, excludedTerms, highlightTerms, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+ const allTitles = Search._index.alltitles;
+ const indexEntries = Search._index.indexentries;
+
+ // Collect multiple result groups to be sorted separately and then ordered.
+ // Each is an array of [docname, title, anchor, descr, score, filename, kind].
+ const normalResults = [];
+ const nonMainIndexResults = [];
+
+ _removeChildren(document.getElementById("search-progress"));
+
+ const queryLower = query.toLowerCase().trim();
+ for (const [title, foundTitles] of Object.entries(allTitles)) {
+ if (title.toLowerCase().trim().includes(queryLower) && (queryLower.length >= title.length/2)) {
+ for (const [file, id] of foundTitles) {
+ const score = Math.round(Scorer.title * queryLower.length / title.length);
+ const boost = titles[file] === title ? 1 : 0; // add a boost for document titles
+ normalResults.push([
+ docNames[file],
+ titles[file] !== title ? `${titles[file]} > ${title}` : title,
+ id !== null ? "#" + id : "",
+ null,
+ score + boost,
+ filenames[file],
+ SearchResultKind.title,
+ ]);
+ }
+ }
+ }
+
+ // search for explicit entries in index directives
+ for (const [entry, foundEntries] of Object.entries(indexEntries)) {
+ if (entry.includes(queryLower) && (queryLower.length >= entry.length/2)) {
+ for (const [file, id, isMain] of foundEntries) {
+ const score = Math.round(100 * queryLower.length / entry.length);
+ const result = [
+ docNames[file],
+ titles[file],
+ id ? "#" + id : "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.index,
+ ];
+ if (isMain) {
+ normalResults.push(result);
+ } else {
+ nonMainIndexResults.push(result);
+ }
+ }
+ }
+ }
+
+ // lookup as object
+ objectTerms.forEach((term) =>
+ normalResults.push(...Search.performObjectSearch(term, objectTerms))
+ );
+
+ // lookup as search terms in fulltext
+ normalResults.push(...Search.performTermsSearch(searchTerms, excludedTerms));
+
+ // let the scorer override scores with a custom scoring function
+ if (Scorer.score) {
+ normalResults.forEach((item) => (item[4] = Scorer.score(item)));
+ nonMainIndexResults.forEach((item) => (item[4] = Scorer.score(item)));
+ }
+
+ // Sort each group of results by score and then alphabetically by name.
+ normalResults.sort(_orderResultsByScoreThenName);
+ nonMainIndexResults.sort(_orderResultsByScoreThenName);
+
+ // Combine the result groups in (reverse) order.
+ // Non-main index entries are typically arbitrary cross-references,
+ // so display them after other results.
+ let results = [...nonMainIndexResults, ...normalResults];
+
+ // remove duplicate search results
+ // note the reversing of results, so that in the case of duplicates, the highest-scoring entry is kept
+ let seen = new Set();
+ results = results.reverse().reduce((acc, result) => {
+ let resultStr = result.slice(0, 4).concat([result[5]]).map(v => String(v)).join(',');
+ if (!seen.has(resultStr)) {
+ acc.push(result);
+ seen.add(resultStr);
+ }
+ return acc;
+ }, []);
+
+ return results.reverse();
+ },
+
+ query: (query) => {
+ const [searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms] = Search._parseQuery(query);
+ const results = Search._performSearch(searchQuery, searchTerms, excludedTerms, highlightTerms, objectTerms);
+
+ // for debugging
+ //Search.lastresults = results.slice(); // a copy
+ // console.info("search results:", Search.lastresults);
+
+ // print the results
+ _displayNextItem(results, results.length, searchTerms, highlightTerms);
+ },
+
+ /**
+ * search for object names
+ */
+ performObjectSearch: (object, objectTerms) => {
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const objects = Search._index.objects;
+ const objNames = Search._index.objnames;
+ const titles = Search._index.titles;
+
+ const results = [];
+
+ const objectSearchCallback = (prefix, match) => {
+ const name = match[4]
+ const fullname = (prefix ? prefix + "." : "") + name;
+ const fullnameLower = fullname.toLowerCase();
+ if (fullnameLower.indexOf(object) < 0) return;
+
+ let score = 0;
+ const parts = fullnameLower.split(".");
+
+ // check for different match types: exact matches of full name or
+ // "last name" (i.e. last dotted part)
+ if (fullnameLower === object || parts.slice(-1)[0] === object)
+ score += Scorer.objNameMatch;
+ else if (parts.slice(-1)[0].indexOf(object) > -1)
+ score += Scorer.objPartialMatch; // matches in last name
+
+ const objName = objNames[match[1]][2];
+ const title = titles[match[0]];
+
+ // If more than one term searched for, we require other words to be
+ // found in the name/title/description
+ const otherTerms = new Set(objectTerms);
+ otherTerms.delete(object);
+ if (otherTerms.size > 0) {
+ const haystack = `${prefix} ${name} ${objName} ${title}`.toLowerCase();
+ if (
+ [...otherTerms].some((otherTerm) => haystack.indexOf(otherTerm) < 0)
+ )
+ return;
+ }
+
+ let anchor = match[3];
+ if (anchor === "") anchor = fullname;
+ else if (anchor === "-") anchor = objNames[match[1]][1] + "-" + fullname;
+
+ const descr = objName + _(", in ") + title;
+
+ // add custom score for some objects according to scorer
+ if (Scorer.objPrio.hasOwnProperty(match[2]))
+ score += Scorer.objPrio[match[2]];
+ else score += Scorer.objPrioDefault;
+
+ results.push([
+ docNames[match[0]],
+ fullname,
+ "#" + anchor,
+ descr,
+ score,
+ filenames[match[0]],
+ SearchResultKind.object,
+ ]);
+ };
+ Object.keys(objects).forEach((prefix) =>
+ objects[prefix].forEach((array) =>
+ objectSearchCallback(prefix, array)
+ )
+ );
+ return results;
+ },
+
+ /**
+ * search for full-text terms in the index
+ */
+ performTermsSearch: (searchTerms, excludedTerms) => {
+ // prepare search
+ const terms = Search._index.terms;
+ const titleTerms = Search._index.titleterms;
+ const filenames = Search._index.filenames;
+ const docNames = Search._index.docnames;
+ const titles = Search._index.titles;
+
+ const scoreMap = new Map();
+ const fileMap = new Map();
+
+ // perform the search on the required terms
+ searchTerms.forEach((word) => {
+ const files = [];
+ const arr = [
+ { files: terms[word], score: Scorer.term },
+ { files: titleTerms[word], score: Scorer.title },
+ ];
+ // add support for partial matches
+ if (word.length > 2) {
+ const escapedWord = _escapeRegExp(word);
+ if (!terms.hasOwnProperty(word)) {
+ Object.keys(terms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: terms[term], score: Scorer.partialTerm });
+ });
+ }
+ if (!titleTerms.hasOwnProperty(word)) {
+ Object.keys(titleTerms).forEach((term) => {
+ if (term.match(escapedWord))
+ arr.push({ files: titleTerms[term], score: Scorer.partialTitle });
+ });
+ }
+ }
+
+ // no match but word was a required one
+ if (arr.every((record) => record.files === undefined)) return;
+
+ // found search word in contents
+ arr.forEach((record) => {
+ if (record.files === undefined) return;
+
+ let recordFiles = record.files;
+ if (recordFiles.length === undefined) recordFiles = [recordFiles];
+ files.push(...recordFiles);
+
+ // set score for the word in each file
+ recordFiles.forEach((file) => {
+ if (!scoreMap.has(file)) scoreMap.set(file, {});
+ scoreMap.get(file)[word] = record.score;
+ });
+ });
+
+ // create the mapping
+ files.forEach((file) => {
+ if (!fileMap.has(file)) fileMap.set(file, [word]);
+ else if (fileMap.get(file).indexOf(word) === -1) fileMap.get(file).push(word);
+ });
+ });
+
+ // now check if the files don't contain excluded terms
+ const results = [];
+ for (const [file, wordList] of fileMap) {
+ // check if all requirements are matched
+
+ // as search terms with length < 3 are discarded
+ const filteredTermCount = [...searchTerms].filter(
+ (term) => term.length > 2
+ ).length;
+ if (
+ wordList.length !== searchTerms.size &&
+ wordList.length !== filteredTermCount
+ )
+ continue;
+
+ // ensure that none of the excluded terms is in the search result
+ if (
+ [...excludedTerms].some(
+ (term) =>
+ terms[term] === file ||
+ titleTerms[term] === file ||
+ (terms[term] || []).includes(file) ||
+ (titleTerms[term] || []).includes(file)
+ )
+ )
+ break;
+
+ // select one (max) score for the file.
+ const score = Math.max(...wordList.map((w) => scoreMap.get(file)[w]));
+ // add result to the result list
+ results.push([
+ docNames[file],
+ titles[file],
+ "",
+ null,
+ score,
+ filenames[file],
+ SearchResultKind.text,
+ ]);
+ }
+ return results;
+ },
+
+ /**
+ * helper function to return a node containing the
+ * search summary for a given text. keywords is a list
+ * of stemmed words.
+ */
+ makeSearchSummary: (htmlText, keywords, anchor) => {
+ const text = Search.htmlToText(htmlText, anchor);
+ if (text === "") return null;
+
+ const textLower = text.toLowerCase();
+ const actualStartPosition = [...keywords]
+ .map((k) => textLower.indexOf(k.toLowerCase()))
+ .filter((i) => i > -1)
+ .slice(-1)[0];
+ const startWithContext = Math.max(actualStartPosition - 120, 0);
+
+ const top = startWithContext === 0 ? "" : "...";
+ const tail = startWithContext + 240 < text.length ? "..." : "";
+
+ let summary = document.createElement("p");
+ summary.classList.add("context");
+ summary.textContent = top + text.substr(startWithContext, 240).trim() + tail;
+
+ return summary;
+ },
+};
+
+_ready(Search.init);
diff --git a/_static/sphinx_highlight.js b/_static/sphinx_highlight.js
new file mode 100644
index 0000000..8a96c69
--- /dev/null
+++ b/_static/sphinx_highlight.js
@@ -0,0 +1,154 @@
+/* Highlighting utilities for Sphinx HTML documentation. */
+"use strict";
+
+const SPHINX_HIGHLIGHT_ENABLED = true
+
+/**
+ * highlight a given string on a node by wrapping it in
+ * span elements with the given class name.
+ */
+const _highlight = (node, addItems, text, className) => {
+ if (node.nodeType === Node.TEXT_NODE) {
+ const val = node.nodeValue;
+ const parent = node.parentNode;
+ const pos = val.toLowerCase().indexOf(text);
+ if (
+ pos >= 0 &&
+ !parent.classList.contains(className) &&
+ !parent.classList.contains("nohighlight")
+ ) {
+ let span;
+
+ const closestNode = parent.closest("body, svg, foreignObject");
+ const isInSVG = closestNode && closestNode.matches("svg");
+ if (isInSVG) {
+ span = document.createElementNS("http://www.w3.org/2000/svg", "tspan");
+ } else {
+ span = document.createElement("span");
+ span.classList.add(className);
+ }
+
+ span.appendChild(document.createTextNode(val.substr(pos, text.length)));
+ const rest = document.createTextNode(val.substr(pos + text.length));
+ parent.insertBefore(
+ span,
+ parent.insertBefore(
+ rest,
+ node.nextSibling
+ )
+ );
+ node.nodeValue = val.substr(0, pos);
+ /* There may be more occurrences of search term in this node. So call this
+ * function recursively on the remaining fragment.
+ */
+ _highlight(rest, addItems, text, className);
+
+ if (isInSVG) {
+ const rect = document.createElementNS(
+ "http://www.w3.org/2000/svg",
+ "rect"
+ );
+ const bbox = parent.getBBox();
+ rect.x.baseVal.value = bbox.x;
+ rect.y.baseVal.value = bbox.y;
+ rect.width.baseVal.value = bbox.width;
+ rect.height.baseVal.value = bbox.height;
+ rect.setAttribute("class", className);
+ addItems.push({ parent: parent, target: rect });
+ }
+ }
+ } else if (node.matches && !node.matches("button, select, textarea")) {
+ node.childNodes.forEach((el) => _highlight(el, addItems, text, className));
+ }
+};
+const _highlightText = (thisNode, text, className) => {
+ let addItems = [];
+ _highlight(thisNode, addItems, text, className);
+ addItems.forEach((obj) =>
+ obj.parent.insertAdjacentElement("beforebegin", obj.target)
+ );
+};
+
+/**
+ * Small JavaScript module for the documentation.
+ */
+const SphinxHighlight = {
+
+ /**
+ * highlight the search words provided in localstorage in the text
+ */
+ highlightSearchWords: () => {
+ if (!SPHINX_HIGHLIGHT_ENABLED) return; // bail if no highlight
+
+ // get and clear terms from localstorage
+ const url = new URL(window.location);
+ const highlight =
+ localStorage.getItem("sphinx_highlight_terms")
+ || url.searchParams.get("highlight")
+ || "";
+ localStorage.removeItem("sphinx_highlight_terms")
+ url.searchParams.delete("highlight");
+ window.history.replaceState({}, "", url);
+
+ // get individual terms from highlight string
+ const terms = highlight.toLowerCase().split(/\s+/).filter(x => x);
+ if (terms.length === 0) return; // nothing to do
+
+ // There should never be more than one element matching "div.body"
+ const divBody = document.querySelectorAll("div.body");
+ const body = divBody.length ? divBody[0] : document.querySelector("body");
+ window.setTimeout(() => {
+ terms.forEach((term) => _highlightText(body, term, "highlighted"));
+ }, 10);
+
+ const searchBox = document.getElementById("searchbox");
+ if (searchBox === null) return;
+ searchBox.appendChild(
+ document
+ .createRange()
+ .createContextualFragment(
+          '<p class="highlight-link">' +
+            '<a href="javascript:SphinxHighlight.hideSearchWords()">' +
+            _("Hide Search Matches") +
+            "</a></p>"
+ )
+ );
+ },
+
+ /**
+ * helper function to hide the search marks again
+ */
+ hideSearchWords: () => {
+ document
+ .querySelectorAll("#searchbox .highlight-link")
+ .forEach((el) => el.remove());
+ document
+ .querySelectorAll("span.highlighted")
+ .forEach((el) => el.classList.remove("highlighted"));
+ localStorage.removeItem("sphinx_highlight_terms")
+ },
+
+ initEscapeListener: () => {
+ // only install a listener if it is really needed
+ if (!DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS) return;
+
+ document.addEventListener("keydown", (event) => {
+ // bail for input elements
+ if (BLACKLISTED_KEY_CONTROL_ELEMENTS.has(document.activeElement.tagName)) return;
+ // bail with special keys
+ if (event.shiftKey || event.altKey || event.ctrlKey || event.metaKey) return;
+ if (DOCUMENTATION_OPTIONS.ENABLE_SEARCH_SHORTCUTS && (event.key === "Escape")) {
+ SphinxHighlight.hideSearchWords();
+ event.preventDefault();
+ }
+ });
+ },
+};
+
+_ready(() => {
+ /* Do not call highlightSearchWords() when we are on the search page.
+ * It will highlight words from the *previous* search query.
+ */
+ if (typeof Search === "undefined") SphinxHighlight.highlightSearchWords();
+ SphinxHighlight.initEscapeListener();
+});
diff --git a/api_reference.html b/api_reference.html
new file mode 100644
index 0000000..a7f0f41
--- /dev/null
+++ b/api_reference.html
@@ -0,0 +1,593 @@
+
+
+
+
+
+
+
+
+
API Reference — tabmemcheck 0.1.6 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tabmemcheck
+
+
+
+
+
+
+
+
+
+API Reference
+
+Tests for tabular datasets (based on csv files)
+
+
+tabmemcheck. feature_completion_test ( csv_file : str , llm : LLM_Interface | str , feature_name : str = None , num_queries = 25 , few_shot = 5 , out_file = None , system_prompt : str = 'default' )
+Feature completion test for memorization. The test reports the number of correctly completed features.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+feature_name – The name of the feature to be used for the test.
+num_queries – The number of feature values that we test the model on.
+few_shot – The number of few-shot examples to be used.
+out_file – Optionally save all queries and responses to a csv file.
+system_prompt – The system prompt to be used.
+
+
+Returns:
+the feature values, the model responses.
+
+
+
+
+
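For orientation, a minimal usage sketch of the feature completion test. The model name, csv path, and feature name are placeholders, and the keyword arguments simply mirror the signature documented above (an OpenAI API key is assumed to be configured):

import tabmemcheck

# Placeholder model and dataset; assumes OPENAI_API_KEY is set in the environment.
llm = tabmemcheck.openai_setup("gpt-3.5-turbo-0613")
feature_values, responses = tabmemcheck.feature_completion_test(
    "adult-train.csv",        # path to the csv file under test (placeholder)
    llm,
    feature_name="fnlwgt",    # hypothetical column name; omit to use the most unique feature
    num_queries=25,
    few_shot=5,
)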
+
+tabmemcheck. feature_names_test ( csv_file : str , llm : LLM_Interface | str , num_prefix_features : int = None , few_shot_csv_files = ['iris.csv', 'adult-train.csv', 'openml-diabetes.csv', 'uci-wine.csv', 'california-housing.csv'] , system_prompt : str = 'default' )
+Test if the model knows the names of the features in a csv file.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+num_prefix_features – The number of features given to the model as part of the prompt (defaults to 1/4 of the features).
+few_shot_csv_files – A list of other csv files to be used as few-shot examples.
+system_prompt – The system prompt to be used.
+
+
+
+
+
+
+
+tabmemcheck. first_token_test ( csv_file : str , llm : LLM_Interface | str , num_prefix_rows = 10 , num_queries = 25 , few_shot = 7 , out_file = None , system_prompt : str = 'default' )
+First token test for memorization. We ask the model to complete the first token of the next row of the csv file, given the previous rows. The test reports the number of correctly completed tokens.
+Note that the 'first token' is not actually the first token produced by the LLM; it consists of the first n digits of the row. The number of digits is determined by the function build_first_token.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+num_prefix_rows – The number of rows given to the model as part of the prompt.
+num_queries – The number of rows that we test the model on.
+few_shot – The number of few-shot examples to be used.
+out_file – Optionally save all queries and responses to a csv file.
+system_prompt – The system prompt to be used.
+
+
+
+
+
+
+
+Header test for memorization.
+We split the csv file at random positions in rows split_rows and perform one query for each split. Then we compare the best completion with the actual header.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+split_rows – The rows at which the csv file is split for the test.
+completion_length – The length of the completions in the few-shot examples (reduce for LLMs with small context windows).
+few_shot_csv_files – A list of other csv files to be used as few-shot examples.
+system_prompt – The system prompt to be used.
+
+
+Returns:
+The header prompt, the actual header completion, and the model response.
+
+
+
+
+
+
+tabmemcheck. row_completion_test ( csv_file : str , llm : LLM_Interface | str , num_prefix_rows = 10 , num_queries = 25 , few_shot = 7 , out_file = None , system_prompt : str = 'default' , print_levenshtein : bool = True )
+Row completion test for memorization. The test reports the number of correctly completed rows.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+num_prefix_rows – The number of rows given to the model as part of the prompt.
+num_queries – The number of rows that we test the model on.
+few_shot – The number of few-shot examples to be used.
+out_file – Optionally save all queries and responses to a csv file.
+system_prompt – The system prompt to be used.
+print_levenshtein – Print a visualization of the Levenshtein distance between the model responses and the actual rows.
+
+
+Returns:
+the rows, the model responses.
+
+
+
+
+
+
+tabmemcheck. run_all_tests ( csv_file : str , llm : LLM_Interface | str , few_shot_csv_files = ['iris.csv', 'adult-train.csv', 'openml-diabetes.csv', 'uci-wine.csv', 'california-housing.csv'] , unique_feature : str = None )
+Run different tests for memorization and prior experience with the content of the csv file.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+few_shot_csv_files – A list of other csv files to be used as few-shot examples.
+unique_feature – The name of the feature to be used for the feature completion test.
+
+
+
+
+
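A minimal sketch of running the full test suite. The model name and csv path are placeholders; per the signature above, llm may also be passed as a plain model-name string:

import tabmemcheck

# Placeholder model and dataset; assumes OPENAI_API_KEY is set in the environment.
llm = tabmemcheck.openai_setup("gpt-3.5-turbo-0613")
tabmemcheck.run_all_tests("adult-train.csv", llm)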
+
+
+tabmemcheck. sample ( csv_file : str , llm : LLM_Interface | str , num_queries : int , temperature : float = 0.7 , few_shot_csv_files : list [ str ] = ['iris.csv', 'adult-train.csv', 'openml-diabetes.csv', 'uci-wine.csv', 'california-housing.csv'] , cond_feature_names : list [ str ] = [] , drop_invalid_responses : bool = True , print_invalid_responses : bool = False , out_file = None , system_prompt : str = 'default' )
+Ask the model to provide random samples from the csv file.
+
+Parameters:
+
+csv_file – The path to the csv file.
+llm – The language model to be tested.
+num_queries – The desired number of samples.
+few_shot_csv_files – A list of other csv files to be used as few-shot examples.
+out_file – Optionally save all queries and responses to a csv file.
+system_prompt – The system prompt to be used.
+
+
+
+
+
+
+
+Tabular dataset loading (original, perturbed, task, statistical)
+
+
+tabmemcheck.datasets. load_adult ( csv_file : str = 'adult-train.csv' , * args , ** kwargs )
+Load the Adult Income dataset (http://www.cs.toronto.edu/~delve/data/adult/adultDetail.html ).
+
+
+
+
+tabmemcheck.datasets. load_dataset ( csv_file : str , yaml_config : str = None , transform : str = 'plain' , permute_columns = False , print_stats = False , seed = None )
+Load a dataset from a CSV file and apply transformations as specified in a YAML configuration file.
+
+Parameters:
+
+csv_file (str ) – The path to the CSV file.
+yaml_config (str , optional ) – The path to the YAML configuration file. Defaults to None.
+transform (str , optional ) – The type of transformation to apply (‘original’, ‘perturbed’, ‘task’, ‘statistical’).
+permute_columns (bool , optional ) – Whether to permute the columns in the perturbed version. Defaults to False.
+print_stats (bool , optional ) – Whether to print statistics about the transformation. Defaults to False.
+seed (optional ) – The seed for the numpy random number generator. Defaults to None.
+
+
+Returns:
+The transformed dataset.
+
+Return type:
+pandas.DataFrame
+
+
+
+
+
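A usage sketch for load_dataset, assuming a dataset csv and a YAML configuration file are available locally (both paths are placeholders):

from tabmemcheck.datasets import load_dataset

# Placeholder paths; the YAML file specifies the perturbations to apply.
df = load_dataset(
    "iris.csv",
    yaml_config="iris.yaml",
    transform="perturbed",
    permute_columns=False,
    seed=42,
)
print(df.head())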
+
+tabmemcheck.datasets. load_housing ( csv_file : str = 'california-housing.csv' , * args , ** kwargs )
+Load the California Housing dataset (https://inria.github.io/scikit-learn-mooc/python_scripts/datasets_california_housing.html ).
+
+
+
+
+tabmemcheck.datasets. load_iris ( csv_file : str = 'iris.csv' , * args , ** kwargs )
+Load the Iris dataset (https://archive.ics.uci.edu/ml/datasets/iris ).
+
+
+
+
+tabmemcheck.datasets. load_openml_diabetes ( csv_file : str = 'openml-diabetes.csv' , * args , ** kwargs )
+Load the OpenML Diabetes dataset (https://www.openml.org/d/37 ).
+
+
+
+
+tabmemcheck.datasets. load_wine ( csv_file : str = 'iris.csv' , * args , ** kwargs )
+Load the UCI Wine dataset (https://archive.ics.uci.edu/dataset/109/wine ).
+
+
+
+
+LLM
+
+
+class tabmemcheck. LLM_Interface
+Bases: object
+Generic interface to a language model.
+
+
+chat_completion ( messages , temperature : float , max_tokens : int )
+Send a query to a chat model.
+
+Parameters:
+
+messages – The messages to send to the model. We use the OpenAI format.
+temperature – The sampling temperature.
+max_tokens – The maximum number of tokens to generate.
+
+
+Returns:
+The model response.
+
+Return type:
+str
+
+
+
+
+
+
+completion ( prompt : str , temperature : float , max_tokens : int )
+Send a query to a language model.
+
+Parameters:
+
+prompt – The prompt (string) to send to the model.
+temperature – The sampling temperature.
+max_tokens – The maximum number of tokens to generate.
+
+
+Returns:
+The model response.
+
+Return type:
+str
+
+
+
+
+
+
+
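To test a model that is not served through OpenAI, one can subclass LLM_Interface and implement the two methods documented above. The sketch below returns a canned string in place of real model calls; a real adapter would forward the request to the model's API.

import tabmemcheck

class MyLLM(tabmemcheck.LLM_Interface):
    """Hypothetical adapter around a custom model endpoint."""

    def chat_completion(self, messages, temperature: float, max_tokens: int) -> str:
        # messages use the OpenAI format, e.g. [{"role": "user", "content": "..."}]
        return "dummy response"

    def completion(self, prompt: str, temperature: float, max_tokens: int) -> str:
        return "dummy response"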
+
+tabmemcheck. openai_setup ( model : str , azure : bool = False , * args , ** kwargs )
+Setup an OpenAI language model.
+
+Parameters:
+
+model – The name of the model (e.g. “gpt-3.5-turbo-0613”).
+azure – If true, use a model deployed on azure.
+
+
+
+This function uses the following environment variables:
+
+OPENAI_API_KEY
+OPENAI_API_ORG
+AZURE_OPENAI_ENDPOINT
+AZURE_OPENAI_KEY
+AZURE_OPENAI_VERSION
+
+
+Returns:
+An LLM to work with!
+
+Return type:
+LLM_Interface
+
+
+
+
+
+
+tabmemcheck. send_chat_completion ( llm : LLM_Interface , messages , max_tokens = None , logfile = None )
+Ask the LLM to perform a chat_completion, with additional bells and whistles (logging, printing).
+
+
+
+
+tabmemcheck. send_completion ( llm : LLM_Interface , prompt , max_tokens = None , logfile = None )
+Ask the LLM to perform a completion, with additional bells and whistles (logging, printing).
+
+
+
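A small sketch combining openai_setup with the send_chat_completion helper. The model name and log file path are placeholders, and the environment variables listed above are assumed to be set:

import tabmemcheck

llm = tabmemcheck.openai_setup("gpt-3.5-turbo-0613")
response = tabmemcheck.send_chat_completion(
    llm,
    [{"role": "user", "content": "Hello"}],
    max_tokens=50,
    logfile="queries.log",  # hypothetical log file path
)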
+
+Analysis
+
+
+tabmemcheck.analysis. build_first_token ( csv_file , verbose = False )
+Given a csv file, build a first token that can be used in the first token test.
+The first token is constructed by taking the first n digits of every row in the csv file (that is, this functions determines the n).
+Using the first n digits improves upon using the first digit on datasets where the first digit is always the same or contains few distinct values.
+Note: This function does NOT check if the constructed first token is random.
+
+Parameters:
+
+
+Returns:
+the number of digits that make up the first token.
+
+
+
+
+
+
+tabmemcheck.analysis. find_matches ( df: ~pandas.core.frame.DataFrame , x , string_dist_fn=<function levenshtein_distances> , match_floating_point=True , strip_quotation_marks=True )
+Find the closest matches between a row x and all rows in the dataframe df. By default, we use the levenshtein distance as the distance metric.
+This function can handle some formatting differences between the values in the original data and LLM responses that should still be counted as equal.
+
+Parameters:
+
+df – a pandas dataframe.
+x – a string, a pandas dataframe or a pandas Series.
+string_dist_fn – a function that computes the distance between two strings. By default, this is the levenshtein distance.
+match_floating_point – if True, handles floating point formatting differences, e.g. 0.28 vs. .280 or 172 vs 172.0 (default: True).
+strip_quotation_marks – if True, strips quotation marks from the values in df and x (to handle the case where a model responds with “23853”, and the value in the data is 23853) (default: True).
+
+
+Returns:
+the minimum distance and the matching rows in df.
+
+
+
+
+
+
+tabmemcheck.analysis.find_most_unique_feature(csv_file)
+Given a csv file, find the feature that has the most unique values. This is the default feature used for the feature completion test.
+
+Parameters:
+csv_file – the path to the csv file.
+
+Returns:
+the name of the most unique feature and the fraction of unique values.
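+An illustrative call (the csv path is made up; the two return values are unpacked as described above):
+ from tabmemcheck.analysis import find_most_unique_feature
+
+ feature_name, frac_unique = find_most_unique_feature("titanic-train.csv")
+ print(feature_name, frac_unique)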
+
+
+
+
+
+
+Utilities
+
+
+tabmemcheck.utils.get_dataset_name(csv_file)
+Returns the name of the dataset.
+
+
+
+
+tabmemcheck.utils.get_delimiter(csv_file)
+Returns the delimiter of a csv file.
+
+
+
+
+tabmemcheck.utils.get_feature_names(csv_file)
+Returns the names of the features in a csv file (a list of strings).
+
+
+
+
+tabmemcheck.utils.levenshtein_cmd(a: str, b: str)
+Visualization of the Levenshtein distance between a and b, using color codes to be printed in the console.
+
+
+
+
+tabmemcheck.utils.levenshtein_html(a: str, b: str)
+HTML visualization of the Levenshtein distance between a and b.
+
+
+
+
+tabmemcheck.utils.load_csv_array(csv_file, add_feature_names=False)
+Load a csv file as a 2d numpy array where each entry is a string.
+
+Parameters:
+add_feature_names – if true, then each entry will have the format “feature_name = feature_value”.
+
+Returns:
+a 2d numpy array of strings.
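+A short sketch (the printed value depends on the csv header and is only illustrative):
+ import tabmemcheck.utils as utils
+
+ X = utils.load_csv_array("iris.csv", add_feature_names=True)
+ print(X.shape)   # (number of rows, number of features)
+ print(X[0, 0])   # e.g. "sepal_length = 5.1"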
+
+
+
+
+
+
+tabmemcheck.utils.load_csv_rows(csv_file, header=True)
+Load a csv file as a list of strings, with one string per row.
+
+
+
+
+tabmemcheck.utils.load_csv_string(csv_file, header=True, size=10000000)
+Load a csv file as a single string.
+
+
+
+
+tabmemcheck.utils.load_samples(csv_file, add_feature_names=True)
+Load a csv file as a list of “Feature name = Feature value” strings.
+
+Returns:
+description, samples
+
+
+
+
+
+
+tabmemcheck.utils.parse_feature_stings(strings, feature_names, **kwargs)
+Parse a list of feature strings into a pandas dataframe.
+
+
+
+
+tabmemcheck.utils.parse_feature_string(s, feature_names, as_dict=False, in_list=False, final_delimiter=',')
+Parse a string (model response) of the form “feature_name = feature_value, feature_name = feature_value, …” into a pandas dataframe.
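+A hedged example of parsing a model response (the feature names and the response text are made up):
+ from tabmemcheck.utils import parse_feature_string
+
+ feature_names = ["sepal_length", "sepal_width"]
+ response = "sepal_length = 5.1, sepal_width = 3.5"
+ row_df = parse_feature_string(response, feature_names)  # parsed into a pandas dataframe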
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/genindex.html b/genindex.html
new file mode 100644
index 0000000..83f9608
--- /dev/null
+++ b/genindex.html
@@ -0,0 +1,314 @@
+
+
+
+
+
+
+
+
Index — tabmemcheck 0.1.6 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tabmemcheck
+
+
+
+
+
+
+
+
+
+
Index
+
+
+
+
+ tabmemcheck
+
+
+
+ tabmemcheck.analysis
+
+
+
+
+
+ tabmemcheck.datasets
+
+
+
+ tabmemcheck.utils
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/index.html b/index.html
new file mode 100644
index 0000000..46d76dc
--- /dev/null
+++ b/index.html
@@ -0,0 +1,187 @@
+
+
+
+
+
+
+
+
+
Tabmemcheck — tabmemcheck 0.1.6 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tabmemcheck
+
+
+
+
+
+
+
+
+
+Tabmemcheck
+
+
+Tabmemcheck is an open-source Python library to test language models for memorization of tabular datasets. It provides four different tests for verbatim memorization of a tabular dataset (header test, row completion test, feature completion test, first token test).
+The submodule tabmemcheck.datasets allows loading tabular datasets in perturbed form (original, perturbed, task, statistical). The perturbations are specified in a YAML file for each dataset. Examples are contained in tabmemcheck.resources.config.transform.
+
+
+
+Row Completion Test – Ask the LLM to complete random rows of a csv file
+The example provides evidence of memorization of the Iris dataset in gpt-4-0125-preview.
+ rows, responses = tabmemcheck.row_completion_test('iris.csv', 'gpt-4-0125-preview', num_queries=25)
+
+
+5,3.5,1.3,0.3,Iris-setosa
+5.9,3.2,4.8,1.8,Iris-versicolor
+6.9,3.2,5.7,2.3,Iris-virginica
+5.7,3.8,1.7,0.3,Iris-setosa
+6.7,3.1,5.6,2.4,Iris-virginica
+5.5,2.5,4.9,1.3,Iris-versicolor
+6.3,2.8,5.1,1.5,Iris-virginica
+6.4,3.2,4.5,1.5,Iris-versicolor
+7.3,2.9,6.3,1.8,Iris-virginica
+6,2.2,5,1.5,Iris-virginica
+6.1,2.6,5.6,1.4,Iris-virginica
+4.8,3.4,1.9,0.2,Iris-setosa
+6.3,2.7,4.9,1.8,Iris-virginica
+6.8,3.2,5.9,2.3,Iris-virginica
+6.3,3.3,4.7,1.6,Iris-versicolor
+5.9,3,4.2,1.5,Iris-versicolor
+4.4,3.2,1.3,0.2,Iris-setosa
+6.3,2.9,5.6,1.8,Iris-virginica
+5.2,4.1,1.5,0.1,Iris-setosa
+6.7,3,5,1.7,Iris-versicolor
+5.7,4.4,1.5,0.4,Iris-setosa
+5,3.5,1.6,0.6,Iris-setosa
+7.1,3,5.9,2.1,Iris-virginica
+6,2.7,5.1,6,1.6,Iris-versicolor
+5.5,2.6,4.4,1.2,Iris-versicolor
+
+Legend: Prompt, Correct, Incorrect, Missing
+
+
+Feature Completion Test – Asks the LLM to complete the value of a specific feature in a csv file
+The example provides evidence of memorization of the Kaggle Titanic dataset in gpt-3.5-turbo-0125.
+ feature_values, responses = tabmemcheck.feature_completion_test('/home/sebastian/Downloads/titanic-train.csv', 'gpt-3.5-turbo-0125', feature_name='Name', num_queries=25)
+
+
+Lester, Mr. James
+Meanwell, Miss. (Marion Ogden)
+Funk, Miss. Annie Clemmer
+McGovern, Miss. Mary
+Tikkanen, Mr. Juho
+Goodwin, Master. Sidney Leonard
+Vander Planke, Mr. Leo Edmondus
+Vovk, Mr. Janko
+Elsbury, Mr. William James
+Goodwin, Master. Harold Victor
+Abbott, Mrs. Stanton (Rosa Hunt)
+Marvin, Mr. Daniel Warner
+Ilmakangas, Miss. Pieta Sofia
+Cameron, Miss. Clear Annie
+Chambers, Mr. Norman Campbell
+Culumovic, Mr. Jeso
+Fox, Mr. Stanley Hubert
+Palsson, Miss. Stina Viola
+Brown, Mrs. James Joseph (Margaret Tobin)
+Williams, Mr. Charles Duane
+Phillips, Miss. Kate Florence ("Mrs Kate Louise Phillips Marshall")
+Sage, Miss. Dorothy Edith "Dolly"
+Eklund, Mr. Hans Linus
+Bowerman, Miss. Elsie Edith
+Landergren, Miss. Aurora Adelia
+
+Legend: Prompt, Correct, Incorrect, Missing
+
+
+First Token Test – Asks the LLM to complete the value of the first token in the next row of a csv file
+The example provides no evidence of memorization of the Adult Income dataset in gpt-3.5-turbo-0125.
+ tabmemcheck.first_token_test('adult-train.csv', 'gpt-3.5-turbo-0125', num_queries=100)
+
+
+First Token Test: 37/100 exact matches.
+First Token Test Baseline (matches of most common first token): 50/100.
+
+
+
+
+You can see all prompts that are being sent to the model, and the raw responses
+ tabmemcheck.config.print_prompts = True
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/objects.inv b/objects.inv
new file mode 100644
index 0000000..c6afd60
Binary files /dev/null and b/objects.inv differ
diff --git a/py-modindex.html b/py-modindex.html
new file mode 100644
index 0000000..8650ae1
--- /dev/null
+++ b/py-modindex.html
@@ -0,0 +1,137 @@
+
+
+
+
+
+
+
+
Python Module Index — tabmemcheck 0.1.6 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tabmemcheck
+
+
+
+
+
+
+
+ Python Module Index
+
+
+
+
+
+
+
+
+
+
Python Module Index
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/search.html b/search.html
new file mode 100644
index 0000000..8526dff
--- /dev/null
+++ b/search.html
@@ -0,0 +1,122 @@
+
+
+
+
+
+
+
+
Search — tabmemcheck 0.1.6 documentation
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+ tabmemcheck
+
+
+
+
+
+
+
+
+
+
+
+ Please activate JavaScript to enable the search functionality.
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/searchindex.js b/searchindex.js
new file mode 100644
index 0000000..b2e59da
--- /dev/null
+++ b/searchindex.js
@@ -0,0 +1 @@
+Search.setIndex({"alltitles": {"API Reference": [[0, null]], "Analysis": [[0, "module-tabmemcheck.analysis"]], "Contents:": [[1, null]], "Feature Completion Test \u2013 Asks the LLM to complete the value of a specific feature in a csv file": [[1, "feature-completion-test-asks-the-llm-to-complete-the-value-of-a-specific-feature-in-a-csv-file"]], "First Token Test \u2013 Asks the LLM to complete the value of the first token in the next row of a csv file": [[1, "first-token-test-asks-the-llm-to-complete-the-value-of-the-first-token-in-the-next-row-of-a-csv-file"]], "Header Test - Asks the LLM to complete the initial rows of a csv file": [[1, "header-test-asks-the-llm-to-complete-the-initial-rows-of-a-csv-file"]], "LLM": [[0, "module-0"]], "Row Completion Test \u2013 Ask the LLM to complete random rows of a csv file": [[1, "row-completion-test-ask-the-llm-to-complete-random-rows-of-a-csv-file"]], "Tabmemcheck": [[1, null]], "Tabular dataset loading (original, perturbed, task, statistical)": [[0, "module-tabmemcheck.datasets"]], "Tests for tabular datasets (based on csv files)": [[0, "module-tabmemcheck"]], "Utilities": [[0, "module-tabmemcheck.utils"]], "You can see all prompts that are being send to the model, and the raw responses": [[1, "you-can-see-all-prompts-that-are-being-send-to-the-model-and-the-raw-responses"]]}, "docnames": ["api_reference", "index"], "envversion": {"nbsphinx": 4, "sphinx": 64, "sphinx.domains.c": 3, "sphinx.domains.changeset": 1, "sphinx.domains.citation": 1, "sphinx.domains.cpp": 9, "sphinx.domains.index": 1, "sphinx.domains.javascript": 3, "sphinx.domains.math": 2, "sphinx.domains.python": 4, "sphinx.domains.rst": 2, "sphinx.domains.std": 2}, "filenames": ["api_reference.rst", "index.rst"], "indexentries": {"build_first_token() (in module tabmemcheck.analysis)": [[0, "tabmemcheck.analysis.build_first_token", false]], "chat_completion() (tabmemcheck.llm_interface method)": [[0, "tabmemcheck.LLM_Interface.chat_completion", false]], "completion() (tabmemcheck.llm_interface method)": [[0, "tabmemcheck.LLM_Interface.completion", false]], "feature_completion_test() (in module tabmemcheck)": [[0, "tabmemcheck.feature_completion_test", false]], "feature_names_test() (in module tabmemcheck)": [[0, "tabmemcheck.feature_names_test", false]], "find_matches() (in module tabmemcheck.analysis)": [[0, "tabmemcheck.analysis.find_matches", false]], "find_most_unique_feature() (in module tabmemcheck.analysis)": [[0, "tabmemcheck.analysis.find_most_unique_feature", false]], "first_token_test() (in module tabmemcheck)": [[0, "tabmemcheck.first_token_test", false]], "get_dataset_name() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.get_dataset_name", false]], "get_delimiter() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.get_delimiter", false]], "get_feature_names() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.get_feature_names", false]], "header_test() (in module tabmemcheck)": [[0, "tabmemcheck.header_test", false]], "levenshtein_cmd() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.levenshtein_cmd", false]], "levenshtein_html() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.levenshtein_html", false]], "llm_interface (class in tabmemcheck)": [[0, "tabmemcheck.LLM_Interface", false]], "load_adult() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_adult", false]], "load_csv_array() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.load_csv_array", false]], "load_csv_rows() (in module tabmemcheck.utils)": [[0, 
"tabmemcheck.utils.load_csv_rows", false]], "load_csv_string() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.load_csv_string", false]], "load_dataset() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_dataset", false]], "load_housing() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_housing", false]], "load_iris() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_iris", false]], "load_openml_diabetes() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_openml_diabetes", false]], "load_samples() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.load_samples", false]], "load_wine() (in module tabmemcheck.datasets)": [[0, "tabmemcheck.datasets.load_wine", false]], "module": [[0, "module-0", false], [0, "module-tabmemcheck", false], [0, "module-tabmemcheck.analysis", false], [0, "module-tabmemcheck.datasets", false], [0, "module-tabmemcheck.utils", false]], "openai_setup() (in module tabmemcheck)": [[0, "tabmemcheck.openai_setup", false]], "parse_feature_stings() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.parse_feature_stings", false]], "parse_feature_string() (in module tabmemcheck.utils)": [[0, "tabmemcheck.utils.parse_feature_string", false]], "row_completion_test() (in module tabmemcheck)": [[0, "tabmemcheck.row_completion_test", false]], "run_all_tests() (in module tabmemcheck)": [[0, "tabmemcheck.run_all_tests", false]], "sample() (in module tabmemcheck)": [[0, "tabmemcheck.sample", false]], "send_chat_completion() (in module tabmemcheck)": [[0, "tabmemcheck.send_chat_completion", false]], "send_completion() (in module tabmemcheck)": [[0, "tabmemcheck.send_completion", false]], "tabmemcheck": [[0, "module-0", false], [0, "module-tabmemcheck", false]], "tabmemcheck.analysis": [[0, "module-tabmemcheck.analysis", false]], "tabmemcheck.datasets": [[0, "module-tabmemcheck.datasets", false]], "tabmemcheck.utils": [[0, "module-tabmemcheck.utils", false]]}, "objects": {"": [[0, 0, 0, "module-0", "tabmemcheck"]], "tabmemcheck": [[0, 1, 1, "", "LLM_Interface"], [0, 0, 0, "-", "analysis"], [0, 0, 0, "-", "datasets"], [0, 3, 1, "", "feature_completion_test"], [0, 3, 1, "", "feature_names_test"], [0, 3, 1, "", "first_token_test"], [0, 3, 1, "", "header_test"], [0, 3, 1, "", "openai_setup"], [0, 3, 1, "", "row_completion_test"], [0, 3, 1, "", "run_all_tests"], [0, 3, 1, "", "sample"], [0, 3, 1, "", "send_chat_completion"], [0, 3, 1, "", "send_completion"], [0, 0, 0, "-", "utils"]], "tabmemcheck.LLM_Interface": [[0, 2, 1, "", "chat_completion"], [0, 2, 1, "", "completion"]], "tabmemcheck.analysis": [[0, 3, 1, "", "build_first_token"], [0, 3, 1, "", "find_matches"], [0, 3, 1, "", "find_most_unique_feature"]], "tabmemcheck.datasets": [[0, 3, 1, "", "load_adult"], [0, 3, 1, "", "load_dataset"], [0, 3, 1, "", "load_housing"], [0, 3, 1, "", "load_iris"], [0, 3, 1, "", "load_openml_diabetes"], [0, 3, 1, "", "load_wine"]], "tabmemcheck.utils": [[0, 3, 1, "", "get_dataset_name"], [0, 3, 1, "", "get_delimiter"], [0, 3, 1, "", "get_feature_names"], [0, 3, 1, "", "levenshtein_cmd"], [0, 3, 1, "", "levenshtein_html"], [0, 3, 1, "", "load_csv_array"], [0, 3, 1, "", "load_csv_rows"], [0, 3, 1, "", "load_csv_string"], [0, 3, 1, "", "load_samples"], [0, 3, 1, "", "parse_feature_stings"], [0, 3, 1, "", "parse_feature_string"]]}, "objnames": {"0": ["py", "module", "Python module"], "1": ["py", "class", "Python class"], "2": ["py", "method", "Python method"], "3": ["py", "function", "Python function"]}, "objtypes": {"0": 
"py:module", "1": "py:class", "2": "py:method", "3": "py:function"}, "terms": {"": 0, "0": [0, 1], "01": 1, "0125": 1, "02": 1, "03": 1, "04": 1, "05": 1, "06": 1, "0613": [0, 1], "08": 1, "1": [0, 1], "10": 0, "100": 1, "10000000": 0, "101": 1, "1045": 1, "105": 1, "10501": 1, "10651": 1, "109": 0, "11": 1, "112": 1, "113": 1, "118": 1, "1185": 1, "12": 1, "121": 1, "127": 1, "1280": 1, "1290": 1, "1295": 1, "13": 1, "14": 1, "1450": 1, "1480": 1, "15": 1, "1510": 1, "16": 1, "17": 1, "172": 0, "18": 1, "2": [0, 1], "21": 1, "22": 1, "23": 1, "23853": 0, "24": 1, "25": [0, 1], "26": 1, "27": 1, "28": [0, 1], "280": 0, "29": 1, "2d": 0, "3": [0, 1], "31": 1, "32": 1, "34": 1, "35": 1, "350": 1, "36": 1, "37": [0, 1], "38": 1, "39": 1, "4": [0, 1], "43": 1, "45": 1, "48": 1, "49": 1, "5": [0, 1], "50": 1, "500": 0, "51": 1, "52": 1, "55": 1, "57": 1, "58": 1, "59": 1, "6": [0, 1], "61": 1, "64": 1, "65": 1, "67": 1, "68": 1, "69": 1, "7": [0, 1], "71": 1, "735": 1, "75": 1, "76": 1, "78": 1, "8": [0, 1], "81": 1, "82": 1, "83": 1, "85": 1, "86": 1, "87": 1, "9": 1, "92": 1, "93": 1, "95": 1, "96": 1, "97": 1, "98": 1, "A": 0, "By": 0, "If": 0, "It": 1, "NOT": 0, "The": [0, 1], "Then": 0, "about": 0, "actual": [0, 1], "add_feature_nam": 0, "addit": 0, "adelia": 1, "adult": [0, 1], "adultdetail": 0, "alcohol": 1, "all": 0, "allow": 1, "alwai": 0, "an": [0, 1], "analysi": 1, "anni": 1, "anniechamb": 1, "api": 1, "appli": 0, "archiv": 0, "arg": 0, "arrai": 0, "as_dict": 0, "ask": 0, "aurora": 1, "azur": 0, "azure_openai_endpoint": 0, "azure_openai_kei": 0, "azure_openai_vers": 0, "b": 0, "base": 1, "baselin": 1, "bell": 0, "best": 0, "between": [0, 1], "bool": 0, "build": 0, "build_first_token": 0, "c": 0, "california": 0, "campbellculumov": 1, "can": 0, "case": 0, "charl": 1, "chat": 0, "chat_complet": 0, "check": 0, "class": 0, "clear": 1, "clemmermcgovern": 1, "closest": 0, "code": 0, "color": 0, "column": 0, "common": 1, "compar": 0, "complet": 0, "completion_length": [0, 1], "comput": 0, "cond_feature_nam": 0, "config": 1, "configur": 0, "consist": 0, "consol": 0, "construct": 0, "contain": [0, 1], "content": 0, "context": 0, "core": 0, "correct": 1, "correctli": 0, "count": 0, "csv_file": 0, "d": 0, "daniel": 1, "data": 0, "datafram": 0, "dataset": 1, "datasets_california_h": 0, "default": 0, "delimit": 0, "delv": 0, "deploi": 0, "descript": 0, "desir": 0, "determin": 0, "df": 0, "diabet": 0, "differ": [0, 1], "digit": 0, "distanc": [0, 1], "distinct": 0, "doe": 0, "dolli": 1, "dorothi": 1, "download": 1, "drop_invalid_respons": 0, "duanephillip": 1, "e": 0, "each": [0, 1], "edith": 1, "edithlandergren": 1, "edmondusvovk": 1, "edu": 0, "eklund": 1, "elsi": 1, "entri": 0, "environ": 0, "equal": 0, "everi": 0, "evid": 1, "exact": 1, "exampl": [0, 1], "experi": 0, "fals": 0, "featur": 0, "feature_completion_test": [0, 1], "feature_nam": [0, 1], "feature_names_test": 0, "feature_valu": [0, 1], "few": 0, "few_shot": 0, "few_shot_csv_fil": 0, "final_delimit": 0, "find": 0, "find_match": 0, "find_most_unique_featur": 0, "first": 0, "first_token_test": [0, 1], "float": 0, "florenc": 1, "follow": 0, "form": [0, 1], "format": 0, "four": 1, "fraction": 0, "frame": 0, "from": 0, "function": [0, 1], "funk": 1, "g": 0, "gener": 0, "get_dataset_nam": 0, "get_delimit": 0, "get_feature_nam": 0, "github": 0, "given": 0, "gpt": [0, 1], "ha": 0, "han": 1, "hand": 0, "handl": 0, "harold": 1, "have": 0, "header": 0, "header_complet": 1, "header_prompt": 1, "header_test": [0, 1], "home": 1, "hous": 0, "html": 
0, "http": 0, "hubertpalsson": 1, "hunt": 1, "i": [0, 1], "ic": 0, "import": 1, "improv": 0, "in_list": 0, "incom": [0, 1], "incorrect": 1, "inria": 0, "int": 0, "interfac": 0, "io": 0, "iri": [0, 1], "jame": 1, "jamesgoodwin": 1, "jamesmeanwel": 1, "jankoelsburi": 1, "jesofox": 1, "joseph": 1, "juhogoodwin": 1, "kaggl": 1, "kate": 1, "know": 0, "kwarg": 0, "languag": [0, 1], "learn": 0, "legend": 1, "length": 0, "leo": 1, "leonardvand": 1, "lester": 1, "levenshtein": [0, 1], "levenshtein_cmd": 0, "levenshtein_dist": 0, "levenshtein_html": 0, "librari": 1, "linusbowerman": 1, "list": 0, "llm_interfac": 0, "load": 1, "load_adult": 0, "load_csv_arrai": 0, "load_csv_row": 0, "load_csv_str": 0, "load_dataset": 0, "load_hous": 0, "load_iri": 0, "load_openml_diabet": 0, "load_sampl": 0, "load_win": 0, "log": 0, "logfil": 0, "louis": 1, "make": 0, "malic_acid": 1, "margaret": 1, "marion": 1, "mark": 0, "marshal": 1, "marvin": 1, "marytikkanen": 1, "master": 1, "match": [0, 1], "match_floating_point": 0, "max_token": 0, "maximum": 0, "memor": [0, 1], "messag": 0, "metric": 0, "minimum": 0, "miss": 1, "ml": 0, "model": 0, "mooc": 0, "most": [0, 1], "mr": 1, "n": 0, "name": [0, 1], "next": 0, "none": 0, "norman": 1, "note": 0, "num_prefix_featur": 0, "num_prefix_row": 0, "num_queri": [0, 1], "number": 0, "numpi": 0, "object": 0, "ogden": 1, "one": 0, "open": 1, "openai": 0, "openai_api_kei": 0, "openai_api_org": 0, "openai_setup": 0, "openml": 0, "option": 0, "org": 0, "origin": 1, "other": 0, "out_fil": 0, "panda": 0, "paramet": 0, "pars": 0, "parse_feature_st": 0, "parse_feature_str": 0, "part": 0, "path": 0, "per": 0, "perform": 0, "permut": 0, "permute_column": 0, "perturb": 1, "phillip": 1, "pieta": 1, "plain": 0, "plank": 1, "point": 0, "posit": 0, "preview": 1, "previou": 0, "print": 0, "print_invalid_respons": 0, "print_levenshtein": 0, "print_prompt": 1, "print_stat": 0, "prior": 0, "produc": 0, "proline1": 1, "prompt": 0, "provid": [0, 1], "python": 1, "python_script": 0, "queri": 0, "quotat": 0, "random": 0, "reduc": 0, "refer": 1, "resourc": 1, "respond": 0, "respons": 0, "resport": 0, "return": 0, "rosa": 1, "row": 0, "row_completion_test": [0, 1], "run": 0, "run_all_test": 0, "sage": 1, "same": 0, "sampl": 0, "save": 0, "scikit": 0, "sebastian": 1, "seed": 0, "send": 0, "send_chat_complet": 0, "send_complet": 0, "seri": 0, "setosa5": 1, "setosa6": 1, "setosa7": 1, "setup": 0, "shot": 0, "should": 0, "sidnei": 1, "singl": 0, "size": 0, "small": 0, "sofiacameron": 1, "some": 0, "sourc": 1, "specifi": [0, 1], "split": 0, "split_row": 0, "stanlei": 1, "stanton": 1, "statist": 1, "still": 0, "stina": 1, "str": 0, "string": [0, 1], "string_dist_fn": 0, "strip": 0, "strip_quotation_mark": 0, "submodul": 1, "system": 0, "system_prompt": 0, "tabmemcheck": 0, "tabular": 1, "take": 0, "target": 1, "task": 1, "temperatur": 0, "thi": 0, "titan": 1, "tobin": 1, "token": 0, "toronto": 0, "train": [0, 1], "transform": [0, 1], "true": [0, 1], "turbo": [0, 1], "two": 0, "type": 0, "uci": [0, 1], "uniqu": 0, "unique_featur": 0, "up": 0, "upon": 0, "us": 0, "util": 1, "v": 0, "valu": 0, "variabl": 0, "verbatim": 1, "verbos": 0, "versicolor": 1, "versicolor4": 1, "versicolor5": 1, "versicolor6": 1, "versicolor7": 1, "version": 0, "victorabbott": 1, "violabrown": 1, "virginica4": 1, "virginica5": 1, "virginica6": 1, "visual": [0, 1], "visul": 0, "warnerilmakanga": 1, "we": 0, "where": 0, "whether": 0, "which": 0, "whistl": 0, "william": 1, "window": 0, "wine": [0, 1], "work": 0, "www": 0, "x": 0, "yaml": 
[0, 1], "yaml_config": 0}, "titles": ["API Reference", "Tabmemcheck"], "titleterms": {"all": 1, "analysi": 0, "api": 0, "ar": 1, "ask": 1, "base": 0, "being": 1, "can": 1, "complet": 1, "content": 1, "csv": [0, 1], "dataset": 0, "featur": 1, "file": [0, 1], "first": 1, "header": 1, "initi": 1, "llm": [0, 1], "load": 0, "model": 1, "next": 1, "origin": 0, "perturb": 0, "prompt": 1, "random": 1, "raw": 1, "refer": 0, "respons": 1, "row": 1, "see": 1, "send": 1, "specif": 1, "statist": 0, "tabmemcheck": 1, "tabular": 0, "task": 0, "test": [0, 1], "token": 1, "util": 0, "valu": 1, "you": 1}})
\ No newline at end of file