Merge pull request #82 from oscarbranson/dev
0.3.31 - Minor fixes for python updates
oscarbranson authored Sep 11, 2024
2 parents 57cd0c7 + 4cb2a5d commit 31b8e12
Showing 6 changed files with 77 additions and 28 deletions.
13 changes: 8 additions & 5 deletions .github/workflows/tests.yml
@@ -10,20 +10,23 @@ jobs:
runs-on: ${{ matrix.os }}
strategy:
matrix:
python-version: ["3.8", "3.9", "3.10", "3.11"]
os: [windows-latest, ubuntu-latest, macos-latest]
python-version: ["3.9", "3.10", "3.11"]
os: [windows-latest, ubuntu-latest]

steps:
- uses: actions/checkout@v3
- uses: actions/checkout@v4

- name: Setup python
uses: actions/setup-python@v4
uses: actions/setup-python@v5
with:
python-version: ${{ matrix.python-version }}

- name: ${{ matrix.os }} python ${{ matrix.python-version }}
- name: Install Python dependencies
shell: bash -l {0}
run: |
python3 -m pip install -r requirements.txt
python3 -m pip install -e . --no-deps --force-reinstall
- name: Run tests
run: |
python3 -m unittest
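
The split into separate install and test steps can be reproduced locally with the same commands the workflow runs. A minimal sketch, assuming you are in the repository root with a Python 3.9+ environment; only the commands shown in the workflow above are used, the subprocess wrapper is illustrative:

    import subprocess
    import sys

    steps = [
        [sys.executable, "-m", "pip", "install", "-r", "requirements.txt"],
        [sys.executable, "-m", "pip", "install", "-e", ".", "--no-deps", "--force-reinstall"],
        [sys.executable, "-m", "unittest"],
    ]

    for cmd in steps:
        subprocess.run(cmd, check=True)  # stop at the first failing step, as CI does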
2 changes: 1 addition & 1 deletion CHANGELOG.md
@@ -1,7 +1,7 @@
# Changelog
All significant changes to the software will be documented here.

## [0.3.30-dev] - LIVE
## [0.3.31-dev] - LIVE

### TODO
- implement different analyte lists for different stages (raw, ratios, mass_fractions), OR improve analyte_checker functionality. This has to propagate through to filter assignment.
2 changes: 1 addition & 1 deletion latools/__init__.py
@@ -17,7 +17,7 @@
from .helpers import chemistry
from . import preprocessing

VERSION = '0.3.30'
VERSION = '0.3.31'

def cite(output='text'):
"""
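
A quick way to confirm which release is installed. `VERSION` and `cite()` are both defined in `latools/__init__.py`; whether `cite()` returns or prints the citation is not shown in this diff, so the usage below is illustrative:

    import latools as la

    print(la.VERSION)  # '0.3.31' after this commit
    la.cite()          # citation for the package; output='text' by default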
20 changes: 8 additions & 12 deletions latools/latools.py
@@ -67,7 +67,11 @@
# TODO: Add 'smooth all' function.

class analyse(object):
"""
def __init__(self, data_path, errorhunt=False, config='DEFAULT',
dataformat=None, extension='.csv', srm_identifier='STD',
cmap=None, time_format=None, internal_standard='Ca43',
file_structure='multi', names='file_names', srm_file=None, pbar=None, split_kwargs={}):
"""
For processing and analysing whole LA - ICPMS datasets.
Parameters
@@ -156,14 +160,6 @@ class analyse(object):
cmaps : dict
An analyte - specific colour map, used for plotting.
"""

def __init__(self, data_path, errorhunt=False, config='DEFAULT',
dataformat=None, extension='.csv', srm_identifier='STD',
cmap=None, time_format=None, internal_standard='Ca43',
file_structure='multi', names='file_names', srm_file=None, pbar=None, split_kwargs={}):
"""
For processing and analysing whole LA - ICPMS datasets.
"""
# initialise log
params = {k: v for k, v in locals().items() if k not in ['self', 'pbar']}
self.log = ['__init__ :: args=() kwargs={}'.format(str(params))]
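
For context, the relocated signature above is the normal entry point for a session. A hypothetical call using only keywords visible in that signature; the data path is a placeholder, not a file from this commit:

    import latools as la

    dat = la.analyse(
        data_path='raw_data/',       # placeholder folder of ablation files
        config='DEFAULT',
        extension='.csv',
        srm_identifier='STD',
        internal_standard='Ca43',
        file_structure='multi',      # 'long' triggers the single-file split path handled below
        names='file_names',
    )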
@@ -279,7 +275,9 @@ def __init__(self, data_path, errorhunt=False, config='DEFAULT',
data.sort(key=lambda d: d.uTime[0])

# process sample names
if (names == 'file_names') | (names == 'metadata_names'):
if file_structure == 'long':
samples = np.array([s.sample for s in data], dtype=object)
elif (names == 'file_names') | (names == 'metadata_names'):
samples = np.array([s.sample.replace(' ', '') for s in data], dtype=object) # get all sample names
# if duplicates, rename them
usamples, ucounts = np.unique(samples, return_counts=True)
@@ -292,8 +290,6 @@ def __init__(self, data_path, errorhunt=False, config='DEFAULT',
samples[ind] = new # rename in samples
for s, ns in zip([data[i] for i in np.where(ind)[0]], new):
s.sample = ns # rename in D objects
elif file_structure == 'long':
samples = np.array([s.sample for s in data], dtype=object)
else:
samples = np.arange(len(data)) # assign a range of numbers
for i, s in enumerate(samples):
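
The duplicate-name handling in the reordered block above can be illustrated in isolation: find repeated sample names with np.unique(..., return_counts=True), then rename the repeats. The exact suffix scheme latools applies is elided in this hunk, so the '_0', '_1', ... renaming below is only a sketch of the approach:

    import numpy as np

    samples = np.array(['A', 'B', 'A', 'C', 'A'], dtype=object)

    usamples, ucounts = np.unique(samples, return_counts=True)
    for u in usamples[ucounts > 1]:
        ind = samples == u
        new = [f'{u}_{i}' for i in range(ind.sum())]  # e.g. A_0, A_1, A_2
        samples[ind] = new

    print(samples)  # ['A_0' 'B' 'A_1' 'C' 'A_2']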
23 changes: 14 additions & 9 deletions latools/preprocessing/split.py
@@ -185,7 +185,7 @@ def long_file(data_file, dataformat, sample_list, analyte='total_counts', savedi
srm_replace.append(s)
sample_list = srm_replace

dataformat = read_dataformat(dataformat, silent=False)
dataformat = read_dataformat(dataformat, silent=True)

_, _, dat, meta = read_data(data_file, dataformat=dataformat, name_mode='file')

@@ -211,6 +211,10 @@ def long_file(data_file, dataformat, sample_list, analyte='total_counts', savedi
n = int(max(ns))

nsamples = len(sample_list)

print('\n*** Starting Long File Split ***')
print(f'There are {nsamples} samples in the sample list.')
print(f'With initial settings, we have found {n} ablations in the data.')

# deal with wildcards
if nsamples <= n and mode != 'strict':
@@ -249,20 +253,21 @@ def long_file(data_file, dataformat, sample_list, analyte='total_counts', savedi
ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
n = int(max(ns))
print(' (Removed data fragments < {} points long)'.format(min_points))
elif isinstance(min_points, (int, float)):
# minimum point filter
sig = sig & np.roll(sig, min_points)
ns = np.zeros(sig.size)
ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
n = int(max(ns))
# elif isinstance(min_points, (int, float)):
# print(' -> There are more ablations than samples...')
# # minimum point filter
# sig = sig & np.roll(sig, min_points)
# ns = np.zeros(sig.size)
# ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
# n = int(max(ns))
else:
print(' -> There are more samples than ablations...')
print(' Check your sample list is correct. If so, consider')
print(' adding autorange_params to change the signal detection.')
return

minn = min([len(sample_list), n])

# calculate split boundaries
bounds = []
lower = 0
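
The counting expression used in this hunk labels each contiguous block of "signal" points with an increasing integer, so max(ns) is the number of ablations detected. A standalone illustration; the example array is made up:

    import numpy as np

    sig = np.array([False, True, True, False, False, True, True, True, False, True])

    ns = np.zeros(sig.size)
    # (sig ^ np.roll(sig, 1)) & sig is True only at the first point of each block,
    # so the cumulative sum assigns 1, 2, 3, ... to successive blocks.
    ns[sig] = np.cumsum((sig ^ np.roll(sig, 1)) & sig)[sig]
    n = int(max(ns))

    print(ns)  # [0. 1. 1. 0. 0. 2. 2. 2. 0. 3.]
    print(n)   # 3 ablations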
@@ -307,7 +312,7 @@ def long_file(data_file, dataformat, sample_list, analyte='total_counts', savedi

# save output
if passthrough:
print(f"Success! {n} ablations identified.")
print(f"*** Success! {n} ablations identified. ***\n")
for sample, sdat in sections.items():

sanalytes = list(sdat['rawdata'].keys())
45 changes: 45 additions & 0 deletions old_setup.py
@@ -0,0 +1,45 @@
from setuptools import setup, find_packages

# because TravisCI was being a jerk
try:
from latools import __version__
except:
__version__ = "version_missing"

with open("README.md", "r") as fh:
long_description = fh.read()

setup(name='latools',
version=__version__,
description='Tools for LA-ICPMS data analysis.',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/oscarbranson/latools',
author='Oscar Branson',
author_email='[email protected]',
license='MIT',
packages=find_packages(),
classifiers=['Development Status :: 4 - Beta',
'Intended Audience :: Science/Research',
'Topic :: Scientific/Engineering',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 3',
],
python_requires='>3.6',
install_requires=['numpy',
'pandas',
'matplotlib',
'uncertainties',
'scikit-learn',
'scipy',
'Ipython',
'configparser',
'tqdm'
],
package_data={
'latools': ['latools.cfg',
'resources/*',
'resources/data_formats/*',
'resources/test_data/*'],
},
zip_safe=False)
