convert floats to int correctly (#211)
JesseLivezey authored Aug 8, 2022
1 parent 6fa30ce commit be6afe7
Showing 8 changed files with 32 additions and 32 deletions.
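
The common thread in the Python changes below: in Python 3 the / operator always returns a float, so values like n_boots / 2 can no longer be handed to parameters that expect an integer count (for example DBSCAN's min_samples). A minimal sketch of the pattern this commit applies, using an assumed n_boots value that is not taken from the diff:

    # Sketch with an assumed n_boots; illustrates the conversion applied in this commit.
    n_boots = 5

    # Python 3 "true division" always yields a float, even for whole-number results.
    min_samples_old = n_boots / 2           # 2.5 -- not a valid integer sample count

    # Floor division keeps the value an int; max(..., 1) guards against it reaching 0.
    min_samples_new = max(n_boots // 2, 1)  # 2

    print(min_samples_old, min_samples_new)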
2 changes: 1 addition & 1 deletion .flake8
@@ -1,3 +1,3 @@
 [flake8]
 ignore = E401,W503,W504
-max-line-length = 80
+max-line-length = 100
2 changes: 1 addition & 1 deletion docs/source/conf.py
@@ -89,7 +89,7 @@
 #
 # This is also used if you do content translation via gettext catalogs.
 # Usually you set "language" from the command line for these cases.
-language = None
+language = 'en'
 
 # List of patterns, relative to source directory, that match files and
 # directories to ignore when looking for source files.
2 changes: 1 addition & 1 deletion examples/plot_swimmer.py
@@ -73,7 +73,7 @@
 # ``min_pts`` should be half of the number of bootstraps.
 
 nboot = 20
-min_pts = nboot / 2
+min_pts = max(nboot // 2, 1)
 ranks = [16]
 
 shape = corrupted.shape
4 changes: 2 additions & 2 deletions src/pyuoi/decomposition/NMF.py
@@ -119,7 +119,7 @@ def __initialize(self, **kwargs):
 raise ValueError('dbscan must be an instance, not a class.')
 self.cluster = cluster
 else:
-self.cluster = DBSCAN(min_samples=self.n_boots / 2)
+self.cluster = DBSCAN(min_samples=max(self.n_boots // 2, 1))
 
 # initialize non-negative regression solver
 if nnreg is None:
@@ -423,7 +423,7 @@ def __init__(
 
 # create DBSCAN solver
 if db_min_samples is None:
-db_min_samples = n_boots / 2
+db_min_samples = max(n_boots // 2, 1)
 dbscan = DBSCAN(eps=db_eps,
 min_samples=db_min_samples,
 metric=db_metric,
2 changes: 1 addition & 1 deletion tests/test_mpi/test_mpi_utils.py
@@ -128,4 +128,4 @@ def test_Gatherv_random_rows():
 data = Gatherv_rows(data, comm, root)
 
 if rank == root:
-assert(data.shape[0] == np.sum(sizes))
+assert data.shape[0] == np.sum(sizes)
4 changes: 2 additions & 2 deletions tests/test_nmf.py
@@ -39,7 +39,7 @@ def test_UoI_NMF_Base_initialization():
 assert_array_equal(uoi.ranks, np.arange(2, ranks + 1))
 assert uoi.nmf.solver == 'mu'
 assert uoi.nmf.beta_loss == 'kullback-leibler'
-assert uoi.cluster.min_samples == n_boots / 2
+assert uoi.cluster.min_samples == max(n_boots // 2, 1)
 
 
 @pytest.mark.fast
@@ -51,7 +51,7 @@ def test_UoI_NMF_initialization():
 assert_array_equal(uoi.ranks, np.arange(2, ranks + 1))
 assert uoi.nmf.solver == 'mu'
 assert uoi.nmf.beta_loss == 'kullback-leibler'
-assert uoi.cluster.min_samples == n_boots / 2
+assert uoi.cluster.min_samples == max(n_boots // 2, 1)
 assert uoi.cons_meth == np.mean


32 changes: 16 additions & 16 deletions tests/test_scores.py
@@ -101,7 +101,7 @@ def test_LinearRegressor_scoring_defaults():
 support = np.ones(X.shape[1]).astype(bool)
 # r2 - must use test data
 uoi = UoI_Lasso(estimation_score='r2')
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
 
 score = uoi._score_predictions('r2', fitter, X, y, support,
 (train_idxs, test_idxs))
@@ -111,22 +111,22 @@ def test_LinearRegressor_scoring_defaults():
 fitter.predict(X_train[:, support]))
 # BIC - must use train data
 uoi = UoI_Lasso(estimation_score='BIC')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 score = -1 * uoi._score_predictions('BIC', fitter, X, y, support,
 (train_idxs, test_idxs))
 assert_equal(BIC(ll, *X_train.T.shape), score)
 
 # AIC - must use train data
 uoi = UoI_Lasso(estimation_score='AIC')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 score = -1 * uoi._score_predictions('AIC', fitter, X, y, support,
 (train_idxs, test_idxs))
 assert_equal(AIC(ll, X_train.shape[1]), score)
 
 # AICc - must use train data
 uoi = UoI_Lasso(estimation_score='AICc')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 score = -1 * uoi._score_predictions('AICc', fitter, X, y, support,
 (train_idxs, test_idxs))
@@ -158,15 +158,15 @@ def test_GeneralizedLinearRegressor_scoring_defaults():
 
 # acc - must use test data
 uoi = UoI_L1Logistic(estimation_score='acc')
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
 uoi.classes_ = np.unique(y)
 score = uoi._score_predictions('acc', fitter, X, y, support,
 (train_idxs, test_idxs))
 assert_equal(accuracy_score(y_test, fitter.predict(X_test)), score)
 
 # log - must use test data. Note the sign difference
 uoi = UoI_L1Logistic(estimation_score='log')
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
 uoi.classes_ = np.unique(y)
 score = uoi._score_predictions('log', fitter, X, y, support,
 (train_idxs, test_idxs))
@@ -180,23 +180,23 @@ def test_GeneralizedLinearRegressor_scoring_defaults():
 total_ll = ll * X_train.shape[0]
 # BIC - must use train data
 uoi = UoI_L1Logistic(estimation_score='BIC')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 uoi.classes_ = np.unique(y)
 score = -1 * uoi._score_predictions('BIC', fitter, X, y, support,
 (train_idxs, test_idxs))
 assert_equal(BIC(total_ll, *X_train.T.shape), score)
 
 # AIC
 uoi = UoI_L1Logistic(estimation_score='AIC')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 uoi.classes_ = np.unique(y)
 score = -1 * uoi._score_predictions('AIC', fitter, X, y, support,
 (train_idxs, test_idxs))
 assert_equal(AIC(total_ll, X_train.shape[1]), score)
 
 # AICc
 uoi = UoI_L1Logistic(estimation_score='AICc')
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 uoi.classes_ = np.unique(y)
 score = -1 * uoi._score_predictions('AICc', fitter, X, y, support,
 (train_idxs, test_idxs))
@@ -210,7 +210,7 @@ def test_estimation_target():
 uoi = UoI_Lasso(estimation_score='r2', estimation_target='train')
 
 # train gets converted to the index 0
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 # Assess BIC on test data
 uoi = UoI_Lasso(estimation_score='BIC', estimation_target='test')
@@ -219,25 +219,25 @@ def test_estimation_target():
 uoi = UoI_ElasticNet(estimation_score='r2', estimation_target='train')
 
 # train gets converted to the index 0
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 # Assess BIC on test data
 uoi = UoI_ElasticNet(estimation_score='BIC', estimation_target='test')
 
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
 
 uoi = UoI_L1Logistic(estimation_score='acc', estimation_target='train')
 
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 uoi = UoI_L1Logistic(estimation_score='BIC', estimation_target='test')
 
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
 
 uoi = UoI_Poisson(estimation_score='acc', estimation_target='train')
 
-assert(uoi._estimation_target == 0)
+assert uoi._estimation_target == 0
 
 uoi = UoI_Poisson(estimation_score='BIC', estimation_target='test')
 
-assert(uoi._estimation_target == 1)
+assert uoi._estimation_target == 1
16 changes: 8 additions & 8 deletions tests/test_uoi_lasso.py
@@ -262,10 +262,10 @@ def test_choice_of_solver():
 '''Tests whether one can correctly switch between solvers in UoI Lasso'''
 
 uoi1 = UoI_Lasso(solver='cd')
-assert(isinstance(uoi1._selection_lm, Lasso))
+assert isinstance(uoi1._selection_lm, Lasso)
 
 uoi2 = UoI_Lasso(solver='pyc')
-assert(isinstance(uoi2._selection_lm, PycLasso))
+assert isinstance(uoi2._selection_lm, PycLasso)
 
 
 @pytest.mark.skipif(pycasso is not None, reason='pycasso is installed')
@@ -275,7 +275,7 @@ def test_pycasso_error():
 
 with pytest.raises(ImportError):
 uoi2 = UoI_Lasso(solver='pyc')
-assert(isinstance(uoi2._selection_lm, PycLasso))
+assert isinstance(uoi2._selection_lm, PycLasso)
 
 
 @pytest.mark.skipif(pycasso is None, reason='pycasso not installed')
@@ -285,11 +285,11 @@ def test_pyclasso():
 pyclasso = PycLasso()
 # Test that we can set params correctly
 pyclasso.set_params(fit_intercept=True)
-assert(pyclasso.fit_intercept)
+assert pyclasso.fit_intercept
 pyclasso.set_params(max_iter=500)
-assert(pyclasso.max_iter == 500)
+assert pyclasso.max_iter == 500
 pyclasso.set_params(alphas=np.arange(100))
-assert(np.array_equal(pyclasso.alphas, np.arange(100)))
+assert np.array_equal(pyclasso.alphas, np.arange(100))
 
 # Test that spurious parameters are rejected
 try:
@@ -313,10 +313,10 @@ def test_pyclasso():
 alphas = _alpha_grid(X, y)
 pyclasso.set_params(alphas=alphas)
 pyclasso.fit(X, y)
-assert(np.array_equal(pyclasso.coef_.shape, (100, 3)))
+assert np.array_equal(pyclasso.coef_.shape, (100, 3))
 y_pred = pyclasso.predict(X)
 scores = np.array([r2_score(y, y_pred[:, j]) for j in range(100)])
-assert(np.allclose(1, max(scores)))
+assert np.allclose(1, max(scores))
 
 
 @pytest.mark.skipif(pycasso is None, reason='pycasso not installed')
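
Most of the test-suite changes above also swap assert(expr) for a bare assert expr. The parenthesized form is harmless for a lone condition, but it becomes a real bug the moment a failure message is added inside the parentheses, because assert then tests a non-empty tuple, which is always truthy (recent CPython versions emit a SyntaxWarning for this). A short sketch, not taken from the repository:

    # Sketch (not from the repo): the parenthesized assert silently stops asserting
    # once a message is placed inside the parentheses.
    x = 0

    assert (x == 1, "x should be 1")  # asserts on a 2-tuple: always passes

    try:
        assert x == 1, "x should be 1"  # bare form: fails as intended
    except AssertionError as err:
        print("caught:", err)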
