Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

Remove unused and duplicate code #420

Merged
merged 2 commits into from
Apr 22, 2024
Merged
Show file tree
Hide file tree
Changes from all commits
Commits
File filter

Filter by extension

Filter by extension

Conversations
Failed to load comments.
Loading
Jump to
Jump to file
Failed to load files.
Loading
Diff view
Diff view
6 changes: 0 additions & 6 deletions include/fdeep/convolution.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -59,12 +59,6 @@ namespace internal {
return { shape, filters.size(), biases, use_bias, filter_mats };
}

// Convenience wrapper: builds the im2col matrix representation for one
// filter by lifting it into a one-element filter vector.
inline convolution_filter_matrices generate_im2col_single_filter_matrix(
    const filter& single_filter)
{
    return generate_im2col_filter_matrix(filter_vec(1, single_filter));
}

inline tensor init_conv_output_tensor(
std::size_t out_height,
std::size_t out_width,
Expand Down
98 changes: 28 additions & 70 deletions include/fdeep/import_model.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -130,35 +130,36 @@ namespace internal {
return fplus::just(result);
}

// Builds a tensor_shape_variable from a JSON array of dimension entries
// (each entry may be null for a variable-size dimension), ignoring the
// first `offset` leading entries of the array.
// Supports 1 to 5 effective dimensions; anything else raises an error.
// NOTE(review): reconstructed from a diff view in which added and removed
// lines were interleaved — confirm against the merged file.
inline tensor_shape_variable create_tensor_shape_variable_offset(
    const nlohmann::json& data, std::size_t offset)
{
    assertion(data.is_array(), "tensor_shape_variable needs to be an array");
    assertion(data.size() > 0, "need at least one dimension");
    if (data.size() == 1 + offset)
        return tensor_shape_variable(
            create_maybe_size_t(data[0 + offset]));
    if (data.size() == 2 + offset)
        return tensor_shape_variable(
            create_maybe_size_t(data[0 + offset]),
            create_maybe_size_t(data[1 + offset]));
    if (data.size() == 3 + offset)
        return tensor_shape_variable(
            create_maybe_size_t(data[0 + offset]),
            create_maybe_size_t(data[1 + offset]),
            create_maybe_size_t(data[2 + offset]));
    if (data.size() == 4 + offset)
        return tensor_shape_variable(
            create_maybe_size_t(data[0 + offset]),
            create_maybe_size_t(data[1 + offset]),
            create_maybe_size_t(data[2 + offset]),
            create_maybe_size_t(data[3 + offset]));
    if (data.size() == 5 + offset)
        return tensor_shape_variable(
            create_maybe_size_t(data[0 + offset]),
            create_maybe_size_t(data[1 + offset]),
            create_maybe_size_t(data[2 + offset]),
            create_maybe_size_t(data[3 + offset]),
            create_maybe_size_t(data[4 + offset]));

    raise_error("tensor_shape_variable needs 1, 2, 3, 4 or 5 dimensions");
    return tensor_shape_variable(
        fplus::nothing<std::size_t>(),
        fplus::nothing<std::size_t>(),
        fplus::nothing<std::size_t>(),
        fplus::nothing<std::size_t>(),
        fplus::nothing<std::size_t>()); // Is never called
}

// Parses a JSON array into a tensor_shape_variable, using all entries
// of the array (no leading entries skipped).
// NOTE(review): reconstructed from a diff view in which added and removed
// lines were interleaved — confirm against the merged file.
inline tensor_shape_variable create_tensor_shape_variable(const nlohmann::json& data)
{
    return create_tensor_shape_variable_offset(data, 0);
}

// Parses a JSON array into a tensor_shape_variable, skipping the first
// entry — presumably the leading null batch dimension; verify against
// the callers that pass Keras input shapes.
// NOTE(review): reconstructed from a diff view in which added and removed
// lines were interleaved — confirm against the merged file.
inline tensor_shape_variable create_tensor_shape_variable_leading_null(const nlohmann::json& data)
{
    return create_tensor_shape_variable_offset(data, 1);
}

inline tensor_shape create_tensor_shape(const nlohmann::json& data)
Expand Down Expand Up @@ -390,11 +362,6 @@ namespace internal {
return std::make_shared<model_layer>(name, layers, inputs, outputs);
}

// Overwrites every element of xs with zero; size and capacity unchanged.
inline void fill_with_zeros(float_vec& xs)
{
    for (auto& value : xs)
        value = static_cast<float_type>(0);
}

inline padding create_padding(const std::string& padding_str)
{
return fplus::throw_on_nothing(error("no padding"),
Expand Down Expand Up @@ -1067,15 +1034,6 @@ namespace internal {
return data;
}

// Returns the activation type named in config["activation"] when present,
// otherwise the supplied default activation name.
inline std::string json_object_get_activation_with_default(const nlohmann::json& config,
    const std::string& default_activation)
{
    const bool has_activation = json_obj_has_member(config, "activation");
    return has_activation
        ? get_activation_type(config["activation"])
        : default_activation;
}

inline activation_layer_ptr create_activation_layer_type_name(
const get_param_f& get_param,
const nlohmann::json& data,
Expand Down
10 changes: 0 additions & 10 deletions include/fdeep/recurrent_ops.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -17,11 +17,6 @@ namespace internal {
template <int Count>
using RowVector = Eigen::Matrix<float_type, 1, Count>;

// Identity activation: returns its input unchanged.
inline float_type linear_activation(float_type value)
{
    return value;
}

inline float_type tanh_activation(float_type x)
{
return std::tanh(x);
Expand Down Expand Up @@ -49,11 +44,6 @@ namespace internal {
return (x / static_cast<float_type>(6)) + static_cast<float_type>(0.5);
}

// Rectified linear unit: negative inputs become zero, others pass through.
// (Same result as std::max<float_type>(x, 0), including for NaN input.)
inline float_type relu_activation(float_type x)
{
    return x < static_cast<float_type>(0) ? static_cast<float_type>(0) : x;
}

inline float_type selu_activation(float_type x)
{
const float_type alpha = static_cast<float_type>(1.6732632423543772848170429916717);
Expand Down
29 changes: 0 additions & 29 deletions include/fdeep/tensor.hpp
Original file line number Diff line number Diff line change
Expand Up @@ -187,16 +187,6 @@ namespace internal {
return t.get(tensor_pos(static_cast<std::size_t>(0)));
}

// Wraps a single scalar value into a size-1 tensor.
inline tensor from_singleton_value(float_type value)
{
    const tensor_shape singleton_shape(static_cast<std::size_t>(1));
    return tensor(singleton_shape, value);
}

// Returns a copy of t whose shape is re-expressed with the given rank;
// the element values are taken over unchanged.
inline tensor tensor_with_changed_rank(const tensor& t, std::size_t rank)
{
    const auto reshaped = tensor_shape_with_changed_rank(t.shape(), rank);
    return tensor(reshaped, t.as_vector());
}

template <typename F>
tensor transform_tensor(F f, const tensor& m)
{
Expand Down Expand Up @@ -900,17 +890,6 @@ namespace internal {
return tensor(ts.front().shape(), std::move(result_values));
}

// Creates a non-owning Eigen view over an existing buffer of
// height * width elements. When using this function, make sure the data
// pointer is not invalidated before the last access to the returned
// matrix happens.
inline MappedRowMajorMatrixXf eigen_row_major_mat_from_shared_values(std::size_t height,
    std::size_t width, float_type* data)
{
    const auto rows = static_cast<EigenIndex>(height);
    const auto cols = static_cast<EigenIndex>(width);
    return MappedRowMajorMatrixXf(data, rows, cols);
}

inline RowMajorMatrixXf eigen_row_major_mat_from_values(std::size_t height,
std::size_t width, const float_vec& values)
{
Expand All @@ -920,14 +899,6 @@ namespace internal {
return m;
}

// Copies the contents of a row-major Eigen matrix into a freshly
// allocated shared float vector (row-major element order preserved).
inline shared_float_vec eigen_row_major_mat_to_values(const RowMajorMatrixXf& m)
{
    const auto count = static_cast<std::size_t>(m.rows() * m.cols());
    shared_float_vec result = fplus::make_shared_ref<float_vec>();
    result->resize(count);
    std::memcpy(result->data(), m.data(), count * sizeof(float_type));
    return result;
}

inline tensor resize2d_nearest(const tensor& in_vol, const shape2& target_size)
{
tensor out_vol(tensor_shape(target_size.height_, target_size.width_, in_vol.shape().depth_), 0);
Expand Down