diff --git a/PyHa/IsoAutio.py b/PyHa/IsoAutio.py index 81cb493..a62edb3 100644 --- a/PyHa/IsoAutio.py +++ b/PyHa/IsoAutio.py @@ -990,7 +990,7 @@ def generate_automated_labels_microfaune( if annotations.empty: annotations = new_entry else: - annotations = annotations.append(new_entry) + annotations = pd.concat([annotations, new_entry]) except KeyboardInterrupt: exit("Keyboard interrupt") except BaseException as e: @@ -1127,7 +1127,7 @@ def generate_automated_labels_tweetynet( if annotations.empty: annotations = new_entry else: - annotations = annotations.append(new_entry) + annotations = pd.concat([annotations, new_entry]) except KeyboardInterrupt: exit("Keyboard interrupt") except BaseException as e: diff --git a/PyHa/birdnet_lite/analyze.py b/PyHa/birdnet_lite/analyze.py index 863f0e2..5366c90 100644 --- a/PyHa/birdnet_lite/analyze.py +++ b/PyHa/birdnet_lite/analyze.py @@ -83,13 +83,14 @@ def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5): def readAudioData(path, overlap, sample_rate=48000): print('READING AUDIO DATA...', end=' ', flush=True) - - # Open file with librosa (uses ffmpeg or libav) + print("Path: ", path) + # Open file with librosa (uses ffmpeg or libav) try: sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast') clip_length = librosa.get_duration(y=sig, sr=rate) - except: - return 0 + except Exception as e: + print(e) + return (0, 0) # Split audio into 3-second chunks chunks = splitSignal(sig, rate, overlap) diff --git a/PyHa/microfaune_package/microfaune/audio.py b/PyHa/microfaune_package/microfaune/audio.py index c1af3c0..dd6f075 100644 --- a/PyHa/microfaune_package/microfaune/audio.py +++ b/PyHa/microfaune_package/microfaune/audio.py @@ -124,7 +124,7 @@ def create_spec(data, fs, n_mels=32, n_fft=2048, hop_len=1024): """ # Calculate spectrogram S = librosa.feature.melspectrogram( - data, sr=fs, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels) + y=data, sr=fs, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels) S = 
S.astype(np.float32) # Convert power to dB diff --git a/PyHa/statistics.py b/PyHa/statistics.py index bfd3066..72ec75d 100644 --- a/PyHa/statistics.py +++ b/PyHa/statistics.py @@ -263,7 +263,7 @@ def automated_labeling_statistics( if statistics_df.empty: statistics_df = clip_stats_df else: - statistics_df = statistics_df.append(clip_stats_df) + statistics_df = pd.concat([statistics_df,clip_stats_df]) elif stats_type == "IoU": IoU_Matrix = clip_IoU(clip_automated_df, clip_manual_df) clip_stats_df = matrix_IoU_Scores( @@ -271,17 +271,17 @@ def automated_labeling_statistics( if statistics_df.empty: statistics_df = clip_stats_df else: - statistics_df = statistics_df.append(clip_stats_df) + statistics_df = pd.concat([statistics_df, clip_stats_df]) except BaseException as e: num_errors += 1 #print("Something went wrong with: " + clip) - #print(e) + print(e) continue if num_processed % 50 == 0: print("Processed", num_processed, "clips in", int((time.time() - start_time) * 10) / 10.0, 'seconds') start_time = time.time() if num_errors > 0: - checkVerbose("Something went wrong with" + num_errors + "clips out of" + str(len(clips)) + "clips", verbose) + checkVerbose(f"Something went wrong with {num_errors} clips out of {len(clips)} clips", verbose) statistics_df.reset_index(inplace=True, drop=True) return statistics_df @@ -751,7 +751,7 @@ def dataset_Catch(automated_df, manual_df): if manual_df_with_Catch.empty: manual_df_with_Catch = clip_manual_df else: - manual_df_with_Catch = manual_df_with_Catch.append(clip_manual_df) + manual_df_with_Catch = pd.concat([manual_df_with_Catch,clip_manual_df]) # Resetting the indices manual_df_with_Catch.reset_index(inplace=True, drop=True) return manual_df_with_Catch @@ -825,7 +825,7 @@ def clip_statistics( clip_statistics = automated_labeling_statistics(temp_automated_class_df, temp_manual_class_df, stats_type = stats_type, threshold = threshold) else: temp_df = automated_labeling_statistics(temp_automated_class_df, temp_manual_class_df, 
stats_type = stats_type, threshold = threshold) - clip_statistics = clip_statistics.append(temp_df) + clip_statistics = pd.concat([clip_statistics,temp_df]) clip_statistics.reset_index(inplace=True,drop=True) return clip_statistics @@ -860,6 +860,6 @@ def class_statistics(clip_statistics): class_statistics = global_statistics(class_df, manual_id = class_) else: temp_df = global_statistics(class_df, manual_id = class_) - class_statistics = class_statistics.append(temp_df) + class_statistics = pd.concat([class_statistics,temp_df]) class_statistics.reset_index(inplace=True,drop=True) return class_statistics diff --git a/PyHa/tweetynet_package/tweetynet/Load_data_functions.py b/PyHa/tweetynet_package/tweetynet/Load_data_functions.py index 260742a..4f8ee3a 100644 --- a/PyHa/tweetynet_package/tweetynet/Load_data_functions.py +++ b/PyHa/tweetynet_package/tweetynet/Load_data_functions.py @@ -216,12 +216,12 @@ def predictions_to_kaleidoscope(predictions, SIGNAL, audio_dir, audio_file, manu raise BaseException("No birds were detected!!") if offset.iloc[0] != 0: - kaleidoscope_df.append(pd.DataFrame({"OFFSET": [0], "DURATION": [offset.iloc[0]]})) - kaleidoscope_df.append(intermediary_df[intermediary_df["DURATION"] >= 2*time_bin_seconds]) + kaleidoscope_df.append(pd.DataFrame({"OFFSET": [0], "DURATION": [offset.iloc[0]]})) + kaleidoscope_df.append(intermediary_df[intermediary_df["DURATION"] >= 2*time_bin_seconds]) if offset.iloc[-1] < predictions.iloc[-1]["time_bins"]: - kaleidoscope_df.append(pd.DataFrame({"OFFSET": [offset.iloc[-1]], "DURATION": [predictions.iloc[-1]["time_bins"] + - predictions.iloc[1]["time_bins"]]})) + kaleidoscope_df.append(pd.DataFrame({"OFFSET": [offset.iloc[-1]], "DURATION": [predictions.iloc[-1]["time_bins"] + + predictions.iloc[1]["time_bins"]]})) kaleidoscope_df = pd.concat(kaleidoscope_df) kaleidoscope_df = kaleidoscope_df.reset_index(drop=True) diff --git a/PyHa/tweetynet_package/tweetynet/TweetyNetModel.py 
b/PyHa/tweetynet_package/tweetynet/TweetyNetModel.py index 013cd61..5627194 100644 --- a/PyHa/tweetynet_package/tweetynet/TweetyNetModel.py +++ b/PyHa/tweetynet_package/tweetynet/TweetyNetModel.py @@ -111,7 +111,7 @@ def predict(self, test_dataset, model_weights=None, norm=False): bins = st_time + (int(uids[0].split("_")[0])*window_size) d = {"uid": uids[0], "pred": pred, "label": labels, "time_bins": bins} new_preds = pd.DataFrame(d) - predictions = predictions.append(new_preds) + predictions = pd.concat([predictions, new_preds]) if norm: local_score = self.normalize(local_score, 0, 1) diff --git a/poetry.lock b/poetry.lock index 5b8c56b..28d9055 100644 --- a/poetry.lock +++ b/poetry.lock @@ -3326,6 +3326,26 @@ requests = ">=2.0.0" [package.extras] rsa = ["oauthlib[signedtoken] (>=3.0.0)"] +[[package]] +name = "resampy" +version = "0.4.2" +description = "Efficient signal resampling" +optional = false +python-versions = "*" +files = [ + {file = "resampy-0.4.2-py3-none-any.whl", hash = "sha256:4340b6c4e685a865621dfcf016e2a3dd49d865446b6025e30fe88567f22e052e"}, + {file = "resampy-0.4.2.tar.gz", hash = "sha256:0a469e6ddb89956f4fd6c88728300e4bbd186fae569dd4fd17dae51a91cbaa15"}, +] + +[package.dependencies] +numba = ">=0.53" +numpy = ">=1.17" + +[package.extras] +design = ["optuna (>=2.10.0)"] +docs = ["numpydoc", "sphinx (!=1.3.1)"] +tests = ["pytest (<8)", "pytest-cov", "scipy (>=1.0)"] + [[package]] name = "rfc3339-validator" version = "0.1.4" @@ -4571,4 +4591,4 @@ testing = ["big-O", "jaraco.functools", "jaraco.itertools", "more-itertools", "p [metadata] lock-version = "2.0" python-versions = ">=3.8,<3.11" -content-hash = "7e0749012410af6844709f8ea8097f02662b75029845f23974cda591350a7572" +content-hash = "c3c477d794e35255310d81b8c3da8ba286d9c6782a724496af086809b6637402" diff --git a/pyproject.toml b/pyproject.toml index 5735088..787f532 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -59,4 +59,5 @@ tensorflow-cpu-aws = { version = "^2.13.0", platform = "linux", 
markers = "platf tensorflow-io-gcs-filesystem = [ { version = ">= 0.23.1", markers = "platform_machine!='arm64' or platform_system!='Darwin'" }, { version = "< 0.32.0", markers = "platform_system == 'Windows'" } -] \ No newline at end of file +] +resampy = "^0.4.2"