Skip to content

Commit

Permalink
Import fixes (#160)
Browse files Browse the repository at this point in the history
* Replaced .append with pd.concat for pandas upgrade
* Added resampy to poetry to fix librosa dependencies
* Fixed return type mismatches for Microfaune and BirdNET
---------
Co-authored-by: TQ Zhang <[email protected]>
  • Loading branch information
TQZhang04 authored Nov 17, 2023
1 parent 7d0aa42 commit 3ac09a5
Show file tree
Hide file tree
Showing 8 changed files with 43 additions and 21 deletions.
4 changes: 2 additions & 2 deletions PyHa/IsoAutio.py
Original file line number Diff line number Diff line change
Expand Up @@ -990,7 +990,7 @@ def generate_automated_labels_microfaune(
if annotations.empty:
annotations = new_entry
else:
annotations = annotations.append(new_entry)
annotations = pd.concat([annotations, new_entry])
except KeyboardInterrupt:
exit("Keyboard interrupt")
except BaseException as e:
Expand Down Expand Up @@ -1127,7 +1127,7 @@ def generate_automated_labels_tweetynet(
if annotations.empty:
annotations = new_entry
else:
annotations = annotations.append(new_entry)
annotations = pd.concat([annotations, new_entry])
except KeyboardInterrupt:
exit("Keyboard interrupt")
except BaseException as e:
Expand Down
9 changes: 5 additions & 4 deletions PyHa/birdnet_lite/analyze.py
Original file line number Diff line number Diff line change
Expand Up @@ -83,13 +83,14 @@ def splitSignal(sig, rate, overlap, seconds=3.0, minlen=1.5):
def readAudioData(path, overlap, sample_rate=48000):

print('READING AUDIO DATA...', end=' ', flush=True)

# Open file with librosa (uses ffmpeg or libav)
print("Path: ", path)
# Open file with librosa (uses ffmpeg or libav)
try:
sig, rate = librosa.load(path, sr=sample_rate, mono=True, res_type='kaiser_fast')
clip_length = librosa.get_duration(y=sig, sr=rate)
except:
return 0
except Exception as e:
print(e)
return (0, 0)
# Split audio into 3-second chunks
chunks = splitSignal(sig, rate, overlap)

Expand Down
2 changes: 1 addition & 1 deletion PyHa/microfaune_package/microfaune/audio.py
Original file line number Diff line number Diff line change
Expand Up @@ -124,7 +124,7 @@ def create_spec(data, fs, n_mels=32, n_fft=2048, hop_len=1024):
"""
# Calculate spectrogram
S = librosa.feature.melspectrogram(
data, sr=fs, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels)
y=data, sr=fs, n_fft=n_fft, hop_length=hop_len, n_mels=n_mels)
S = S.astype(np.float32)

# Convert power to dB
Expand Down
14 changes: 7 additions & 7 deletions PyHa/statistics.py
Original file line number Diff line number Diff line change
Expand Up @@ -263,25 +263,25 @@ def automated_labeling_statistics(
if statistics_df.empty:
statistics_df = clip_stats_df
else:
statistics_df = statistics_df.append(clip_stats_df)
statistics_df = pd.concat([statistics_df,clip_stats_df])
elif stats_type == "IoU":
IoU_Matrix = clip_IoU(clip_automated_df, clip_manual_df)
clip_stats_df = matrix_IoU_Scores(
IoU_Matrix, clip_manual_df, threshold)
if statistics_df.empty:
statistics_df = clip_stats_df
else:
statistics_df = statistics_df.append(clip_stats_df)
statistics_df = pd.concat([statistics_df, clip_stats_df])
except BaseException as e:
num_errors += 1
#print("Something went wrong with: " + clip)
#print(e)
print(e)
continue
if num_processed % 50 == 0:
print("Processed", num_processed, "clips in", int((time.time() - start_time) * 10) / 10.0, 'seconds')
start_time = time.time()
if num_errors > 0:
checkVerbose("Something went wrong with" + num_errors + "clips out of" + str(len(clips)) + "clips", verbose)
checkVerbose(f"Something went wrong with {num_errors} clips out of {len(clips)} clips", verbose)
statistics_df.reset_index(inplace=True, drop=True)
return statistics_df

Expand Down Expand Up @@ -751,7 +751,7 @@ def dataset_Catch(automated_df, manual_df):
if manual_df_with_Catch.empty:
manual_df_with_Catch = clip_manual_df
else:
manual_df_with_Catch = manual_df_with_Catch.append(clip_manual_df)
manual_df_with_Catch = pd.concat([manual_df_with_Catch,clip_manual_df])
# Resetting the indices
manual_df_with_Catch.reset_index(inplace=True, drop=True)
return manual_df_with_Catch
Expand Down Expand Up @@ -825,7 +825,7 @@ def clip_statistics(
clip_statistics = automated_labeling_statistics(temp_automated_class_df, temp_manual_class_df, stats_type = stats_type, threshold = threshold)
else:
temp_df = automated_labeling_statistics(temp_automated_class_df, temp_manual_class_df, stats_type = stats_type, threshold = threshold)
clip_statistics = clip_statistics.append(temp_df)
clip_statistics = pd.concat([clip_statistics,temp_df])
clip_statistics.reset_index(inplace=True,drop=True)
return clip_statistics

Expand Down Expand Up @@ -860,6 +860,6 @@ def class_statistics(clip_statistics):
class_statistics = global_statistics(class_df, manual_id = class_)
else:
temp_df = global_statistics(class_df, manual_id = class_)
class_statistics = class_statistics.append(temp_df)
class_statistics = pd.concat([class_statistics,temp_df])
class_statistics.reset_index(inplace=True,drop=True)
return class_statistics
8 changes: 4 additions & 4 deletions PyHa/tweetynet_package/tweetynet/Load_data_functions.py
Original file line number Diff line number Diff line change
Expand Up @@ -216,12 +216,12 @@ def predictions_to_kaleidoscope(predictions, SIGNAL, audio_dir, audio_file, manu
raise BaseException("No birds were detected!!")

if offset.iloc[0] != 0:
kaleidoscope_df.append(pd.DataFrame({"OFFSET": [0], "DURATION": [offset.iloc[0]]}))
kaleidoscope_df.append(intermediary_df[intermediary_df["DURATION"] >= 2*time_bin_seconds])
pd.concat([kaleidoscope_df,pd.DataFrame({"OFFSET": [0], "DURATION": [offset.iloc[0]]})])
pd.concat([kaleidoscope_df,intermediary_df[intermediary_df["DURATION"] >= 2*time_bin_seconds]])

if offset.iloc[-1] < predictions.iloc[-1]["time_bins"]:
kaleidoscope_df.append(pd.DataFrame({"OFFSET": [offset.iloc[-1]], "DURATION": [predictions.iloc[-1]["time_bins"] +
predictions.iloc[1]["time_bins"]]}))
pd.concat([kaleidoscope_df,pd.DataFrame({"OFFSET": [offset.iloc[-1]], "DURATION": [predictions.iloc[-1]["time_bins"] +
predictions.iloc[1]["time_bins"]]})])

kaleidoscope_df = pd.concat(kaleidoscope_df)
kaleidoscope_df = kaleidoscope_df.reset_index(drop=True)
Expand Down
2 changes: 1 addition & 1 deletion PyHa/tweetynet_package/tweetynet/TweetyNetModel.py
Original file line number Diff line number Diff line change
Expand Up @@ -111,7 +111,7 @@ def predict(self, test_dataset, model_weights=None, norm=False):
bins = st_time + (int(uids[0].split("_")[0])*window_size)
d = {"uid": uids[0], "pred": pred, "label": labels, "time_bins": bins}
new_preds = pd.DataFrame(d)
predictions = predictions.append(new_preds)
predictions = pd.concat([predictions, new_preds])

if norm:
local_score = self.normalize(local_score, 0, 1)
Expand Down
22 changes: 21 additions & 1 deletion poetry.lock

Some generated files are not rendered by default. Learn more about how customized files appear on GitHub.

3 changes: 2 additions & 1 deletion pyproject.toml
Original file line number Diff line number Diff line change
Expand Up @@ -59,4 +59,5 @@ tensorflow-cpu-aws = { version = "^2.13.0", platform = "linux", markers = "platf
tensorflow-io-gcs-filesystem = [
{ version = ">= 0.23.1", markers = "platform_machine!='arm64' or platform_system!='Darwin'" },
{ version = "< 0.32.0", markers = "platform_system == 'Windows'" }
]
]
resampy = "^0.4.2"

0 comments on commit 3ac09a5

Please sign in to comment.