diff --git a/keras_cv/models/feature_extractor/clip/clip_processor.py b/keras_cv/models/feature_extractor/clip/clip_processor.py
index 1771e05ffa..1a07a3d710 100644
--- a/keras_cv/models/feature_extractor/clip/clip_processor.py
+++ b/keras_cv/models/feature_extractor/clip/clip_processor.py
@@ -23,6 +23,7 @@
 except ImportError:
     keras_nlp = None
 
+
 @keras_cv_export("keras_cv.models.feature_extractor.CLIPProcessor")
 class CLIPProcessor:
     """
diff --git a/keras_cv/models/feature_extractor/clip/clip_tokenizer.py b/keras_cv/models/feature_extractor/clip/clip_tokenizer.py
index 9219d7cf15..c0aab3cfa0 100644
--- a/keras_cv/models/feature_extractor/clip/clip_tokenizer.py
+++ b/keras_cv/models/feature_extractor/clip/clip_tokenizer.py
@@ -149,8 +149,9 @@ def process_unseen_tokens():
             self._bpe_merge_and_update_cache(unseen_tokens)
             return self.cache.lookup(flat_tokens)
 
-        # If `has_unseen_words == True`, it means not all tokens are in cache,
-        # we will process the unseen tokens. Otherwise return the cache lookup.
+        # If `has_unseen_words == True`, it means not all tokens are
+        # in cache; we will process the unseen tokens. Otherwise,
+        # return the cache lookup.
         tokenized_words = tf.cond(
             has_unseen_words,
             process_unseen_tokens,
diff --git a/keras_cv/utils/conditional_imports.py b/keras_cv/utils/conditional_imports.py
index 0f0ea2b890..2ae1ec88b0 100644
--- a/keras_cv/utils/conditional_imports.py
+++ b/keras_cv/utils/conditional_imports.py
@@ -83,4 +83,4 @@ def assert_keras_nlp_installed(symbol_name):
             f"{symbol_name} requires the `keras_nlp` package. "
             "Please install the package using "
             "`pip install keras_nlp`."
-        )
\ No newline at end of file
+        )
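
Note on the comment rewrapped in clip_tokenizer.py: it describes a tf.cond pattern in which a cached BPE lookup is reused unless some tokens have not been seen before. The standalone sketch below only illustrates that pattern; the dict cache and the lookup/tokenize helpers are simplified stand-ins, not the tokenizer's actual internals.

# Minimal sketch of the "process unseen tokens, otherwise reuse the cache
# lookup" pattern described by the comment. All names and data here are
# illustrative stand-ins, not the CLIPTokenizer implementation.
import tensorflow as tf

cache = {"photo": 0, "of": 1}

def lookup(tokens):
    # Cached ids, with -1 marking tokens that are not in the cache yet.
    return tf.constant([cache.get(t, -1) for t in tokens], dtype=tf.int32)

def tokenize(tokens):
    cache_lookup = lookup(tokens)
    has_unseen_words = tf.reduce_any(cache_lookup < 0)

    def process_unseen_tokens():
        # Stand-in for `_bpe_merge_and_update_cache` followed by a re-lookup.
        for t in tokens:
            cache.setdefault(t, len(cache))
        return lookup(tokens)

    # Only run the expensive branch when at least one token missed the cache;
    # otherwise return the lookup that was already computed.
    return tf.cond(has_unseen_words, process_unseen_tokens, lambda: cache_lookup)

print(tokenize(["photo", "cat"]).numpy())  # unseen "cat" -> cache is updated
print(tokenize(["photo", "cat"]).numpy())  # fully cached -> no reprocessing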