
Commit

Fix docstring for sphinx autodoc
tanmayv25 committed Nov 29, 2023
1 parent f68f859 commit 9c9a336
Showing 18 changed files with 151 additions and 99 deletions.
4 changes: 3 additions & 1 deletion src/python/library/tritonclient/_plugin.py
@@ -30,7 +30,8 @@

class InferenceServerClientPlugin(ABC):
"""Every Triton Client Plugin should extend this class.
- Each plugin needs to implement the `__call__` method.
+ Each plugin needs to implement the :py:meth:`__call__` method.
"""

@abstractmethod
@@ -42,5 +43,6 @@ def __call__(self, request):
----------
request : Request
The request object.
"""
pass
2 changes: 1 addition & 1 deletion src/python/library/tritonclient/_request.py
@@ -29,7 +29,7 @@
class Request:
"""A request object.
- Attributes
+ Parameters
----------
headers : dict
A dictionary containing the request headers.
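Taken together, the two files above define the plugin contract: a plugin subclasses InferenceServerClientPlugin and implements `__call__`, which receives a Request whose `headers` dict it may inspect or mutate. A minimal sketch under those assumptions (the TokenAuthPlugin name and bearer-token scheme are illustrative, not part of the library):

    from tritonclient._plugin import InferenceServerClientPlugin


    class TokenAuthPlugin(InferenceServerClientPlugin):
        """Hypothetical plugin that attaches an auth header to every request."""

        def __init__(self, token):
            self._token = token

        def __call__(self, request):
            # `request.headers` is the dict documented in _request.py above.
            request.headers["authorization"] = "Bearer " + self._token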
12 changes: 12 additions & 0 deletions src/python/library/tritonclient/grpc/__init__.py
@@ -32,6 +32,7 @@
from tritonclient.utils import *

from .._plugin import InferenceServerClientPlugin
+ from .._request import Request
from ._client import MAX_GRPC_MESSAGE_SIZE, InferenceServerClient, KeepAliveOptions
from ._infer_input import InferInput
from ._infer_result import InferResult
@@ -59,3 +60,14 @@
"use versions <1.43.0 or >=1.51.1 to avoid leaks "
"(see https://github.com/grpc/grpc/issues/28513)."
)

+ __all__ = [
+ "InferenceServerClientPlugin",
+ "Request",
+ "InferenceServerClient",
+ "InferInput",
+ "InferRequestedOutput",
+ "InferResult",
+ "KeepAliveOptions",
+ "InferenceServerException"
+ ]
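The new `__all__` list pins down the package's public surface, which is what Sphinx autodoc and wildcard imports enumerate. A quick illustration of the effect (the URL assumes Triton's default gRPC port):

    # Only the names listed in __all__ are pulled in by the wildcard import.
    from tritonclient.grpc import *

    client = InferenceServerClient(url="localhost:8001")  # exported name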
33 changes: 20 additions & 13 deletions src/python/library/tritonclient/grpc/_client.py
@@ -982,6 +982,7 @@ def update_log_settings(
):
"""Update the global log settings.
Returns the log settings after the update.
Parameters
----------
settings: dict
@@ -1005,11 +1006,13 @@
InferenceServerExeption with message "Deadline Exceeded" when the
specified time elapses. The default value is None which means
client will wait for the response from the server.
Returns
-------
dict or protobuf message
The JSON dict or LogSettingsResponse message holding
the updated log settings.
Raises
------
InferenceServerException
@@ -1047,6 +1050,7 @@ def update_log_settings(

def get_log_settings(self, headers=None, as_json=False, client_timeout=None):
"""Get the global log settings.
Parameters
----------
headers: dict
@@ -1067,15 +1071,18 @@ def get_log_settings(self, headers=None, as_json=False, client_timeout=None):
InferenceServerExeption with message "Deadline Exceeded" when the
specified time elapses. The default value is None which means
client will wait for the response from the server.
Returns
-------
dict or protobuf message
The JSON dict or LogSettingsResponse message holding
the log settings.
Raises
------
InferenceServerException
If unable to get the log settings or has timed out.
"""
metadata = self._get_metadata(headers)
try:
@@ -1460,14 +1467,14 @@ def infer(
model_name: str
The name of the model to run inference.
inputs : list
- A list of InferInput objects, each describing data for a input
+ A list of :py:class:`InferInput` objects, each describing data for a input
tensor required by the model.
model_version : str
The version of the model to run inference. The default value
is an empty string which means then the server will choose
a version based on the model and internal policy.
outputs : list
- A list of InferRequestedOutput objects, each describing how the output
+ A list of :py:class:`InferRequestedOutput` objects, each describing how the output
data must be returned. If not specified all outputs produced
by the model will be returned using default settings.
request_id : str
@@ -1590,12 +1597,12 @@ def async_infer(
model_name: str
The name of the model to run inference.
inputs : list
- A list of InferInput objects, each describing data for a input
+ A list of :py:class:`InferInput` objects, each describing data for a input
tensor required by the model.
callback : function
Python function that is invoked once the request is completed.
The function must reserve the last two arguments (result, error)
- to hold InferResult and InferenceServerException
+ to hold :py:class:`InferResult` and :py:class:`InferenceServerException`
objects respectively which will be provided to the function when
executing the callback. The ownership of these objects will be given
to the user. The 'error' would be None for a successful inference.
@@ -1604,7 +1611,7 @@
is an empty string which means then the server will choose
a version based on the model and internal policy.
outputs : list
- A list of InferRequestedOutput objects, each describing how the output
+ A list of :py:class:`InferRequestedOutput` objects, each describing how the output
data must be returned. If not specified all outputs produced
by the model will be returned using default settings.
request_id : str
@@ -1668,13 +1675,13 @@ def async_infer(
Computations represented by a Future may be yet to be begun,
ongoing, or have already completed.
Note
----
This object can be used to cancel the inference request like
below:
- ----------
- future = async_infer(...)
- ret = future.cancel()
- ----------
+ >>> future = async_infer(...)
+ >>> ret = future.cancel()
Raises
------
@@ -1745,8 +1752,8 @@ def start_stream(
callback : function
Python function that is invoked upon receiving response from
the underlying stream. The function must reserve the last two
- arguments (result, error) to hold InferResult and
- InferenceServerException objects respectively
+ arguments (result, error) to hold :py:class:`InferResult` and
+ :py:class:`InferenceServerException` objects respectively
which will be provided to the function when executing the callback.
The ownership of these objects will be given to the user. The 'error'
would be None for a successful inference.
@@ -1830,14 +1837,14 @@ def async_stream_infer(
model_name: str
The name of the model to run inference.
inputs : list
- A list of InferInput objects, each describing data for a input
+ A list of :py:class:`InferInput` objects, each describing data for a input
tensor required by the model.
model_version: str
The version of the model to run inference. The default value
is an empty string which means then the server will choose
a version based on the model and internal policy.
outputs : list
- A list of InferRequestedOutput objects, each describing how the output
+ A list of :py:class:`InferRequestedOutput` objects, each describing how the output
data must be returned. If not specified all outputs produced
by the model will be returned using default settings.
request_id : str
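The docstrings above also convert the cancellation example into doctest form. End to end, an asynchronous request might look like the following sketch; the model name, tensor names, dtype, and shape are placeholders, not values from this commit:

    import numpy as np
    import tritonclient.grpc as grpcclient


    def on_complete(result, error):
        # Per the docstring, the last two callback arguments hold an
        # InferResult on success and an InferenceServerException on failure.
        if error is not None:
            print("inference failed:", error)
        else:
            print("output:", result.as_numpy("OUTPUT0"))


    client = grpcclient.InferenceServerClient(url="localhost:8001")
    inputs = [grpcclient.InferInput("INPUT0", [1, 16], "FP32")]
    inputs[0].set_data_from_numpy(np.zeros((1, 16), dtype=np.float32))

    future = client.async_infer("my_model", inputs, callback=on_complete)
    ret = future.cancel()  # cancels the request if it has not completed yet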
2 changes: 1 addition & 1 deletion src/python/library/tritonclient/grpc/_infer_result.py
@@ -32,7 +32,7 @@


class InferResult:
"""An object of InferResult class holds the response of
"""An object of :py:class:`InferResult` class holds the response of
an inference request and provide methods to retrieve
inference results.
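On the synchronous path, `infer()` returns an InferResult directly. Continuing the earlier sketch (the output name is a placeholder, and `get_response` is assumed from the client's existing API):

    result = client.infer("my_model", inputs)

    arr = result.as_numpy("OUTPUT0")  # one output tensor as a numpy array
    proto = result.get_response()     # the underlying protobuf response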
6 changes: 4 additions & 2 deletions src/python/library/tritonclient/grpc/_infer_stream.py
@@ -45,11 +45,13 @@ class _InferStream:
callback : function
Python function that is invoked upon receiving response from
the underlying stream. The function must reserve the last two
- arguments (result, error) to hold InferResult and
- InferenceServerException objects respectively which will be
+ arguments (result, error) to hold :py:class:`InferResult` and
+ :py:class:`InferenceServerException` objects respectively which will be
provided to the function when executing the callback. The
ownership of these objects will be given to the user. The
'error' would be None for a successful inference.
verbose : bool
Enables verbose mode if set True.
"""

def __init__(self, callback, verbose):
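The streaming callback documented here follows the same (result, error) convention. A sketch of how it is wired up through the public client, reusing the placeholders from the earlier sketch:

    def stream_callback(result, error):
        if error is not None:
            print("stream error:", error)
        else:
            print("stream result:", result.as_numpy("OUTPUT0"))


    client.start_stream(callback=stream_callback)
    client.async_stream_infer("my_model", inputs)
    client.stop_stream()  # drain pending responses and close the stream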
4 changes: 2 additions & 2 deletions src/python/library/tritonclient/grpc/_requested_output.py
@@ -31,7 +31,7 @@


class InferRequestedOutput:
"""An object of InferRequestedOutput class is used to describe a
"""An object of :py:class:`InferRequestedOutput` class is used to describe a
requested output tensor for an inference request.
Parameters
@@ -89,7 +89,7 @@ def set_shared_memory(self, region_name, byte_size, offset=0):

def unset_shared_memory(self):
"""Clears the shared memory option set by the last call to
- InferRequestedOutput.set_shared_memory(). After call to this
+ :py:meth:`InferRequestedOutput.set_shared_memory()`. After call to this
function requested output will no longer be returned in a
shared memory region.
"""
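As these docstrings describe, a requested output can be redirected into a shared-memory region and later switched back. A hedged sketch; the region name and size are illustrative, and the region must already be registered with the server:

    output = grpcclient.InferRequestedOutput("OUTPUT0")

    # Ask the server to write this output into the registered region.
    output.set_shared_memory("output0_region", byte_size=64)

    # Revert to returning the output inline in the response.
    output.unset_shared_memory()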
6 changes: 3 additions & 3 deletions src/python/library/tritonclient/grpc/_utils.py
@@ -32,7 +32,7 @@


def get_error_grpc(rpc_error):
"""Convert a gRPC error to an InferenceServerException.
"""Convert a gRPC error to an :py:class:`InferenceServerException`.
Parameters
----------
@@ -51,7 +51,7 @@ def get_error_grpc(rpc_error):


def get_cancelled_error(msg=None):
"""Get InferenceServerException object for a cancelled RPC.
"""Get :py:class:`InferenceServerException` object for a cancelled RPC.
Returns
-------
@@ -63,7 +63,7 @@ def get_cancelled_error(msg=None):


def raise_error_grpc(rpc_error):
"""Raise an InferenceServerException from a gRPC error.
"""Raise an :py:class:`InferenceServerException` from a gRPC error.
Parameters
----------
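These helpers centralize the translation from raw gRPC errors to the client's exception type. Conceptually they are used like the sketch below; `_call_server` and `stub_call` are illustrative names, not part of the module:

    import grpc

    from tritonclient.grpc._utils import raise_error_grpc


    def _call_server(stub_call):
        try:
            return stub_call()
        except grpc.RpcError as rpc_error:
            # Re-raise as the client's own exception type.
            raise_error_grpc(rpc_error)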
(diffs for the remaining 10 of the 18 changed files not shown)
