🐺 fix python tokenizer path #178

Open · wants to merge 8 commits into main
4 changes: 2 additions & 2 deletions Dockerfile
@@ -1,4 +1,4 @@
-FROM nvcr.io/nvidia/tritonserver:22.07-py3
+FROM nvcr.io/nvidia/tritonserver:23.06-py3
 
 # see .dockerignore to check what is transfered
 
@@ -8,7 +8,7 @@ RUN apt-get update && \
     python3-distutils \
     python3-venv \
     python3-pip \
-    apt-get clean
+    && apt-get clean
 
 ARG UID=1000
 ARG GID=1000
2 changes: 1 addition & 1 deletion demo/generative-model/gpt2.ipynb
@@ -146,7 +146,7 @@
 "import tensorrt as trt\n",
 "import torch\n",
 "from tensorrt import ICudaEngine\n",
-"from tensorrt.tensorrt import Logger, Runtime\n",
+"from tensorrt import Logger, Runtime\n",
 "from transformers import AutoTokenizer, BatchEncoding, GPT2LMHeadModel, AutoModelForCausalLM\n",
 "from transformers.modeling_outputs import BaseModelOutputWithPastAndCrossAttentions\n",
 "from transformer_deploy.utils.generative_model import GPTModelWrapper\n",
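Across this PR the code change is the same one-liner repeated: every import from the private `tensorrt.tensorrt` submodule moves to the public top-level `tensorrt` namespace, which is the layout the 8.6 wheels ship. A minimal sketch of the new import style (assuming a TensorRT >= 8.6 wheel is installed):

```python
# Minimal sketch, assuming a TensorRT >= 8.6 wheel is installed.
import tensorrt as trt
from tensorrt import ICudaEngine, Logger, Runtime  # public, top-level imports

# The old private-submodule path is what this PR removes everywhere:
# from tensorrt.tensorrt import Logger, Runtime  # no longer importable with 8.6 wheels

trt_logger = Logger(Logger.WARNING)        # the severity enum lives on Logger itself
runtime: Runtime = trt.Runtime(trt_logger)  # same objects, new import path
```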
2 changes: 1 addition & 1 deletion demo/quantization/quantization_end_to_end.ipynb
@@ -228,7 +228,7 @@
 "import torch\n",
 "import transformers\n",
 "from datasets import load_dataset, load_metric\n",
-"from tensorrt.tensorrt import IExecutionContext, Logger, Runtime\n",
+"from tensorrt import IExecutionContext, Logger, Runtime\n",
 "\n",
 "from transformers import (\n",
 "    AutoModelForSequenceClassification,\n",
2 changes: 1 addition & 1 deletion demo/torchdynamo/benchmark.ipynb
@@ -183,7 +183,7 @@
 "\n",
 "import gc\n",
 "import tensorrt as trt\n",
-"from tensorrt.tensorrt import ICudaEngine, Logger, Runtime\n",
+"from tensorrt import ICudaEngine, Logger, Runtime\n",
 "import onnx\n",
 "from transformer_deploy.backends.trt_utils import build_engine, save_engine"
 ]
2 changes: 1 addition & 1 deletion demo/torchdynamo/dynamo_utils.py
@@ -25,7 +25,7 @@
 from matplotlib.axes import Axes
 from matplotlib.figure import Figure
 from onnxruntime import GraphOptimizationLevel
-from tensorrt.tensorrt import Runtime
+from tensorrt import Runtime
 from torch._C._autograd import ProfilerActivity
 from torchdynamo.eval_frame import OptimizeContext
 from transformers import PreTrainedModel
4 changes: 2 additions & 2 deletions docs/python.md
@@ -21,7 +21,7 @@ Now we need to convert to TensorRT:
 
 ```python
 import tensorrt as trt
-from tensorrt.tensorrt import Logger, Runtime
+from tensorrt import Logger, Runtime
 
 from transformer_deploy.backends.trt_utils import build_engine
 
@@ -50,7 +50,7 @@ Now the engine is ready, we can prepare the inference:
 
 ```python
 import torch
-from tensorrt.tensorrt import IExecutionContext
+from tensorrt import IExecutionContext
 
 from transformer_deploy.backends.trt_utils import get_binding_idxs
 
2 changes: 1 addition & 1 deletion requirements_gpu.txt
@@ -1,5 +1,5 @@
 onnxruntime-gpu==1.13.1
-nvidia-tensorrt==8.4.1.5
 onnx_graphsurgeon
 polygraphy
 cupy-cuda117
+tensorrt==8.6.1
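For anyone upgrading an existing environment: the old `nvidia-tensorrt` pip package was superseded by the plain `tensorrt` package in later releases, so this is a rename plus a version bump rather than two unrelated edits. A quick post-install sanity check (a sketch; it assumes the new pin resolved cleanly):

```python
# Post-install sanity check (sketch; assumes the pinned wheel installed cleanly).
import tensorrt as trt

print(trt.__version__)  # expected to start with "8.6"
assert trt.__version__.startswith("8.6"), "requirements_gpu.txt pins tensorrt==8.6.1"
```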
2 changes: 1 addition & 1 deletion src/transformer_deploy/backends/trt_utils.py
@@ -24,7 +24,7 @@
 import tensorrt as trt
 import torch
 from tensorrt import ICudaEngine, IExecutionContext, ILayer, INetworkDefinition, Logger, Runtime
-from tensorrt.tensorrt import Builder, IBuilderConfig, IElementWiseLayer, IOptimizationProfile, IReduceLayer, OnnxParser
+from tensorrt import Builder, IBuilderConfig, IElementWiseLayer, IOptimizationProfile, IReduceLayer, OnnxParser
 
 
 @dataclass
2 changes: 1 addition & 1 deletion src/transformer_deploy/convert.py
@@ -295,7 +295,7 @@ def get_pytorch_infer(model: PreTrainedModel, cuda: bool, task: str):
     logging.info("preparing TensorRT (FP16) benchmark")
     try:
         import tensorrt as trt
-        from tensorrt.tensorrt import ICudaEngine, Logger, Runtime
+        from tensorrt import ICudaEngine, Logger, Runtime
 
         from transformer_deploy.backends.trt_utils import build_engine, load_engine, save_engine
     except ImportError:
2 changes: 1 addition & 1 deletion src/transformer_deploy/t5_utils/conversion_utils.py
@@ -630,7 +630,7 @@ def onnx_to_tensorrt_model(
     runtime, onnx_model_path, trt_logger, workspace_size, quantization, tensorrt_model_path, **kwargs
 ) -> Callable[[Dict[str, torch.Tensor]], Dict[str, torch.Tensor]]:
     try:
-        from tensorrt.tensorrt import ICudaEngine
+        from tensorrt import ICudaEngine
 
         from transformer_deploy.backends.trt_utils import build_engine, load_engine, save_engine
 
2 changes: 1 addition & 1 deletion src/transformer_deploy/utils/python_tokenizer.py
@@ -43,7 +43,7 @@ def initialize(self, args: Dict[str, str]) -> None:
         """
         # more variables in https://github.com/triton-inference-server/python_backend/blob/main/src/python.cc
 
-        path: str = str(Path(args["model_repository"]).parent.absolute())
+        path: str = str(Path(args["model_repository"]).absolute() / args["model_version"])
         self.tokenizer = AutoTokenizer.from_pretrained(path)
         model_config = AutoConfig.from_pretrained(path)
         self.model_input_names = self.tokenizer.model_input_names
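The tokenizer fix itself is easiest to read with concrete values: on the newer Triton image, `args["model_repository"]` hands the python backend the model's own directory while the tokenizer files sit in the version subdirectory, so the old `.parent` resolved one level too high. A small illustration (the repository path and version are hypothetical; only the two path expressions come from the diff):

```python
# Illustration only: a hypothetical Triton layout /models/transformer_tokenize/1/
from pathlib import Path

args = {
    "model_repository": "/models/transformer_tokenize",  # passed in by the python backend
    "model_version": "1",
}

old_path = str(Path(args["model_repository"]).parent.absolute())
new_path = str(Path(args["model_repository"]).absolute() / args["model_version"])

print(old_path)  # /models -> no tokenizer files here, AutoTokenizer.from_pretrained fails
print(new_path)  # /models/transformer_tokenize/1 -> the version folder holding the tokenizer
```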