Skip to content

Commit

Permalink
Support Clara v3.0 Inference API for py/cpp client (#65)
Browse files Browse the repository at this point in the history
* Support Clara v3.0 Inference API for py/cpp client

* add missing aiaa-inference.cpp

* fixed comments

Co-authored-by: Sachidanand Alle <[email protected]>
  • Loading branch information
SachidanandAlle and SachidanandAlle authored Mar 16, 2020
1 parent 6bddd5a commit 313da51
Show file tree
Hide file tree
Showing 12 changed files with 271 additions and 24 deletions.
15 changes: 15 additions & 0 deletions cpp-client/include/nvidia/aiaa/client.h
Original file line number Diff line number Diff line change
Expand Up @@ -173,6 +173,21 @@ class AIAA_CLIENT_API Client {
int deepgrow(const Model &model, const PointSet &foregroundPointSet, const PointSet &backgroundPointSet, const std::string &inputImageFile,
const std::string &outputImageFile, const std::string &sessionId = "") const;

/*!
@brief This API is used to run generic inference on input image
@param[in] model Model to be used
@param[in] params Json String which will be an input for AIAA to run the model inference
@param[in] inputImageFile Input image filename which will be sent to AIAA for inference action
@param[in] outputImageFile Output image file where Result mask is stored
@param[in] sessionId If *session_id* is not empty then *inputImageFile* will be ignored
@retval JSON string representing the output response from AIAA
@throw nvidia.aiaa.error.101 in case of connect error
 @throw nvidia.aiaa.error.103 in case of ITK error related to image processing
*/
std::string inference(const Model &model, const std::string &params, const std::string &inputImageFile, const std::string &outputImageFile,
const std::string &sessionId = "") const;
/*!
@brief 3D binary mask to polygon representation conversion
@param[in] pointRatio Point Ratio
Expand Down
12 changes: 7 additions & 5 deletions cpp-client/include/nvidia/aiaa/exception.h
Original file line number Diff line number Diff line change
Expand Up @@ -51,7 +51,8 @@ namespace aiaa {
nvidia.aiaa.error.103 | Failed to process ITK Operations.
nvidia.aiaa.error.104 | Invalid Arguments.
nvidia.aiaa.error.105 | AIAA Session Timeout.
nvidia.aiaa.error.106 | System/Unknown Error.
nvidia.aiaa.error.106 | AIAA Response Error.
nvidia.aiaa.error.107 | System/Unknown Error.
*/

class exception : public std::exception {
Expand All @@ -62,13 +63,14 @@ class exception : public std::exception {
RESPONSE_PARSE_ERROR = 102, /// Failed to parse AIAA Server Response
ITK_PROCESS_ERROR = 103, /// Failed to process ITK Operations
INVALID_ARGS_ERROR = 104, /// Invalid Arguments
AIAA_SESSION_TIMEOUT = 105, /// AIAA Session Timeout
SYSTEM_ERROR = 106, /// System/Unknown Error
AIAA_SESSION_TIMEOUT = 105, /// AIAA Session Timeout
AIAA_RESPONSE_ERROR = 106, /// AIAA Response Error
SYSTEM_ERROR = 107, /// System/Unknown Error
};

/// Message String for each enum type
const std::string messages[6] = { "Failed to communicate to AIAA Server", "Failed to parse AIAA Server Response",
"Failed to process ITK Operations", "Invalid Arguments", "AIAA Session Timeout", "System/Unknown Error" };
const std::string messages[7] = { "Failed to communicate to AIAA Server", "Failed to parse AIAA Server Response",
"Failed to process ITK Operations", "Invalid Arguments", "AIAA Session Timeout", "AIAA Response Error", "System/Unknown Error" };

/// returns the explanatory string
const char* what() const noexcept override {
Expand Down
27 changes: 27 additions & 0 deletions cpp-client/src/client.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -43,6 +43,7 @@ const std::string EP_MODELS = "/v1/models";
const std::string EP_DEXTRA_3D = "/v1/dextr3d";
const std::string EP_DEEPGROW = "/v1/deepgrow";
const std::string EP_SEGMENTATION = "/v1/segmentation";
const std::string EP_INFERENCE = "/v1/inference";
const std::string EP_MASK_TO_POLYGON = "/v1/mask2polygon";
const std::string EP_FIX_POLYGON = "/v1/fixpolygon";
const std::string EP_SESSION = "/session/";
Expand Down Expand Up @@ -223,6 +224,32 @@ int Client::deepgrow(const Model &model, const PointSet &foregroundPointSet, con
return 0;
}

std::string Client::inference(const Model &model, const std::string &params, const std::string &inputImageFile, const std::string &outputImageFile,
                              const std::string &sessionId) const {
  // Generic inference entry point: POSTs the params JSON (and optionally the
  // input image) to the AIAA /v1/inference endpoint and returns the server's
  // JSON response. The result mask, if any, is written to outputImageFile.
  if (model.name.empty()) {
    AIAA_LOG_WARN("Selected model is EMPTY");
    throw exception(exception::INVALID_ARGS_ERROR, "Model is EMPTY");
  }

  AIAA_LOG_DEBUG("Model: " << model.toJson());
  AIAA_LOG_DEBUG("Params: " << params);
  AIAA_LOG_DEBUG("InputImageFile: " << inputImageFile);
  AIAA_LOG_DEBUG("OutputImageFile: " << outputImageFile);
  AIAA_LOG_DEBUG("SessionId: " << sessionId);

  // The server always expects a JSON body; default to an empty object.
  const std::string body = params.empty() ? std::string("{}") : params;

  std::string endpoint = serverUri + EP_INFERENCE + "?model=" + CurlUtils::encode(model.name);

  // A valid session id replaces the image upload: the server reuses the
  // image previously attached to that session, so inputImageFile is ignored.
  std::string imageToSend;
  if (sessionId.empty()) {
    imageToSend = inputImageFile;
  } else {
    endpoint += "&session_id=" + CurlUtils::encode(sessionId);
  }

  return CurlUtils::doMethod("POST", endpoint, body, imageToSend, outputImageFile, timeoutInSec);
}

PolygonsList Client::maskToPolygon(int pointRatio, const std::string &inputImageFile) const {
std::string uri = serverUri + EP_MASK_TO_POLYGON;
std::string paramStr = "{\"more_points\":" + Utils::lexical_cast<std::string>(pointRatio) + "}";
Expand Down
17 changes: 10 additions & 7 deletions cpp-client/src/curlutils.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -187,18 +187,21 @@ std::string CurlUtils::doMethod(const std::string &method, const std::string &ur
std::istream &is = session.receiveResponse(res);
AIAA_LOG_DEBUG("Status: " << res.getStatus() << "; Reason: " << res.getReason() << "; Content-type: " << res.getContentType());

std::stringstream response;
Poco::StreamCopier::copyStream(is, response);

if (res.getStatus() == 440) {
throw exception(exception::AIAA_SESSION_TIMEOUT, res.getReason().c_str());
throw exception(exception::AIAA_SESSION_TIMEOUT, response.str().c_str());
}
if (res.getStatus() != 200) {
throw exception(exception::AIAA_SERVER_ERROR, res.getReason().c_str());
AIAA_LOG_INFO("Response: " << response.str());
throw exception(exception::AIAA_RESPONSE_ERROR, (res.getReason() + " => " + response.str()).c_str());
}

std::stringstream response;
Poco::StreamCopier::copyStream(is, response);

if (res.getContentType().find("multipart/form-data") == std::string::npos) {
AIAA_LOG_INFO("Expected Multipart Response but received: " << res.getContentType());
if (res.getContentType().find("multipart") == std::string::npos) {
if (!resultFileName.empty()) {
AIAA_LOG_INFO("Expected Multipart Response but received: " << res.getContentType());
}
AIAA_LOG_DEBUG("Received response from server: \n" << response.str());

textReponse = response.str();
Expand Down
4 changes: 4 additions & 0 deletions cpp-client/tools/CMakeLists.txt
Original file line number Diff line number Diff line change
Expand Up @@ -70,6 +70,9 @@ target_link_libraries(nvidiaAIAASession NvidiaAIAAClient ${CMAKE_DL_LIBS})
add_executable(nvidiaAIAADeepgrow aiaa/aiaa-deepgrow.cpp)
target_link_libraries(nvidiaAIAADeepgrow NvidiaAIAAClient ${CMAKE_DL_LIBS})

add_executable(nvidiaAIAAInference aiaa/aiaa-inference.cpp)
target_link_libraries(nvidiaAIAAInference NvidiaAIAAClient ${CMAKE_DL_LIBS})

# Install Targets
install(TARGETS DicomToNifti DESTINATION bin)
install(TARGETS NiftiToDicom DESTINATION bin)
Expand All @@ -82,3 +85,4 @@ install(TARGETS nvidiaAIAAMaskPolygon DESTINATION bin)
install(TARGETS nvidiaAIAASampling3D DESTINATION bin)
install(TARGETS nvidiaAIAASession DESTINATION bin)
install(TARGETS nvidiaAIAADeepgrow DESTINATION bin)
install(TARGETS nvidiaAIAAInference DESTINATION bin)
96 changes: 96 additions & 0 deletions cpp-client/tools/aiaa/aiaa-inference.cpp
Original file line number Diff line number Diff line change
@@ -0,0 +1,96 @@
/*
* Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
*
* Redistribution and use in source and binary forms, with or without
* modification, are permitted provided that the following conditions
* are met:
* * Redistributions of source code must retain the above copyright
* notice, this list of conditions and the following disclaimer.
* * Redistributions in binary form must reproduce the above copyright
* notice, this list of conditions and the following disclaimer in the
* documentation and/or other materials provided with the distribution.
* * Neither the name of NVIDIA CORPORATION nor the names of its
* contributors may be used to endorse or promote products derived
* from this software without specific prior written permission.
*
* THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS ``AS IS'' AND ANY
* EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
* IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
* PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
* CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
* EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
* PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
* PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY
* OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
* (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
* OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
*/

#include <nvidia/aiaa/client.h>
#include <nvidia/aiaa/utils.h>

#include "../commonutils.h"
#include <chrono>

int main(int argc, char **argv) {
  // Command-line tool wrapping nvidia::aiaa::Client::inference().
  // Prints usage when invoked with no arguments or with -h.
  if (argc < 2 || cmdOptionExists(argv, argv + argc, "-h")) {
    std::cout << "Usage:: <COMMAND> <OPTIONS>\n"
        "  |-h        (Help) Print this information                                   |\n"
        "  |-server   Server URI {default: http://0.0.0.0:5000}                       |\n"
        " *|-model    Model Name                                                      |\n"
        "  |-params   Input Params (JSON)                                             |\n"
        " *|-image    Input Image File [either -image or -session is required]        |\n"
        " *|-session  Session ID [either -image or -session is required]              |\n"
        "  |-output   Output Image File                                               |\n"
        "  |-timeout  Timeout In Seconds {default: 60}                                |\n"
        "  |-ts       Print API Latency                                               |\n";
    return 0;
  }

  std::string serverUri = getCmdOption(argv, argv + argc, "-server", "http://0.0.0.0:5000");
  std::string model = getCmdOption(argv, argv + argc, "-model");

  std::string params = getCmdOption(argv, argv + argc, "-params");
  std::string inputImageFile = getCmdOption(argv, argv + argc, "-image");
  std::string sessionId = getCmdOption(argv, argv + argc, "-session");
  std::string outputImageFile = getCmdOption(argv, argv + argc, "-output");

  int timeout = nvidia::aiaa::Utils::lexical_cast<int>(getCmdOption(argv, argv + argc, "-timeout", "60"));
  bool printTs = cmdOptionExists(argv, argv + argc, "-ts");

  // Validate required inputs before contacting the server.
  if (model.empty()) {
    std::cerr << "Model is required\n";
    return -1;
  }
  if (inputImageFile.empty() && sessionId.empty()) {
    std::cerr << "Input Image file is missing (Either session-id or input image should be provided)\n";
    return -1;
  }

  try {
    nvidia::aiaa::Client client(serverUri, timeout);

    // Resolve the model name to a full model descriptor on the server.
    nvidia::aiaa::Model m = client.model(model);
    if (m.name.empty()) {
      std::cerr << "Couldn't find a model for name: " << model << "\n";
      return -1;
    }

    auto begin = std::chrono::high_resolution_clock::now();
    std::string resultJson = client.inference(m, params, inputImageFile, outputImageFile, sessionId);
    std::cout << "Result (JSON): " << resultJson << std::endl;

    auto end = std::chrono::high_resolution_clock::now();
    auto ms = std::chrono::duration_cast<std::chrono::milliseconds>(end - begin).count();

    if (printTs) {
      std::cout << "API Latency (in milli sec): " << ms << std::endl;
    }
    return 0;
  } catch (nvidia::aiaa::exception &e) {
    std::cerr << "nvidia::aiaa::exception => nvidia.aiaa.error." << e.id << "; description: " << e.name() << "; reason: " << e.what() << std::endl;
  }
  return -1;
}
31 changes: 31 additions & 0 deletions docs/tools.rst
Original file line number Diff line number Diff line change
Expand Up @@ -225,6 +225,37 @@ Example
-fpoints [[283,204,105]] \
-output tmp_out.nii.gz
Inference
------------

Provides implementation for ``nvidia::aiaa::Client::inference()`` API.
For more details refer `aiaa-inference.cpp <https://github.com/NVIDIA/ai-assisted-annotation-client/blob/master/cpp-client/tools/aiaa/aiaa-inference.cpp>`_

Following are the options available

.. csv-table::
:header: Option,Description,Default,Example
:widths: auto

-h,Prints the help information,,
-server,Server URI for AIAA Server,,-server http://0.0.0.0:5000
-model,Model Name,,-model clara_xray_classification_chest_amp
-params,Params JSON,,-params {}
-image,Input image filename which will be sent to AIAA for inference,,-image input.png
-output,File name to store output image result from AIAA server,,-output output.png
-session,Session ID instead of -image option,,-session "9ad970be-530e-11ea-84e3-0242ac110007"

Example

.. code-block:: bash
nvidiaAIAAInference \
-server http://0.0.0.0:5000 \
-model clara_xray_classification_chest_amp \
-image AC07112011207153620_v2.png \
-params {}
Mask To Polygon
------------------

Expand Down
8 changes: 8 additions & 0 deletions py-client/aas_tests.json
Original file line number Diff line number Diff line change
Expand Up @@ -57,6 +57,14 @@
"image_in": "test-data/image.nii.gz",
"image_out": "test-data/output/deepgrow_spleen.nii.gz"
},
{
"name": "test_inference",
"disabled": false,
"api": "inference",
"model": "clara_xray_classification_chest_amp",
"params": {},
"image_in": "test-data/AC07112011207153620_v2.png"
},
{
"name": "test_mask2polygon_spleen",
"disabled": false,
Expand Down
Loading

0 comments on commit 313da51

Please sign in to comment.