Skip to content

Commit

Permalink
Merge pull request #9 from iwatake2222/feat/contain_prebuilt_libs
Browse files Browse the repository at this point in the history
Feat/contain prebuilt libs
  • Loading branch information
iwatake2222 authored Jan 10, 2021
2 parents ecd5ca9 + 05429d8 commit 76180ec
Show file tree
Hide file tree
Showing 17 changed files with 281 additions and 52 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
.vscode/
build/
ThirdParty/
third_party/
resource/
3 changes: 3 additions & 0 deletions .gitmodules
Original file line number Diff line number Diff line change
@@ -0,0 +1,3 @@
[submodule "ThirdParty/tensorflow"]
path = ThirdParty/tensorflow
url = https://github.com/tensorflow/tensorflow
17 changes: 15 additions & 2 deletions 00_doc/class_diagram.drawio
Original file line number Diff line number Diff line change
@@ -1,4 +1,4 @@
<mxfile host="65bd71144e" modified="2020-12-27T09:02:34.596Z" agent="5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Code/1.46.1 Chrome/78.0.3904.130 Electron/7.3.1 Safari/537.36" etag="Ge7SQ_ebyERRGwPVspIJ" version="13.10.0" type="embed">
<mxfile host="65bd71144e" modified="2021-01-10T01:48:26.266Z" agent="5.0 (Windows NT 10.0; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Code/1.46.1 Chrome/78.0.3904.130 Electron/7.3.1 Safari/537.36" etag="uZrn58mdqdMWxrp378wF" version="13.10.0" type="embed">
<diagram id="2nVCpC3mRS13LhFPs2z5" name="Page-1">
<mxGraphModel dx="1877" dy="1993" grid="1" gridSize="10" guides="1" tooltips="1" connect="1" arrows="1" fold="1" page="1" pageScale="1" pageWidth="827" pageHeight="1169" math="0" shadow="0">
<root>
Expand Down Expand Up @@ -119,7 +119,7 @@
</mxGeometry>
</mxCell>
<mxCell id="52" value="&lt;p style=&quot;margin: 4px 0px 0px ; text-align: center&quot;&gt;&lt;b&gt;&lt;u&gt;HELPER_TYPE&lt;/u&gt;&lt;/b&gt;&lt;br&gt;&lt;/p&gt;&lt;hr&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;TENSOR_RT,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;TENSORFLOW_LITE,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;TENSORFLOW_LITE_EDGETPU,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;TENSORFLOW_LITE_GPU,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;TENSORFLOW_LITE_XNNPACK,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;NCNN,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;MNN,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;OPEN_CV,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;OPEN_CV_GPU,&lt;/p&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;" parent="1" vertex="1">
<mxGeometry x="-110" y="100" width="210" height="170" as="geometry"/>
<mxGeometry x="-110" y="59" width="210" height="170" as="geometry"/>
</mxCell>
<mxCell id="54" value="&lt;p style=&quot;margin: 4px 0px 0px ; text-align: center&quot;&gt;&lt;b&gt;&lt;u&gt;DATA_TYPE&lt;/u&gt;&lt;/b&gt;&lt;br&gt;&lt;/p&gt;&lt;hr&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;DATA_TYPE_IMAGE,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;DATA_TYPE_BLOB_NHWC,&lt;/p&gt;&lt;p style=&quot;margin: 0px ; margin-left: 8px&quot;&gt;DATA_TYPE_BLOB_NCHW,&lt;/p&gt;" style="verticalAlign=top;align=left;overflow=fill;fontSize=12;fontFamily=Helvetica;html=1;" parent="1" vertex="1">
<mxGeometry x="470" y="448" width="210" height="80" as="geometry"/>
Expand Down Expand Up @@ -158,6 +158,19 @@
<mxPoint x="270" y="-130" as="targetPoint"/>
</mxGeometry>
</mxCell>
<mxCell id="73" value="ThirdParty/&lt;br&gt;pre-built library" style="html=1;" vertex="1" parent="1">
<mxGeometry x="100" y="448" width="110" height="50" as="geometry"/>
</mxCell>
<mxCell id="74" value="" style="rounded=0;whiteSpace=wrap;html=1;fillColor=none;dashed=1;dashPattern=1 2;" vertex="1" parent="1">
<mxGeometry x="40" y="250" width="660" height="150" as="geometry"/>
</mxCell>
<mxCell id="76" value="link" style="endArrow=open;endFill=1;endSize=12;html=1;entryX=0.5;entryY=0;entryDx=0;entryDy=0;exitX=0.25;exitY=1;exitDx=0;exitDy=0;dashed=1;" edge="1" parent="1" source="74" target="73">
<mxGeometry x="-0.4963" y="11" width="160" relative="1" as="geometry">
<mxPoint x="660" y="144" as="sourcePoint"/>
<mxPoint x="790" y="163" as="targetPoint"/>
<mxPoint as="offset"/>
</mxGeometry>
</mxCell>
</root>
</mxGraphModel>
</diagram>
Expand Down
Binary file modified 00_doc/class_diagram.png
Loading
Sorry, something went wrong. Reload?
Sorry, we cannot display this file.
Sorry, this file is invalid so it cannot be displayed.
61 changes: 32 additions & 29 deletions InferenceHelper/CMakeLists.txt
Original file line number Diff line number Diff line change
@@ -1,14 +1,15 @@
cmake_minimum_required(VERSION 3.0)

set(LibraryName "InferenceHelper")
set(THIRD_PARTY_DIR ${CMAKE_CURRENT_LIST_DIR}/../ThirdParty/)

set(INFERENCE_HELPER_ENABLE_PRE_PROCESS_BY_OPENCV on CACHE BOOL "Enable PreProcess by OpenCV? [on/off]")
set(INFERENCE_HELPER_ENABLE_OPENCV on CACHE BOOL "With OpenCV? [on/off]")
set(INFERENCE_HELPER_ENABLE_TENSORRT off CACHE BOOL "With TensorRT? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE off CACHE BOOL "With Tflite? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU off CACHE BOOL "With Tflite Delegate EdgeTPU? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU off CACHE BOOL "With Tflite Delegate GPU? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK off CACHE BOOL "With Tflite Delegate XNNPACK? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU off CACHE BOOL "With Tflite Delegate GPU? [on/off]")
set(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU off CACHE BOOL "With Tflite Delegate EdgeTPU? [on/off]")
set(INFERENCE_HELPER_ENABLE_TENSORRT off CACHE BOOL "With TensorRT? [on/off]")
set(INFERENCE_HELPER_ENABLE_NCNN off CACHE BOOL "With Ncnn? [on/off]")
set(INFERENCE_HELPER_ENABLE_MNN off CACHE BOOL "With Mnn? [on/off]")

Expand All @@ -18,13 +19,16 @@ set(SRC InferenceHelper.h InferenceHelper.cpp)
if(INFERENCE_HELPER_ENABLE_OPENCV)
set(SRC ${SRC} InferenceHelperOpenCV.h InferenceHelperOpenCV.cpp)
endif()

if(INFERENCE_HELPER_ENABLE_TFLITE OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
set(SRC ${SRC} InferenceHelperTensorflowLite.h InferenceHelperTensorflowLite.cpp)
endif()

if(INFERENCE_HELPER_ENABLE_TENSORRT)
set(SRC ${SRC} InferenceHelperTensorRt.h InferenceHelperTensorRt.cpp)
set(SRC ${SRC} TensorRT/logger.cpp TensorRT/BatchStream.h TensorRT/common.h TensorRT/EntropyCalibrator.h TensorRT/logger.h TensorRT/logging.h)
endif()
if(INFERENCE_HELPER_ENABLE_TFLITE OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
set(SRC ${SRC} InferenceHelperTensorflowLite.h InferenceHelperTensorflowLite.cpp)
endif()

if(INFERENCE_HELPER_ENABLE_NCNN)
set(SRC ${SRC} InferenceHelperNcnn.h InferenceHelperNcnn.cpp)
endif()
Expand Down Expand Up @@ -52,28 +56,6 @@ if(INFERENCE_HELPER_ENABLE_OPENCV)
add_definitions(-DINFERENCE_HELPER_ENABLE_OPENCV)
endif()

# For TensorRT
if(INFERENCE_HELPER_ENABLE_TENSORRT)
find_package(CUDA)
if(CUDA_FOUND)
target_link_libraries(${LibraryName}
${CUDA_LIBRARIES}
nvinfer
nvonnxparser
nvinfer_plugin
cudnn
)
target_include_directories(${LibraryName} PUBLIC
${CUDA_INCLUDE_DIRS}
TensorRT
)
add_definitions(-DINFERENCE_HELPER_ENABLE_TENSORRT)
message("CUDA_INCLUDE_DIRS: ${CUDA_INCLUDE_DIRS}")
else()
message(WARNING, "Cannot find CUDA")
endif()
endif()

# For Tensorflow Lite
if(INFERENCE_HELPER_ENABLE_TFLITE OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_XNNPACK OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_GPU OR INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
include(${THIRD_PARTY_DIR}/cmakes/tflite.cmake)
Expand Down Expand Up @@ -108,6 +90,28 @@ if(INFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
add_definitions(-DINFERENCE_HELPER_ENABLE_TFLITE_DELEGATE_EDGETPU)
endif()

# For TensorRT
if(INFERENCE_HELPER_ENABLE_TENSORRT)
find_package(CUDA)
if(CUDA_FOUND)
target_link_libraries(${LibraryName}
${CUDA_LIBRARIES}
nvinfer
nvonnxparser
nvinfer_plugin
cudnn
)
target_include_directories(${LibraryName} PUBLIC
${CUDA_INCLUDE_DIRS}
TensorRT
)
add_definitions(-DINFERENCE_HELPER_ENABLE_TENSORRT)
message("CUDA_INCLUDE_DIRS: ${CUDA_INCLUDE_DIRS}")
else()
message(WARNING, "Cannot find CUDA")
endif()
endif()

# For NCNN
if(INFERENCE_HELPER_ENABLE_NCNN)
include(${THIRD_PARTY_DIR}/cmakes/ncnn.cmake)
Expand All @@ -116,7 +120,6 @@ if(INFERENCE_HELPER_ENABLE_NCNN)
add_definitions(-DINFERENCE_HELPER_ENABLE_NCNN)
endif()


# For MNN
if(INFERENCE_HELPER_ENABLE_MNN)
include(${THIRD_PARTY_DIR}/cmakes/MNN.cmake)
Expand Down
14 changes: 7 additions & 7 deletions InferenceHelper/InferenceHelper.h
Original file line number Diff line number Diff line change
Expand Up @@ -101,29 +101,29 @@ class OutputTensorInfo : public TensorInfo {
data = nullptr;
quant.scale = 0;
quant.zeroPoint = 0;
m_dataFp32.reset();
m_dataFp32 = nullptr;
}

~OutputTensorInfo() {
if (m_dataFp32) {
m_dataFp32.reset();
if (m_dataFp32 != nullptr) {
delete[] m_dataFp32;
}
}

float* getDataAsFloat() { /* Returned pointer should be with const, but returning pointer without const is convenient to create cv::Mat */
if (tensorType == TENSOR_TYPE_UINT8) {
int32_t dataNum = 1;
dataNum = tensorDims.batch * tensorDims.channel * tensorDims.height * tensorDims.width;
if (!m_dataFp32) {
m_dataFp32.reset(new float[dataNum]);
if (m_dataFp32 == nullptr) {
m_dataFp32 = new float[dataNum];
}
#pragma omp parallel
for (int32_t i = 0; i < dataNum; i++) {
const uint8_t* valUint8 = static_cast<const uint8_t*>(data);
float valFloat = (valUint8[i] - quant.zeroPoint) * quant.scale;
m_dataFp32[i] = valFloat;
}
return m_dataFp32.get();
return m_dataFp32;
} else if (tensorType == TENSOR_TYPE_FP32) {
return static_cast<float*>(data);
} else {
Expand All @@ -139,7 +139,7 @@ class OutputTensorInfo : public TensorInfo {
} quant; // [Out] Parameters for dequantization (convert uint8 to float)

private:
std::shared_ptr<float[]> m_dataFp32;
float* m_dataFp32;
};


Expand Down
6 changes: 5 additions & 1 deletion InferenceHelper/InferenceHelperTensorflowLite.cpp
Original file line number Diff line number Diff line change
Expand Up @@ -85,7 +85,11 @@ int32_t InferenceHelperTensorflowLite::initialize(const std::string& modelFilena
if (num_devices > 0) {
const auto& device = devices.get()[0];
m_delegate = edgetpu_create_delegate(device.type, device.path, nullptr, 0);
m_interpreter->ModifyGraphWithDelegate(m_delegate);
if (m_delegate) {
m_interpreter->ModifyGraphWithDelegate(m_delegate);
} else {
PRINT_E("[WARNING] Failed to create Edge TPU delegate\n");
}
} else {
PRINT_E("[WARNING] Edge TPU is not found\n");
}
Expand Down
2 changes: 1 addition & 1 deletion NOTICE.md
Original file line number Diff line number Diff line change
Expand Up @@ -11,7 +11,7 @@ This project utilizes the following OSS (Open Source Software):
- https://github.com/google-coral/libedgetpu
- Copyright 2019 Google LLC
- Licensed under the Apache License, Version 2.0
- Modification: no
- Modification: yes
- Pre-built binary file is generated from this project

- TensorRT
Expand Down
45 changes: 33 additions & 12 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -6,12 +6,31 @@

## Supported frameworks
- TensorFlow Lite
- TensorFlow Lite with delegate (GPU, XNNPACK, EdgeTPU)
- TensorFlow Lite with delegate (XNNPACK, GPU, EdgeTPU)
- TensorRT
- OpenCV(dnn)
- ncnn
- MNN

## Supported targets
- Windows 10 (Visual Studio 2017 x64, Visual Studio 2019 x64)
- Linux (x64, armv7, aarch64)
- Android (armv7, aarch64)

## Tested Environment
| Framework | Windows (x64) | Linux (x64) | Linux (armv7) | Linux (aarch64) | Android (aarch64) |
|---------------------------|--------------------------|---------------|---------------|------------------|-------------------|
| OpenCV(dnn) | OK | OK | OK | OK | not tested |
| TensorFlow Lite | OK | OK | OK | OK | OK |
| TensorFlow Lite + XNNPACK | OK | OK | OK | OK | OK |
| TensorFlow Lite + GPU | not supported | OK | not tested | OK | OK |
| TensorFlow Lite + EdgeTPU | OK | not tested | OK | OK | OK |
| TensorRT | not tested | not tested | not tested | OK | not supported |
| ncnn | OK | OK | OK | OK | OK |
| MNN | OK | OK | OK | OK | OK |
| Note | Visual Studio 2017, 2019 | Xubuntu 18.04 | Raspberry Pi | Jetson Xavier NX | Pixel 4a |


## Sample project
https://github.com/iwatake2222/InferenceHelper_Sample

Expand All @@ -21,24 +40,26 @@ https://github.com/iwatake2222/InferenceHelper_Sample
- https://github.com/iwatake2222/play_with_ncnn
- https://github.com/iwatake2222/play_with_mnn

## Tested Environment
- Windows 10 (Visual Studio 2017 x64)
- Linux (Xubuntu 18.04 x64)
- Linux (Jetson Xavier NX)

# Usage
## Installation
- Add this repository into your project (Using `git submodule` is recommended)
- This class requires pre-built deep learning framework libraries, and appropriate cmake variables need to be set
- Please see the sample project
- Download prebuilt libraries
- Download prebuilt libraries (ThirdParty.zip) from https://github.com/iwatake2222/InferenceHelper/releases/
- Extract it to `ThirdParty`

## Project settings in CMake
- CMake variables
- `THIRD_PARTY_DIR` : set the directory containing pre-built deep learning framework libraries
### (For Tensorflow Lite)
- After adding or cloning this repository, you need to download header files
```
git submodule init
git submodule update
cd ThirdParty/tensorflow
chmod +x tensorflow/lite/tools/make/download_dependencies.sh
tensorflow/lite/tools/make/download_dependencies.sh
```

## Project settings in CMake
- Add InferenceHelper and CommonHelper to your project
```cmake
set(THIRD_PARTY_DIR ${CMAKE_CURRENT_LIST_DIR}/../../third_party/)
set(INFERENCE_HELPER_DIR ${CMAKE_CURRENT_LIST_DIR}/../../InferenceHelper/)
add_subdirectory(${INFERENCE_HELPER_DIR}/CommonHelper CommonHelper)
target_include_directories(${LibraryName} PUBLIC ${INFERENCE_HELPER_DIR}/CommonHelper)
Expand Down
35 changes: 35 additions & 0 deletions ThirdParty/cmakes/MNN.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,35 @@
# MNN.cmake — selects the prebuilt MNN library shipped under ThirdParty/MNN_prebuilt.
#
# Output variables (consumed by the including CMakeLists):
#   MNN_LIB : imported target name (Android) or per-config/per-platform library path(s)
#   MNN_INC : matching include directory (may be generator expressions)
#
# Platform selection: ANDROID_ABI (Android cross build), MSVC_VERSION (Windows),
# otherwise BUILD_SYSTEM (x64_linux / armv7 / aarch64) — the caller must define
# BUILD_SYSTEM for the last branch to select anything; with it unset every
# generator expression evaluates to empty. TODO(review): confirm callers always set it.
if(DEFINED ANDROID_ABI)
    # Wrap the prebuilt .so in an imported target so its location travels with
    # the target when linked.
    add_library(MNN SHARED IMPORTED GLOBAL)
    set_target_properties(
        MNN
        PROPERTIES IMPORTED_LOCATION
        ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/android/${ANDROID_ABI}/lib/libMNN.so
    )
    set(MNN_LIB MNN)
    set(MNN_INC ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/android/${ANDROID_ABI}/include)
elseif(MSVC_VERSION)
    # Copy the per-configuration DLLs next to the build outputs so executables
    # can load them at run time without PATH changes.
    file(COPY ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/Debug/MNN.dll DESTINATION ${CMAKE_BINARY_DIR}/Debug)
    file(COPY ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/RelWithDebInfo/MNN.dll DESTINATION ${CMAKE_BINARY_DIR}/RelWithDebInfo)
    file(COPY ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/Release/MNN.dll DESTINATION ${CMAKE_BINARY_DIR}/Release)
    file(COPY ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/MinSizeRel/MNN.dll DESTINATION ${CMAKE_BINARY_DIR}/MinSizeRel)
    # Pick the import library matching the active configuration at generate time
    # (works with the multi-config Visual Studio generator).
    set(MNN_LIB
        $<$<CONFIG:Debug>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/Debug/MNN.lib>
        $<$<CONFIG:RelWithDebInfo>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/RelWithDebInfo/MNN.lib>
        $<$<CONFIG:Release>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/Release/MNN.lib>
        $<$<CONFIG:MinSizeRel>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/lib/MinSizeRel/MNN.lib>
    )
    set(MNN_INC ${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_windows/VS2017/include)
else()
    # Linux-family targets: BUILD_SYSTEM chooses the prebuilt directory.
    set(MNN_LIB
        $<$<STREQUAL:${BUILD_SYSTEM},x64_linux>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_linux/lib/libMNN.so>
        $<$<STREQUAL:${BUILD_SYSTEM},armv7>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/armv7/lib/libMNN.so>
        $<$<STREQUAL:${BUILD_SYSTEM},aarch64>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/aarch64/lib/libMNN.so>
    )
    set(MNN_INC
        $<$<STREQUAL:${BUILD_SYSTEM},x64_linux>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/x64_linux/include>
        $<$<STREQUAL:${BUILD_SYSTEM},armv7>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/armv7/include>
        $<$<STREQUAL:${BUILD_SYSTEM},aarch64>:${CMAKE_CURRENT_LIST_DIR}/../MNN_prebuilt/aarch64/include>
    )
endif()
23 changes: 23 additions & 0 deletions ThirdParty/cmakes/ncnn.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,23 @@
# ncnn.cmake — selects the prebuilt ncnn static library shipped under
# ThirdParty/ncnn_prebuilt and reports it through two variables:
#   NCNN_LIB : library file(s) to pass to target_link_libraries()
#   NCNN_INC : ncnn header directory
# Platform is picked via ANDROID_ABI, MSVC_VERSION, or BUILD_SYSTEM
# (x64_linux / armv7 / aarch64) in that order of precedence.

# Root of all prebuilt ncnn artifacts, relative to this cmake file.
set(NCNN_PREBUILT_ROOT ${CMAKE_CURRENT_LIST_DIR}/../ncnn_prebuilt)

if(DEFINED ANDROID_ABI)
    set(NCNN_LIB ${NCNN_PREBUILT_ROOT}/android/${ANDROID_ABI}/lib/libncnn.a)
    set(NCNN_INC ${NCNN_PREBUILT_ROOT}/android/${ANDROID_ABI}/include/ncnn)
elseif(MSVC_VERSION)
    # One import library per build configuration; the generator expression
    # resolves at generate time, so this works with multi-config generators.
    set(NCNN_LIB
        $<$<CONFIG:Debug>:${NCNN_PREBUILT_ROOT}/x64_windows/lib/ncnnd.lib>
        $<$<CONFIG:RelWithDebInfo>:${NCNN_PREBUILT_ROOT}/x64_windows/lib/ncnnRelWithDebInfo.lib>
        $<$<CONFIG:Release>:${NCNN_PREBUILT_ROOT}/x64_windows/lib/ncnn.lib>
        $<$<CONFIG:MinSizeRel>:${NCNN_PREBUILT_ROOT}/x64_windows/lib/ncnnMinSizeRel.lib>
    )
    set(NCNN_INC ${NCNN_PREBUILT_ROOT}/x64_windows/include/ncnn)
else()
    # Linux-family targets: the directory matching BUILD_SYSTEM is selected;
    # non-matching entries evaluate to empty strings.
    set(NCNN_LIB
        $<$<STREQUAL:${BUILD_SYSTEM},x64_linux>:${NCNN_PREBUILT_ROOT}/x64_linux/lib/libncnn.a>
        $<$<STREQUAL:${BUILD_SYSTEM},armv7>:${NCNN_PREBUILT_ROOT}/armv7/lib/libncnn.a>
        $<$<STREQUAL:${BUILD_SYSTEM},aarch64>:${NCNN_PREBUILT_ROOT}/aarch64/lib/libncnn.a>
    )
    set(NCNN_INC
        $<$<STREQUAL:${BUILD_SYSTEM},x64_linux>:${NCNN_PREBUILT_ROOT}/x64_linux/include/ncnn>
        $<$<STREQUAL:${BUILD_SYSTEM},armv7>:${NCNN_PREBUILT_ROOT}/armv7/include/ncnn>
        $<$<STREQUAL:${BUILD_SYSTEM},aarch64>:${NCNN_PREBUILT_ROOT}/aarch64/include/ncnn>
    )
endif()
30 changes: 30 additions & 0 deletions ThirdParty/cmakes/tflite.cmake
Original file line number Diff line number Diff line change
@@ -0,0 +1,30 @@
# tflite.cmake — selects the prebuilt TensorFlow Lite library shipped under
# ThirdParty/tensorflow_prebuilt.
#
# Output variables:
#   TFLITE_INC : TensorFlow Lite (plus flatbuffers/abseil) header directories,
#                taken from the ThirdParty/tensorflow submodule checkout
#   TFLITE_LIB : imported target name (Android) or library path (other platforms)
#
# Side effect: on Windows/Linux the runtime .so is copied into CMAKE_BINARY_DIR
# so binaries can be run in place.
set(TFLITE_INC
    ${CMAKE_CURRENT_LIST_DIR}/../tensorflow
    ${CMAKE_CURRENT_LIST_DIR}/../tensorflow/tensorflow/lite/tools/make/downloads/flatbuffers/include
    ${CMAKE_CURRENT_LIST_DIR}/../tensorflow/tensorflow/lite/tools/make/downloads/absl
)

if(DEFINED ANDROID_ABI)
    # Wrap the prebuilt .so in an imported target so the path travels with it.
    add_library(TFLITE SHARED IMPORTED GLOBAL)
    set_target_properties(
        TFLITE
        PROPERTIES IMPORTED_LOCATION
        ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/android/${ANDROID_ABI}/libtensorflowlite.so
    )
    set(TFLITE_LIB TFLITE)
elseif(MSVC_VERSION)
    # Link against the import stub; copy the .so next to the build outputs.
    set(TFLITE_LIB ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/x64_windows/libtensorflowlite.so.if.lib)
    file(COPY ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/x64_windows/libtensorflowlite.so DESTINATION ${CMAKE_BINARY_DIR})
else()
    # Quote the expansion: with BUILD_SYSTEM unset or empty, an unquoted
    # ${BUILD_SYSTEM} leaves if() with no left-hand operand and the comparison
    # misbehaves instead of falling through to the FATAL_ERROR diagnostic.
    if("${BUILD_SYSTEM}" STREQUAL "x64_linux")
        set(TFLITE_LIB ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/x64_linux/libtensorflowlite.so)
    elseif("${BUILD_SYSTEM}" STREQUAL "armv7")
        set(TFLITE_LIB ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/armv7/libtensorflowlite.so)
    elseif("${BUILD_SYSTEM}" STREQUAL "aarch64")
        set(TFLITE_LIB ${CMAKE_CURRENT_LIST_DIR}/../tensorflow_prebuilt/aarch64/libtensorflowlite.so)
    else()
        message(FATAL_ERROR "[tflite] unsupported platform")
    endif()
    file(COPY ${TFLITE_LIB} DESTINATION ${CMAKE_BINARY_DIR})
endif()
Loading

0 comments on commit 76180ec

Please sign in to comment.