Merge pull request #28 from cassiebreviu/main #7
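# CI workflow: run the Contoso-Support prompt flow in Azure ML, evaluate the results,
# and register the model only when the evaluation meets the quality bar.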
name: Test and Evaluate Contoso-Support Promptflow
on:
  workflow_dispatch:
  push:
    branches: [ main ]
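# Azure ML resource group, workspace, and subscription are read from repository secrets.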
env:
  GROUP: ${{secrets.GROUP}}
  WORKSPACE: ${{secrets.WORKSPACE}}
  SUBSCRIPTION: ${{secrets.SUBSCRIPTION}}
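  # Default run names; both are overwritten later from the pfazure output.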
  RUN_NAME: contoso_support
  EVAL_RUN_NAME: contoso_support_eval
jobs:
  login-runpf-evalpf-assertpf-registermodel:
    runs-on: ubuntu-latest
    steps:
      - name: Check out repo
        uses: actions/checkout@v2
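      # The ml extension provides the "az ml model create" command used at the end of the job.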
      - name: Install az ml extension
        run: az extension add -n ml -y
      - name: Azure login
        uses: azure/login@v1
        with:
          creds: ${{secrets.AZURE_CREDENTIALS}}
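      # Python is needed for the helper scripts under deployment/llmops-helper.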
      - name: Set up Python
        uses: actions/setup-python@v2
        with:
          python-version: '3.11.4'
      - name: List current directory
        run: ls
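      # Install the flow's dependencies; the requirements file is expected to include the promptflow package that provides the pfazure CLI used below.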
      - name: Install promptflow
        run: pip install -r contoso-support/requirements.txt
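      # Submit the flow defined in run.yml as a cloud run; --stream waits for completion and the output is captured so the run name can be parsed.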
      - name: Run promptflow
        run: |
          pfazure run create -f contoso-support/run.yml --subscription ${{env.SUBSCRIPTION}} -g ${{env.GROUP}} -w ${{env.WORKSPACE}} --stream > run_info.txt
          cat run_info.txt
      - name: List current directory
        run: ls
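      # parse_run_output.py extracts the generated run name from the captured output and exports it for the following steps.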
      - name: Set run name
        run: |
          echo "RUN_NAME=$(python deployment/llmops-helper/parse_run_output.py run_info.txt)" >> "$GITHUB_ENV"
      - name: Show the current run name
        run: echo "Run name is:" ${{env.RUN_NAME}}
      - name: Show promptflow results
        run: pfazure run show-details --name ${{env.RUN_NAME}} --subscription ${{env.SUBSCRIPTION}} -g ${{env.GROUP}} -w ${{env.WORKSPACE}}
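      # Evaluate the completed run with the multi-metric evaluation flow, referencing the base run via --run.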
      - name: Run promptflow evaluations
        run: |
          pfazure run create -f contoso-support/run_evaluation_multi.yml --run ${{env.RUN_NAME}} --subscription ${{env.SUBSCRIPTION}} -g ${{env.GROUP}} -w ${{env.WORKSPACE}} --stream > eval_info.txt
          cat eval_info.txt
      - name: Get eval run name
        run: echo "EVAL_RUN_NAME=$(python deployment/llmops-helper/parse_run_output.py eval_info.txt)" >> "$GITHUB_ENV"
      - name: Show the current eval run name
        run: echo "Eval run name is:" ${{env.EVAL_RUN_NAME}}
      - name: Show promptflow details
        run: pfazure run show-details --name ${{env.EVAL_RUN_NAME}} --subscription ${{env.SUBSCRIPTION}} -g ${{env.GROUP}} -w ${{env.WORKSPACE}}
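      # Save the aggregated evaluation metrics to eval_result.json for the assertion step below.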
      - name: Show promptflow metrics
        run: |
          pfazure run show-metrics --name ${{env.EVAL_RUN_NAME}} --subscription ${{env.SUBSCRIPTION}} -g ${{env.GROUP}} -w ${{env.WORKSPACE}} > eval_result.json
          cat eval_result.json
      - name: List current directory
        run: ls
      - name: Get assert eval results
        id: jobMetricAssert
        run: |
          # NOTE: the number after the file name is the threshold score the metrics must reach for the assertion to pass.
          export ASSERT=$(python deployment/llmops-helper/assert.py eval_result.json 3)
          echo "::debug::Assert has returned the following value: $ASSERT"
          # assert.py returns True or False, but bash expects lowercase.
          if ${ASSERT,,} ; then
            echo "::debug::Prompt flow run met the quality bar and can be deployed."
            echo "result=true" >> "$GITHUB_OUTPUT"
          else
            echo "::warning::Prompt flow run didn't meet the quality bar."
            echo "result=false" >> "$GITHUB_OUTPUT"
          fi
      - name: Show the assert result
        run: echo "Assert result is:" ${{ steps.jobMetricAssert.outputs.result }}
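      # Register the flow as a model in Azure ML only when the evaluation met the quality bar.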
      - name: Register promptflow model
        if: ${{ steps.jobMetricAssert.outputs.result == 'true' }}
        run: az ml model create --file deployment/support-model.yaml -g ${{env.GROUP}} -w ${{env.WORKSPACE}}