Skip to content

Commit

Permalink
Support saving image metadata; currently only the PNG format and the Fooocus scheme are supported
Browse files Browse the repository at this point in the history
customize the file name of the output image
the default output image name is now the job id
update version
  • Loading branch information
mrhan1993 committed Apr 15, 2024
1 parent ad2de65 commit 72df59a
Show file tree
Hide file tree
Showing 8 changed files with 162 additions and 8 deletions.
2 changes: 1 addition & 1 deletion fooocus_api_version.py
Original file line number Diff line number Diff line change
@@ -1 +1 @@
version = '0.4.0.3'
version = '0.4.0.4'
10 changes: 10 additions & 0 deletions fooocusapi/models/common/base.py
Original file line number Diff line number Diff line change
Expand Up @@ -87,6 +87,16 @@ class DescribeImageType(str, Enum):
anime = 'Anime'


class ImageMetaScheme(str, Enum):
    """Supported schemes for saving image metadata.

    Attributes:
        Fooocus: metadata is stored in Fooocus' JSON format
        A111: metadata is stored as an A1111-style plain string
    """
    Fooocus = 'fooocus'
    A111 = 'a111'


def style_selection_parser(style_selections: str) -> List[str]:
"""
Parse style selections, Convert to list
Expand Down
116 changes: 116 additions & 0 deletions fooocusapi/models/common/image_meta.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,116 @@
"""
Image meta schema
"""
from typing import List

from fooocus_version import version
from pydantic import BaseModel


class ImageMeta(BaseModel):
    """Metadata embedded into a generated output image.

    Instances are built by ``image_parse`` from the request parameters and
    the per-image task result, then dumped to a dict and written into the
    image file (PNG text chunks).
    """

    # Metadata format: 'fooocus' (JSON) or 'a111' (plain string).
    metadata_scheme: str = "fooocus"

    # Base checkpoint name (extension stripped) and its hash.
    base_model: str
    base_model_hash: str

    # Raw prompt, fully expanded per-image prompts, and the expansion text.
    prompt: str
    full_prompt: List[str]
    prompt_expansion: str

    negative_prompt: str
    full_negative_prompt: List[str]

    performance: str

    style: str

    refiner_model: str = "None"
    refiner_switch: float = 0.5

    # Each entry is a [name, weight, hash] triple — see ``loras_parser``.
    loras: List[list]

    # Stringified "(width, height)" tuple.
    resolution: str

    sampler: str = "dpmpp_2m_sde_gpu"
    scheduler: str = "karras"
    seed: str
    adm_guidance: str
    # float (was int): Fooocus guidance scale and sharpness are floats
    # (e.g. 4.0, 2.0); pydantic v2 would reject non-integral values such
    # as 3.5 for an int field. Widening to float stays backward compatible
    # since ints coerce losslessly.
    guidance_scale: float
    sharpness: float
    steps: int

    # Fooocus version the image was produced with.
    version: str = version

    def __repr__(self):
        # Intentionally empty — presumably keeps the full metadata blob out
        # of printed logs; TODO(review): confirm this is deliberate.
        return ""


def loras_parser(loras: list) -> list:
    """Normalize a raw LoRA list into ``[name, weight, hash]`` triples.

    The file extension is stripped from each LoRA filename; entries whose
    name is missing (``None``) or the literal string ``'None'`` are skipped.
    Hash calculation is deferred, so a placeholder string is emitted.
    """
    parsed = []
    for entry in loras:
        file_name = entry[0]
        if file_name is None or file_name == 'None':
            continue
        # Drop the trailing ".ext" (if any); names without a dot pass through.
        stem = file_name.rsplit('.', maxsplit=1)[0]
        parsed.append([stem, entry[1], "hash_not_calculated"])
    return parsed


def image_parse(
        async_tak: object,
        task: dict
) -> dict:
    """Build the metadata dict for one generated image.

    Args:
        async_tak: async task object carrying the original request
            parameters as ``req_param``. NOTE(review): the name is a typo
            of ``async_task`` but is kept because callers pass it by
            keyword.
        task: per-image task dict produced by the pipeline (provides the
            expanded prompts and the seed).

    Returns:
        dict: metadata ready to be embedded into the output image. For the
        'fooocus' scheme, each LoRA is additionally flattened into a
        ``lora_combined_N`` entry, matching Fooocus' own format.
    """
    req_param = async_tak.req_param
    meta = ImageMeta(
        metadata_scheme=req_param.meta_scheme,
        # Strip the checkpoint file extension, keep the model name.
        base_model=req_param.base_model_name.rsplit('.', maxsplit=1)[:1][0],
        base_model_hash='',
        prompt=req_param.prompt,
        full_prompt=task['positive'],
        prompt_expansion=task['expansion'],
        negative_prompt=req_param.negative_prompt,
        full_negative_prompt=task['negative'],
        performance=req_param.performance_selection,
        style=str(req_param.style_selections),
        refiner_model=req_param.refiner_model_name,
        refiner_switch=req_param.refiner_switch,
        loras=loras_parser(req_param.loras),
        resolution=str(tuple(int(n) for n in req_param.aspect_ratios_selection.split('*'))),
        sampler=req_param.advanced_params.sampler_name,
        scheduler=req_param.advanced_params.scheduler_name,
        seed=str(task['task_seed']),
        adm_guidance=str((
            req_param.advanced_params.adm_scaler_positive,
            req_param.advanced_params.adm_scaler_negative,
            req_param.advanced_params.adm_scaler_end)),
        guidance_scale=req_param.guidance_scale,
        sharpness=req_param.sharpness,
        # TODO(review): hard-coded placeholder — the real step count is not
        # recorded here; confirm whether task/req_param exposes it.
        steps=-1,
        version=version
    )
    # Unknown schemes fall back to 'fooocus'.
    if meta.metadata_scheme not in ("fooocus", "a111"):
        meta.metadata_scheme = "fooocus"
    meta_dict = meta.model_dump()
    if meta.metadata_scheme == "fooocus":
        # Fooocus scheme flattens each lora into a "name : weight" string.
        for i, lora in enumerate(meta.loras):
            name, weight = str(lora[0]), str(lora[1])
            meta_dict[f"lora_combined_{i + 1}"] = f"{name} : {weight}"
    return meta_dict
4 changes: 4 additions & 0 deletions fooocusapi/models/common/requests.py
Original file line number Diff line number Diff line change
Expand Up @@ -26,6 +26,7 @@

default_aspect_ratio = default_aspect_ratio.split(" ")[0].replace("×", "*")


class QueryJobRequest(BaseModel):
"""Query job request"""
job_id: str = Field(description="Job ID to query")
Expand Down Expand Up @@ -90,7 +91,10 @@ class CommonRequest(BaseModel):
refiner_switch: float = Field(default=default_refiner_switch, description="Refiner Switch At", ge=0.1, le=1.0)
loras: List[Lora] = Field(default=default_loras_model)
advanced_params: AdvancedParams = AdvancedParams()
save_meta: bool = Field(default=True, description="Save meta data")
meta_scheme: str = Field(default='fooocus', description="Meta data scheme, one of [fooocus, a111]")
save_extension: str = Field(default='png', description="Save extension, one of [png, jpg, webp]")
save_name: str = Field(default='', description="Image name for output image, default is a uuid")
read_wildcards_in_order: bool = Field(default=False, description="Read wildcards in order")
require_base64: bool = Field(default=False, description="Return base64 data of generated image")
async_process: bool = Field(default=False, description="Set to true will run async and return job info for retrieve generation result later")
Expand Down
6 changes: 6 additions & 0 deletions fooocusapi/parameters.py
Original file line number Diff line number Diff line change
Expand Up @@ -165,6 +165,9 @@ def __init__(
image_prompts: List[Tuple[np.ndarray, float, float, str]],
advanced_params: List[any] | None,
save_extension: str,
save_meta: bool,
meta_scheme: str,
save_name: str,
require_base64: bool,
):
self.prompt = prompt
Expand Down Expand Up @@ -192,6 +195,9 @@ def __init__(
self.inpaint_additional_prompt = inpaint_additional_prompt
self.image_prompts = image_prompts
self.save_extension = save_extension
self.save_meta = save_meta
self.meta_scheme = meta_scheme
self.save_name = save_name
self.require_base64 = require_base64
self.advanced_params = advanced_params

Expand Down
3 changes: 3 additions & 0 deletions fooocusapi/utils/api_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -204,6 +204,9 @@ def req_to_params(req: Text2ImgRequest) -> ImageGenerationParams:
inpaint_additional_prompt=inpaint_additional_prompt,
image_prompts=image_prompts,
advanced_params=advanced_params,
save_meta=req.save_meta,
meta_scheme=req.meta_scheme,
save_name=req.save_name,
save_extension=req.save_extension,
require_base64=req.require_base64,
)
Expand Down
5 changes: 2 additions & 3 deletions fooocusapi/utils/file_utils.py
Original file line number Diff line number Diff line change
Expand Up @@ -47,8 +47,6 @@ def save_output_file(
current_time = datetime.datetime.now()
date_string = current_time.strftime("%Y-%m-%d")

image_name = str(uuid.uuid4()) if image_name == '' else image_name

filename = os.path.join(date_string, image_name + '.' + extension)
file_path = os.path.join(output_dir, filename)

Expand All @@ -61,7 +59,8 @@ def save_output_file(
meta = None
if extension == 'png':
meta = PngInfo()
meta.add_text("params", json.dumps(image_meta))
meta.add_text("parameters", json.dumps(image_meta))
meta.add_text("fooocus_scheme", image_meta['metadata_scheme'])

os.makedirs(os.path.dirname(file_path), exist_ok=True)
Image.fromarray(img).save(file_path, format=extension,
Expand Down
24 changes: 20 additions & 4 deletions fooocusapi/worker.py
Original file line number Diff line number Diff line change
Expand Up @@ -7,6 +7,7 @@
import numpy as np
import torch

from fooocusapi.models.common.image_meta import image_parse
from modules.patch import PatchSettings, patch_settings, patch_all
from modules.sdxl_styles import apply_arrays
from modules.flags import Performance
Expand Down Expand Up @@ -125,9 +126,23 @@ def yield_result(_, imgs, tasks, extension='png'):

results = []
for i, im in enumerate(imgs):
if async_task.req_param.save_name == '':
image_name = f"{async_task.job_id}-{str(i)}"
else:
image_name = f"{async_task.req_param.save_name}-{str(i)}"
seed = -1 if len(tasks) == 0 else tasks[i]['task_seed']
img_filename = save_output_file(img=im, extension=extension)
results.append(ImageGenerationResult(im=img_filename, seed=str(seed), finish_reason=GenerationFinishReason.success))
img_meta = image_parse(
async_tak=async_task,
task=tasks[i])
img_filename = save_output_file(
img=im,
image_name=image_name,
image_meta=img_meta,
extension=extension)
results.append(ImageGenerationResult(
im=img_filename,
seed=str(seed),
finish_reason=GenerationFinishReason.success))
async_task.set_result(results, False)
worker_queue.finish_task(async_task.job_id)
logger.std_info(f"[Task Queue] Finish task, job_id={async_task.job_id}")
Expand Down Expand Up @@ -160,7 +175,10 @@ def yield_result(_, imgs, tasks, extension='png'):
performance_selection = Performance(params.performance_selection)
aspect_ratios_selection = params.aspect_ratios_selection
image_number = params.image_number
save_metadata_to_images = params.save_meta
metadata_scheme = params.meta_scheme
save_extension = params.save_extension
save_name = params.save_name
image_seed = refresh_seed(params.image_seed)
read_wildcards_in_order = False
sharpness = params.sharpness
Expand Down Expand Up @@ -221,8 +239,6 @@ def yield_result(_, imgs, tasks, extension='png'):
invert_mask_checkbox = adp.invert_mask_checkbox
inpaint_erode_or_dilate = adp.inpaint_erode_or_dilate

save_metadata_to_images = False
metadata_scheme = 'fooocus'

cn_tasks = {x: [] for x in flags.ip_list}
for img_prompt in params.image_prompts:
Expand Down

0 comments on commit 72df59a

Please sign in to comment.