diff --git a/README.md b/README.md
index da3c03d..d7cd96c 100644
--- a/README.md
+++ b/README.md
@@ -11,11 +11,14 @@ Our AI plugins offer the following key features:
 
 ## List of Plugins
 
 ### Stable Diffusion plugin for GIMP
 - [Overview and usage](plugins/gimp/stable-diffusion/README.md)
 - [Quick Installation](plugins/gimp/stable-diffusion/docs/install.md)
 - [Build Instructions](plugins/gimp/stable-diffusion/docs/build.md)
 
-## License Information
+### AUTOMATIC1111 stable-diffusion-webui Extension
+- [Overview and Installation](plugins/stable-diffusion-webui/qairt_accelerate/README.md)
+
+## License Information
 
 This project is licensed under the [BSD-3-Clause License](https://spdx.org/licenses/BSD-3-Clause.html). For the full license text, please refer to the [LICENSE](LICENSE) file in this repository.
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/README.md b/plugins/stable-diffusion-webui/qairt_accelerate/README.md
new file mode 100644
index 0000000..0a34380
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/README.md
@@ -0,0 +1,42 @@
+
+# AUTOMATIC1111 stable-diffusion-webui Extension
+
+Stable Diffusion WebUI can now run on the Qualcomm X-Elite NPU with [Qualcomm AI Runtime (QAIRT)](https://www.qualcomm.com/developer/software/qualcomm-ai-engine-direct-sdk). QAIRT support is provided through a custom extension script, which uses the QAIRT Python APIs to run context binaries (.bin) generated by the QAIRT SDK. Running the AI models on the X-Elite NPU through QAIRT gives optimal performance. The models are hosted on [Qualcomm AI Hub](https://aihub.qualcomm.com/compute/models/stable_diffusion_v1_5_quantized?searchTerm=stable).
+
+![sd1_5](docs/resources/qairt.gif)
+This clip is at 2x speed.
+
+
+> **_NOTE:_** Most of AUTOMATIC1111's features are not yet supported by this extension, which is under active development; new feature support will be added incrementally. We actively welcome feedback and contributions from the community.
+
+## Supported features
+
+* Original txt2img mode with SD 1.5 and 2.1
+* Sampling methods: DPM++ 2M
+* Upscaling methods: ESRGAN-x4
+
+## Instructions to run WebUI with QAIRT (Windows)
+
+### Step 1: Download AUTOMATIC1111 stable-diffusion-webui
+Run the commands below in a Windows PowerShell terminal.
+
+```
+# Make sure the Python version is >=3.10.6 and <3.10.14
+git clone https://github.com/AUTOMATIC1111/stable-diffusion-webui.git
+cd stable-diffusion-webui
+```
+
+### Step 2: Download the Extension
+Download `stable-diffusion-webui-qairt-extension.zip` from the latest release, then unzip it and place the `qairt_accelerate` extension under `stable-diffusion-webui\extensions`.
+
+### Step 3: Launch the WebUI
+
+```
+$env:TORCH_INDEX_URL="https://download.pytorch.org/whl/cpu"
+$env:WEBUI_LAUNCH_LIVE_OUTPUT=1
+
+.\webui.bat --skip-torch-cuda-test --no-half --precision full --ui-config-file .\extensions\qairt_accelerate\ui-config.json
+```
+
+The steps above create a virtual environment and install the required packages into it.
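+
+## How the extension drives QAIRT (illustrative)
+
+For orientation, here is a minimal sketch of how a QAIRT context binary is run through the `qai_appbuilder` Python APIs, mirroring the `QNNConfig`/`QNNContext` calls made in `qairt_sd_pipeline.py`. The class name, model name, `.bin` path, and input shape below are placeholders, not part of the extension.
+
+```
+# Minimal sketch (not part of the extension): run one QAIRT context binary on the NPU.
+import numpy as np
+from qai_appbuilder import (
+    QNNConfig, QNNContext, Runtime, LogLevel, ProfilingLevel, PerfProfile,
+)
+
+# Point the runtime at the directory holding the QNN HTP libraries
+# (install.py copies them into qnn_assets/qnn_libs; placeholder path here).
+QNNConfig.Config("qnn_assets/qnn_libs", Runtime.HTP, LogLevel.ERROR, ProfilingLevel.BASIC)
+
+class MyModel(QNNContext):  # placeholder model wrapper
+    def Inference(self, input_data):
+        # The runtime takes a list of flat float32 arrays and returns a list of outputs.
+        return super().Inference([input_data], perf_profile="burst")[0]
+
+model = MyModel("my_model", "path\\to\\Model_Quantized.bin")  # placeholder .bin
+PerfProfile.SetPerfProfileGlobal(PerfProfile.BURST)  # vote for burst clocks
+output = model.Inference(np.zeros(77, dtype=np.float32))  # placeholder input
+PerfProfile.RelPerfProfileGlobal()  # release the performance vote
+```
+
+The extension applies this same pattern to the text encoder, UNet, VAE decoder, and upscaler context binaries.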
\ No newline at end of file
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/common_utils.py b/plugins/stable-diffusion-webui/qairt_accelerate/common_utils.py
new file mode 100644
index 0000000..78aa615
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/common_utils.py
@@ -0,0 +1,59 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+import requests
+import os
+import subprocess
+
+def download_url(url, save_path, chunk_size=128):
+    # Stream the file at `url` to `save_path` in chunks.
+    r = requests.get(url, stream=True)
+    with open(save_path, "wb") as fd:
+        for chunk in r.iter_content(chunk_size=chunk_size):
+            fd.write(chunk)
+
+def run_command(command, live: bool = True):
+    try:
+        env = os.environ.copy()
+        env['PYTHONPATH'] = f"{os.path.abspath('.')}{os.pathsep}{env.get('PYTHONPATH', '')}"
+
+        # Pass the augmented environment through so the child process can
+        # import modules from the current directory.
+        stdout = run(command, errdesc="Error running command", custom_env=env, live=live).strip()
+        if stdout:
+            print(stdout)
+    except Exception as e:
+        print(str(e))
+        exit(1)
+
+def run(command, desc=None, errdesc=None, custom_env=None, live: bool = True) -> str:
+    if desc is not None:
+        print(desc)
+
+    run_kwargs = {
+        "args": command,
+        "shell": True,
+        "env": os.environ if custom_env is None else custom_env,
+        "errors": 'ignore',
+    }
+
+    if not live:
+        run_kwargs["stdout"] = run_kwargs["stderr"] = subprocess.PIPE
+
+    result = subprocess.run(**run_kwargs)
+
+    if result.returncode != 0:
+        error_bits = [
+            f"{errdesc or 'Error running command'}.",
+            f"Command: {command}",
+            f"Error code: {result.returncode}",
+        ]
+        if result.stdout:
+            error_bits.append(f"stdout: {result.stdout}")
+        if result.stderr:
+            error_bits.append(f"stderr: {result.stderr}")
+        raise RuntimeError("\n".join(error_bits))
+
+    return (result.stdout or "")
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/main_icon.png b/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/main_icon.png
new file mode 100644
index 0000000..5bdfa0a
Binary files /dev/null and b/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/main_icon.png differ
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/qairt.gif b/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/qairt.gif
new file mode 100644
index 0000000..75ef63e
Binary files /dev/null and b/plugins/stable-diffusion-webui/qairt_accelerate/docs/resources/qairt.gif differ
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/install.py b/plugins/stable-diffusion-webui/qairt_accelerate/install.py
new file mode 100644
index 0000000..253aac6
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/install.py
@@ -0,0 +1,137 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+import launch
+import os
+import qairt_constants as consts
+import platform
+
+
+# Check the Python version: qai_appbuilder is built on 3.10.6,
+# and the supported range is >=3.10.6 and <3.10.14.
+_patch = int(platform.python_version().split(".")[2])
+if (not platform.python_version().startswith("3.10.")) or _patch < 6 or _patch >= 14:
+    raise Exception("Python version needs to be >=3.10.6 and <3.10.14")
+
+if not launch.is_installed("qai_appbuilder"):
+    launch.run_pip(
+        f"install {consts.QAI_APPBUILDER_WHEEL}",
+        "python QNN",
+    )
+if not launch.is_installed("diffusers"):
+    launch.run_pip("install diffusers", "diffusers")
+if not launch.is_installed("onnx"):
+    launch.run_pip("install onnx", "onnx")
+
+from qairt_sd_pipeline import model_path_1_5, model_path_2_1
+from huggingface_hub import hf_hub_download
+import common_utils as utils
+import zipfile
+import shutil
+
+def download_qairt_sdk():
+    # Setup QAIRT SDK
+    if not os.path.isdir(consts.QNN_SDK_ROOT):
+        os.makedirs(consts.QAIRT_DIR, exist_ok=True)
+        print("Downloading QAIRT SDK...")
+        utils.download_url(consts.QNN_SDK_DOWNLOAD_URL, consts.SDK_SAVE_PATH)
+        print("QAIRT SDK downloaded.")
+
+        with zipfile.ZipFile(consts.SDK_SAVE_PATH, "r") as zip_ref:
+            zip_ref.extractall(consts.EXTENSION_WS)
+        # Move the extracted qairt/<version> directory into QAIRT_DIR.
+        shutil.move(
+            os.path.join(consts.EXTENSION_WS, "qairt", consts.QAIRT_VERSION),
+            os.path.join(consts.QNN_SDK_ROOT, ".."),
+        )
+        shutil.rmtree(os.path.join(consts.EXTENSION_WS, "qairt"))
+        os.remove(consts.SDK_SAVE_PATH)
+
+def setup_qairt_env():
+    # Prepare all the binaries and libraries for execution.
+    SDK_lib_dir = consts.QNN_SDK_ROOT + "\\lib\\arm64x-windows-msvc"
+    SDK_skel = consts.QNN_SDK_ROOT + "\\lib\\hexagon-v{}\\unsigned\\libQnnHtpV{}Skel.so".format(
+        consts.DSP_ARCH, consts.DSP_ARCH
+    )
+
+    # Copy necessary libraries to a common location
+    libs = [
+        "QnnHtp.dll",
+        "QnnSystem.dll",
+        "QnnHtpPrepare.dll",
+        "QnnHtpV{}Stub.dll".format(consts.DSP_ARCH),
+    ]
+    for lib in libs:
+        if not os.path.isfile(os.path.join(consts.QNN_LIBS_DIR, lib)):
+            shutil.copy(os.path.join(SDK_lib_dir, lib), consts.QNN_LIBS_DIR)
+
+    # Check for the skel library by its basename: SDK_skel is an absolute path,
+    # so joining it directly would bypass QNN_LIBS_DIR.
+    if not os.path.isfile(os.path.join(consts.QNN_LIBS_DIR, os.path.basename(SDK_skel))):
+        shutil.copy(SDK_skel, consts.QNN_LIBS_DIR)
+
+
+def create_venv_for_qai_hub():
+    if not os.path.isdir(consts.QAI_HUB_VENV_PATH):
+        utils.run_command(f"python -m venv {consts.QAI_HUB_VENV_PATH}")
+
+def install_qai_hub():
+    utils.run_command(f"{consts.QAI_HUB_VENV_PYTHON_PATH} -m pip install qai-hub")
+    utils.run_command(f"{consts.QAI_HUB_VENV_PYTHON_PATH} -m pip install qai_hub_models")
+    utils.run_command(f"{consts.QAI_HUB_VENV_PATH}\\Scripts\\qai-hub.exe configure --api_token {consts.HUB_ID} > NUL", False)
+
+
+print("Downloading QAIRT model bin files...")
+SD_MODEL_1_5_REVISION = "120de88f304daa9d5fa726ddccdfe086b6349801"
+SD_MODEL_2_1_REVISION = "52f821ad5420d1b0408a8b856733f9e372e7776a"
+
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v1.5",
+    filename="UNet_Quantized.bin",
+    local_dir=model_path_1_5,
+    revision=SD_MODEL_1_5_REVISION,
+)
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v1.5",
+    filename="TextEncoder_Quantized.bin",
+    local_dir=model_path_1_5,
+    revision=SD_MODEL_1_5_REVISION,
+)
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v1.5",
+    filename="VAEDecoder_Quantized.bin",
+    local_dir=model_path_1_5,
+    revision=SD_MODEL_1_5_REVISION,
+)
+
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v2.1",
+    filename="UNet_Quantized.bin",
+    local_dir=model_path_2_1,
+    revision=SD_MODEL_2_1_REVISION,
+)
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v2.1",
+    filename="TextEncoder_Quantized.bin",
+    local_dir=model_path_2_1,
+    revision=SD_MODEL_2_1_REVISION,
+)
+hf_hub_download(
+    repo_id="qualcomm/Stable-Diffusion-v2.1",
+    filename="VAEDecoder_Quantized.bin",
+    local_dir=model_path_2_1,
+    revision=SD_MODEL_2_1_REVISION,
+)
+print("QAIRT model bin files downloaded.")
+
+os.makedirs(consts.CACHE_DIR, exist_ok=True)
+os.makedirs(consts.QNN_LIBS_DIR, exist_ok=True)
+
+download_qairt_sdk()
+setup_qairt_env()
+create_venv_for_qai_hub()
+install_qai_hub()
+
+print("Downloading required models using qai-hub...")
+utils.run_command(f"{consts.QAI_HUB_VENV_PYTHON_PATH} {os.path.join(consts.EXTENSION_WS, 'qairt_hub_models.py')}")
+print("Downloaded required models.")
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/qairt_constants.py b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_constants.py
new file mode 100644
index 0000000..8a6564e
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_constants.py
@@ -0,0 +1,32 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+
+import os
+from modules.paths_internal import script_path, extensions_dir
+
+QNN_SDK_DOWNLOAD_URL = "https://softwarecenter.qualcomm.com/api/download/software/qualcomm_neural_processing_sdk/v2.24.0.240710.zip"
+QAI_APPBUILDER_WHEEL = "https://github.com/quic/ai-engine-direct-helper/releases/download/v2.24.0/qai_appbuilder-2.24.0-cp310-cp310-win_amd64.whl"
+
+QAIRT_VERSION = "2.24.0.240626"
+DSP_ARCH = "73"  # For the X-Elite device.
+EXTENSION_WS = os.path.join(extensions_dir, "qairt_accelerate")
+VENV_PYTHON_PATH = f"{EXTENSION_WS}\\..\\..\\venv\\Scripts\\python.exe"
+QAI_HUB_VENV_PATH = f"{EXTENSION_WS}\\qai_hub_venv"
+QAI_HUB_VENV_PYTHON_PATH = f"{EXTENSION_WS}\\qai_hub_venv\\Scripts\\python.exe"
+QNN_LIBS_DIR = os.path.join(EXTENSION_WS, "qnn_assets", "qnn_libs")
+CACHE_DIR = os.path.join(EXTENSION_WS, "qnn_assets", "cache")
+SDK_SAVE_PATH = EXTENSION_WS + f"\\{QAIRT_VERSION}.zip"
+QAIRT_DIR = "C:\\Qualcomm\\AIStack\\QAIRT"
+QNN_SDK_ROOT = f"C:\\Qualcomm\\AIStack\\QAIRT\\{QAIRT_VERSION}"
+
+
+HUB_ID = "aac24f12d047e7f558d8effe4b2fdad0f5c2c341"
+CONVERTION_DIR = os.path.join(EXTENSION_WS, "model_conversion")
+ESRGAN_X4_MODEL_ID = "meq29lx7n"
+ESRGAN_X4_MODEL_PATH = os.path.join(CONVERTION_DIR, "esrgan.bin")
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/qairt_hub_models.py b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_hub_models.py
new file mode 100644
index 0000000..4675b25
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_hub_models.py
@@ -0,0 +1,30 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+import common_utils as utils
+import os
+import qairt_constants as consts
+import shutil
+import qai_hub
+
+def convert_and_download_models():
+    os.makedirs(consts.CONVERTION_DIR, exist_ok=True)
+    if not os.path.isfile(consts.ESRGAN_X4_MODEL_PATH):
+        try:
+            # Fetch the precompiled ESRGAN context binary from AI Hub by model ID.
+            model = qai_hub.get_model(consts.ESRGAN_X4_MODEL_ID)
+            model.download(filename=consts.ESRGAN_X4_MODEL_PATH)
+        except Exception:
+            # Fall back to exporting the model with qai_hub_models.
+            utils.run_command(
+                f"{consts.QAI_HUB_VENV_PYTHON_PATH} -m qai_hub_models.models.esrgan.export "
+                f"--device \"Snapdragon X Elite CRD\" --height 512 --width 512 --target-runtime qnn "
+                f"--skip-profiling --skip-inferencing --skip-summary --output-dir {consts.CONVERTION_DIR}"
+            )
+            shutil.move(
+                os.path.join(consts.CONVERTION_DIR, "esrgan.so"),
+                consts.ESRGAN_X4_MODEL_PATH,
+            )
+
+convert_and_download_models()
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/qairt_sd_pipeline.py b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_sd_pipeline.py
new file mode 100644
index 0000000..189566b
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/qairt_sd_pipeline.py
@@ -0,0 +1,458 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+import time
+from PIL import Image
+import os
+import shutil
+import cv2
+import numpy as np
+import torch
+from transformers import CLIPTokenizer
+from modules import paths, shared, modelloader
+from qai_appbuilder import (
+    QNNContext,
+    QNNContextProc,
+    QNNShareMemory,
+    Runtime,
+    LogLevel,
+    ProfilingLevel,
+    PerfProfile,
+    QNNConfig,
+    timer,
+)
+from diffusers import UNet2DConditionModel
+from diffusers.models.embeddings import get_timestep_embedding
+from diffusers import (
+    DDIMScheduler,
+    DPMSolverMultistepScheduler,
+    EulerAncestralDiscreteScheduler,
+    EulerDiscreteScheduler,
+    HeunDiscreteScheduler,
+    LMSDiscreteScheduler,
+    PNDMScheduler,
+)
+import qairt_constants as consts
+
+model_path_1_5 = os.path.abspath(
+    os.path.join(paths.models_path, "Stable-diffusion", "qcom-Stable-Diffusion-v1.5")
+)
+model_path_2_1 = os.path.abspath(
+    os.path.join(paths.models_path, "Stable-diffusion", "qcom-Stable-Diffusion-v2.1")
+)
+
+
+class TextEncoder(QNNContext):
+    # @timer
+    def Inference(self, input_data, sd_version):
+        input_datas = [input_data]
+        output_data = super().Inference(input_datas, perf_profile="burst")[0]
+
+        # The text encoder output is (1, 77, 768) for SD 1.5 and (1, 77, 1024) for SD 2.1.
+        if sd_version == "1.5":
+            output_data = output_data.reshape((1, 77, 768))
+        elif sd_version == "2.1":
+            output_data = output_data.reshape((1, 77, 1024))
+        return output_data
+
+
+class Unet(QNNContext):
+    # @timer
+    def Inference(self, input_data_1, input_data_2, input_data_3):
+        # The arrays must be flattened to one dimension before being sent to the
+        # network; 'input_data_2' is already one-dimensional, so it needs no reshape.
+        input_data_1 = input_data_1.reshape(input_data_1.size)
+        input_data_3 = input_data_3.reshape(input_data_3.size)
+
+        input_datas = [input_data_1, input_data_2, input_data_3]
+        output_data = super().Inference(input_datas, perf_profile="burst")[0]
+
+        output_data = output_data.reshape(1, 64, 64, 4)
+        return output_data
+
+
+class VaeDecoder(QNNContext):
+    # @timer
+    def Inference(self, input_data):
+        input_data = input_data.reshape(input_data.size)
+        input_datas = [input_data]
+
+        output_data = super().Inference(input_datas, perf_profile="burst")[0]
+
+        return output_data
+
+
+class UpscaleModel(QNNContextProc):
+    # @timer
+    def Inference(self, mem, input_data):
+        input_datas = [input_data]
+        output_data = super().Inference(mem, input_datas, perf_profile="burst")[0]
+        return output_data
+
+
+class StableDiffusionInput:
+    user_prompt = ""
+    uncond_prompt = ""
+    user_seed = np.int64(0)
+    user_step = 20
+    user_text_guidance = 7.5
+    is_high_resolution = False
+    upscaler_model_name = None
+    sampler_name = None
+    model_name = None
+
+    def __init__(
+        self,
+        prompt,
+        un_prompt,
+        seed,
+        step,
+        text_guidance,
+        high_resolution,
+        sampler_name,
+        model_name,
+        upscaler_model_name,
+    ):
+        self.user_prompt = prompt
+        self.uncond_prompt = un_prompt
+        self.user_seed = seed
+        self.user_step = step
+        self.user_text_guidance = text_guidance
+        self.is_high_resolution = high_resolution
+        self.sampler_name = sampler_name
+        self.model_name = model_name
+        self.upscaler_model_name = upscaler_model_name
+        self.upscaler_model_used, self.upscaler_model_path = get_upscaler_model(
+            upscaler_model_name
+        )
+
+        assert isinstance(self.user_seed, int), "user_seed should be of type int"
+        assert isinstance(self.user_step, int), "user_step should be of type int"
+        assert isinstance(
+            self.user_text_guidance, float
+        ), "user_text_guidance should be of type float"
+
+
+def get_upscaler_model(model_name):
+    # This is the default model; more upscale models will be added.
+    return ("ESRGAN_4x", consts.ESRGAN_X4_MODEL_PATH)
+
+
+class UpscalerPipeline:
+    model = None
+    model_mem = None
+    model_name = None
+
+    def __init__(self, model_name, model_path):
+        self.model_name = model_name
+        name = "upscale"
+        # process name
+        model_proc = "~upscale"
+        # shared memory name
+        model_mem_name = name + "~memory"
+
+        # Instance of UpscaleModel, which inherits from QNNContextProc; the model
+        # is loaded into a separate process.
+        self.model = UpscaleModel(name, model_proc, model_path)
+        self.model_mem = QNNShareMemory(model_mem_name, 1024 * 1024 * 50)  # 50M
+
+    # Release all the models.
+    def __del__(self):
+        del self.model
+        del self.model_mem
+
+    def execute(self, input_data):
+        # UpscaleModel.Inference already wraps its input in a list,
+        # so pass the raw array here.
+        return self.model.Inference(self.model_mem, input_data)
+
+
+class QnnStableDiffusionPipeline:
+    TOKENIZER_MAX_LENGTH = 77  # Tokenizer output max length (must be 77)
+
+    default_scheduler = DPMSolverMultistepScheduler(
+        num_train_timesteps=1000,
+        beta_start=0.00085,
+        beta_end=0.012,
+        beta_schedule="scaled_linear",
+    )
+    scheduler = None
+    tokenizer = None
+    text_encoder = None
+    unet = None
+    vae_decoder = None
+
+    unet_time_embeddings_1_5 = None
+    unet_time_embeddings_2_1 = None
+
+    sd_version = None
+
+    def __init__(self, model_name):
+        self.set_model_version(model_name)
+
+        self.unet_time_embeddings_1_5 = UNet2DConditionModel.from_pretrained(
+            "runwayml/stable-diffusion-v1-5",
+            subfolder="unet",
+            cache_dir=consts.CACHE_DIR,
+        ).time_embedding
+        self.unet_time_embeddings_2_1 = UNet2DConditionModel.from_pretrained(
+            "stabilityai/stable-diffusion-2-1-base",
+            subfolder="unet",
+            cache_dir=consts.CACHE_DIR,
+        ).time_embedding
+
+        self.load_model()
+
+        torch.from_numpy(
+            np.array([1])
+        )  # Make LazyImport load the torch & numpy libs here.
+
+    def is_sd_1_5(self):
+        return self.sd_version == "1.5"
+
+    def run_tokenizer(self, prompt):
+        text_input = self.tokenizer(
+            prompt,
+            padding="max_length",
+            max_length=self.TOKENIZER_MAX_LENGTH,
+            truncation=True,
+        )
+        text_input = np.array(text_input.input_ids, dtype=np.float32)
+        return text_input
+
+    def run_scheduler(
+        self,
+        sd_input,
+        noise_pred_uncond,
+        noise_pred_text,
+        latent_in,
+        timestep,
+    ):
+        # Convert all inputs from NHWC to NCHW
+        noise_pred_uncond = np.transpose(noise_pred_uncond, (0, 3, 1, 2)).copy()
+        noise_pred_text = np.transpose(noise_pred_text, (0, 3, 1, 2)).copy()
+        latent_in = np.transpose(latent_in, (0, 3, 1, 2)).copy()
+
+        # Convert all inputs to torch tensors
+        noise_pred_uncond = torch.from_numpy(noise_pred_uncond)
+        noise_pred_text = torch.from_numpy(noise_pred_text)
+        latent_in = torch.from_numpy(latent_in)
+
+        # Merge noise_pred_uncond and noise_pred_text based on user_text_guidance
+        noise_pred = noise_pred_uncond + sd_input.user_text_guidance * (
+            noise_pred_text - noise_pred_uncond
+        )
+
+        # Run Scheduler step
+        latent_out = self.scheduler.step(
+            noise_pred, timestep, latent_in
+        ).prev_sample.numpy()
+
+        # Convert latent_out from NCHW back to NHWC
+        latent_out = np.transpose(latent_out, (0, 2, 3, 1)).copy()
+
+        return latent_out
+
+    # Get the timestep for a given step index
+    def get_timestep(self, step):
+        return np.int32(self.scheduler.timesteps.numpy()[step])
+
+    def get_time_embedding(self, timestep, unet_time_embeddings):
+        timestep = torch.tensor([timestep])
+        t_emb = get_timestep_embedding(timestep, 320, True, 0)
+        emb = unet_time_embeddings(t_emb).detach().numpy()
+        return emb
+
+    def set_scheduler(self, config, sampler_name):
+        if sampler_name == "Euler a":
+            self.scheduler = EulerAncestralDiscreteScheduler.from_config(config)
+        elif sampler_name == "Euler":
+            self.scheduler = EulerDiscreteScheduler.from_config(config)
+        elif sampler_name == "LMS":
+            self.scheduler = LMSDiscreteScheduler.from_config(config)
+        elif sampler_name == "Heun":
+            self.scheduler = HeunDiscreteScheduler.from_config(config)
+        elif sampler_name == "DPM++ 2M":
+            self.scheduler = DPMSolverMultistepScheduler.from_config(
+                config,
+                algorithm_type="dpmsolver++",
+                use_karras_sigmas=False,
+            )
+        elif sampler_name == "LMS Karras":
+            self.scheduler = LMSDiscreteScheduler.from_config(
+                config, use_karras_sigmas=True
+            )
"DPM++ 2M Karras": + self.scheduler = DPMSolverMultistepScheduler.from_config( + config, + algorithm_type="dpmsolver++", + use_karras_sigmas=True, + ) + elif sampler_name == "DDIM": + self.scheduler = DDIMScheduler.from_config(config) + elif sampler_name == "PLMS": + self.scheduler = PNDMScheduler.from_config(config) + else: + self.scheduler = DPMSolverMultistepScheduler( + num_train_timesteps=1000, + beta_start=0.00085, + beta_end=0.012, + beta_schedule="scaled_linear", + ) + + def set_model_version(self, model_name): + if model_name == "Stable-Diffusion-1.5": + self.sd_version = "1.5" + elif model_name == "Stable-Diffusion-2.1": + self.sd_version = "2.1" + + def load_model(self): + QNNConfig.Config( + consts.QNN_LIBS_DIR, Runtime.HTP, LogLevel.ERROR, ProfilingLevel.BASIC + ) + # model names + model_text_encoder = "text_encoder" + model_unet = "model_unet" + model_vae_decoder = "vae_decoder" + + model_path = None + if self.is_sd_1_5(): + model_path = model_path_1_5 + # Initializing the Tokenizer + self.tokenizer = CLIPTokenizer.from_pretrained( + "openai/clip-vit-base-patch32", cache_dir=consts.CACHE_DIR + ) + else: + model_path = model_path_2_1 + self.tokenizer = CLIPTokenizer.from_pretrained( + "stabilityai/stable-diffusion-2-1-base", + subfolder="tokenizer", + revision="main", + cache_dir=consts.CACHE_DIR, + ) + text_encoder_model = "{}\\TextEncoder_Quantized.bin".format(model_path) + unet_model = "{}\\UNet_Quantized.bin".format(model_path) + vae_decoder_model = "{}\\VAEDecoder_Quantized.bin".format(model_path) + print(f"Loading models from {model_path}") + # Instance for Unet + self.unet = Unet(model_unet, unet_model) + + # Instance for TextEncoder + self.text_encoder = TextEncoder(model_text_encoder, text_encoder_model) + + # Instance for VaeDecoder + self.vae_decoder = VaeDecoder(model_vae_decoder, vae_decoder_model) + + def reload_model(self, model_name): + self.set_model_version(model_name) + del self.tokenizer + del self.text_encoder + del self.unet + del self.vae_decoder + self.load_model() + + # Execute the Stable Diffusion pipeline + def model_execute( + self, + sd_input: StableDiffusionInput, + callback, + upscaler_pipeline: UpscalerPipeline, + ) -> Image: + image = None + PerfProfile.SetPerfProfileGlobal(PerfProfile.BURST) + self.set_scheduler(self.default_scheduler.config, sd_input.sampler_name) + + self.scheduler.set_timesteps( + sd_input.user_step + ) # Setting up user provided time steps for Scheduler + + # Run Tokenizer + cond_tokens = self.run_tokenizer(sd_input.user_prompt) + uncond_tokens = self.run_tokenizer(sd_input.uncond_prompt) + + # Run Text Encoder on Tokens + uncond_text_embedding = self.text_encoder.Inference( + uncond_tokens, self.sd_version + ) + user_text_embedding = self.text_encoder.Inference(cond_tokens, self.sd_version) + + # Initialize the latent input with random initial latent + random_init_latent = torch.randn( + (1, 4, 64, 64), generator=torch.manual_seed(sd_input.user_seed) + ).numpy() + latent_in = random_init_latent.transpose(0, 2, 3, 1) + + # Run the loop for user_step times + for step in range(sd_input.user_step): + # print(f"Step {step} Running...") + + time_step = self.get_timestep(step) + unet_time_embeddings = self.unet_time_embeddings_1_5 + if not self.is_sd_1_5(): + unet_time_embeddings = self.unet_time_embeddings_2_1 + time_embedding = self.get_time_embedding(time_step, unet_time_embeddings) + + unconditional_noise_pred = self.unet.Inference( + latent_in, time_embedding, uncond_text_embedding + ) + conditional_noise_pred = 
+            conditional_noise_pred = self.unet.Inference(
+                latent_in, time_embedding, user_text_embedding
+            )
+
+            latent_in = self.run_scheduler(
+                sd_input,
+                unconditional_noise_pred,
+                conditional_noise_pred,
+                latent_in,
+                time_step,
+            )
+            callback(step)
+
+        # Run VAE
+        output_image = self.vae_decoder.Inference(latent_in)
+
+        if len(output_image) == 0:
+            callback(None)
+        else:
+            image_size = 512
+
+            # Run the ESRGAN upscaler if high resolution was requested
+            if sd_input.is_high_resolution:
+                print(f"Upscaler used: {sd_input.upscaler_model_path}")
+                output_image = upscaler_pipeline.execute(output_image)
+                image_size = 2048
+
+            output_image = np.clip(output_image * 255.0, 0.0, 255.0).astype(np.uint8)
+            output_image = output_image.reshape(image_size, image_size, -1)
+            image = Image.fromarray(output_image, mode="RGB")
+
+            callback(image)
+
+        PerfProfile.RelPerfProfileGlobal()
+        return image
+
+    # Release all the models.
+    def __del__(self):
+        del self.text_encoder
+        del self.unet
+        del self.vae_decoder
+        if self.scheduler:
+            del self.scheduler
+        del self.tokenizer
+
+    def is_model_loaded(self):
+        return self.unet is not None
+
+
+pipeline = None
+upscaler_pipeline: UpscalerPipeline = None
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/scripts/qairt_accelerate.py b/plugins/stable-diffusion-webui/qairt_accelerate/scripts/qairt_accelerate.py
new file mode 100644
index 0000000..fab0aab
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/scripts/qairt_accelerate.py
@@ -0,0 +1,92 @@
+# =============================================================================
+#
+# Copyright (c) 2024, Qualcomm Innovation Center, Inc. All rights reserved.
+#
+# SPDX-License-Identifier: BSD-3-Clause
+#
+# =============================================================================
+
+import modules.scripts as scripts
+import gradio as gr
+from modules.processing import Processed, get_fixed_seed
+from modules.processing import StableDiffusionProcessing
+import time
+from PIL import Image
+import qairt_sd_pipeline as sd
+import qairt_constants as consts
+
+DEFAULT_MODEL = "Stable-Diffusion-1.5"
+sd.pipeline = sd.QnnStableDiffusionPipeline(DEFAULT_MODEL)
+sd.upscaler_pipeline = sd.UpscalerPipeline("ESRGAN_4x", consts.ESRGAN_X4_MODEL_PATH)
+
+def modelExecuteCallback(result, sd_input):
+    if Image.isImageType(result):
+        print("Image Generation successful")
+    elif result is None:
+        print("modelExecuteCallback result: No image generated")
+
+class Script(scripts.Script):
+
+    def title(self):
+        return "Accelerate with Qualcomm AI Runtime"
+
+    def show(self, is_img2img):
+        return True
+
+    def reload_pipeline(self, qnn_model_name):
+        if sd.pipeline:
+            del sd.pipeline
+        sd.pipeline = sd.QnnStableDiffusionPipeline(qnn_model_name)
+        return qnn_model_name
+
+    def ui(self, is_img2img):
+        with gr.Blocks() as demo:
+            qnn_model_name = gr.Dropdown(
+                label="Model to use",
+                choices=["Stable-Diffusion-1.5", "Stable-Diffusion-2.1"],
+                value=DEFAULT_MODEL,
+                visible=True,
+            )
+            qnn_model_name.change(
+                fn=self.reload_pipeline, inputs=qnn_model_name, outputs=qnn_model_name
+            )
+        return [qnn_model_name]
+
+    def get_info_message(self, sd_input: sd.StableDiffusionInput, qnn_model_name, image):
+        message = f"{sd_input.user_prompt}\nNegative prompt: {sd_input.uncond_prompt}\n"
+        message += f"Steps: {sd_input.user_step}, Sampler: {sd_input.sampler_name}, "
+        message += f"CFG scale: {sd_input.user_text_guidance}, Seed: {sd_input.user_seed}, "
+        message += f"Size: {image.size[0]}x{image.size[1]}, "
+        message += f"Model: {qnn_model_name}, Enable Hires: {sd_input.is_high_resolution}"
+        if sd_input.is_high_resolution:
+            message += f", Upscaler model: {sd_input.upscaler_model_used}"
+        return message
+
+    def run(self, p: StableDiffusionProcessing, qnn_model_name):
+        time_start = time.time()
+
+        user_seed = get_fixed_seed(p.seed)
+        # Fall back to DPM++ 2M, the only sampler supported so far.
+        supported_samplers = ["DPM++ 2M"]
+        if p.sampler_name not in supported_samplers:
+            p.sampler_name = "DPM++ 2M"
+        sd_input = sd.StableDiffusionInput(
+            p.prompt,
+            p.negative_prompt,
+            user_seed,
+            p.steps,
+            float(p.cfg_scale),
+            p.enable_hr,
+            p.sampler_name,
+            qnn_model_name,
+            p.hr_upscaler,
+        )
+        # Recreate the upscaler pipeline if a different upscaler was selected.
+        if sd.upscaler_pipeline.model_name != sd_input.upscaler_model_name:
+            del sd.upscaler_pipeline
+            sd.upscaler_pipeline = sd.UpscalerPipeline(
+                sd_input.upscaler_model_name, sd_input.upscaler_model_path
+            )
+
+        image = sd.pipeline.model_execute(
+            sd_input, lambda result: modelExecuteCallback(result, sd_input), sd.upscaler_pipeline
+        )
+
+        time_end = time.time()
+        print("Time consumed for inference: {:.2f}s".format(time_end - time_start))
+
+        return Processed(
+            p,
+            [image],
+            seed=user_seed,
+            info=self.get_info_message(sd_input, qnn_model_name, image),
+        )
diff --git a/plugins/stable-diffusion-webui/qairt_accelerate/ui-config.json b/plugins/stable-diffusion-webui/qairt_accelerate/ui-config.json
new file mode 100644
index 0000000..b8db528
--- /dev/null
+++ b/plugins/stable-diffusion-webui/qairt_accelerate/ui-config.json
@@ -0,0 +1,817 @@
+{ + "txt2img/Prompt/visible": true, + "txt2img/Prompt/value": "", + "txt2img/Negative prompt/visible": true, + "txt2img/Negative prompt/value": "", + "txt2img/Interrupt/visible": true, + "txt2img/Skip/visible": true, + "txt2img/Interrupting.../visible": true, + "txt2img/Generate/visible": true, + "txt2img/↙️/visible": true, + "txt2img/🗑️/visible": true, + "txt2img/📋/visible": true, + "txt2img/Styles/visible": true, + "txt2img/Styles/value": [], + "txt2img/🖌️/visible": true, + "txt2img/🔄/visible": true, + "txt2img/📝/visible": true, + "txt2img/Close/visible": true, + "txt2img/Tabs@txt2img_extra_tabs/selected": null, + "customscript/qairt_accelerate.py/txt2img/Sampling method/visible": true, + "customscript/qairt_accelerate.py/txt2img/Sampling method/value": "Euler", + "customscript/sampler.py/txt2img/Sampling method/visible": true, + "customscript/sampler.py/txt2img/Sampling method/value": "DPM++ 2M", + "customscript/sampler.py/txt2img/Schedule type/visible": true, + "customscript/sampler.py/txt2img/Schedule type/value": "Automatic", + "customscript/sampler.py/txt2img/Sampling steps/visible": true, + "customscript/sampler.py/txt2img/Sampling steps/value": 20, + "customscript/sampler.py/txt2img/Sampling steps/minimum": 1, + "customscript/sampler.py/txt2img/Sampling steps/maximum": 150, + "customscript/sampler.py/txt2img/Sampling steps/step": 1, + "txt2img/Hires. fix/visible": true, + "txt2img/Hires. 
fix/value": false, + "txt2img/Upscaler/visible": true, + "txt2img/Upscaler/value": "ESRGAN_4x", + "txt2img/Hires steps/visible": false, + "txt2img/Hires steps/value": 0, + "txt2img/Hires steps/minimum": 0, + "txt2img/Hires steps/maximum": 150, + "txt2img/Hires steps/step": 1, + "txt2img/Denoising strength/visible": false, + "txt2img/Denoising strength/value": 0.7, + "txt2img/Denoising strength/minimum": 0.0, + "txt2img/Denoising strength/maximum": 1.0, + "txt2img/Denoising strength/step": 0.01, + "txt2img/Upscale by/visible": true, + "txt2img/Upscale by/value": 4.0, + "txt2img/Upscale by/minimum": 2.0, + "txt2img/Upscale by/maximum": 4.0, + "txt2img/Upscale by/step": 2, + "txt2img/Resize width to/visible": true, + "txt2img/Resize width to/value": 0, + "txt2img/Resize width to/minimum": 0, + "txt2img/Resize width to/maximum": 2048, + "txt2img/Resize width to/step": 8, + "txt2img/Resize height to/visible": true, + "txt2img/Resize height to/value": 0, + "txt2img/Resize height to/minimum": 0, + "txt2img/Resize height to/maximum": 2048, + "txt2img/Resize height to/step": 8, + "txt2img/Checkpoint/visible": true, + "txt2img/Checkpoint/value": "Use same checkpoint", + "txt2img/Hires sampling method/visible": true, + "txt2img/Hires sampling method/value": "Use same sampler", + "txt2img/Hires schedule type/visible": true, + "txt2img/Hires schedule type/value": "Use same scheduler", + "txt2img/Hires prompt/visible": true, + "txt2img/Hires prompt/value": "", + "txt2img/Hires negative prompt/visible": true, + "txt2img/Hires negative prompt/value": "", + "customscript/refiner.py/txt2img/Refiner/visible": false, + "customscript/refiner.py/txt2img/Refiner/value": false, + "customscript/refiner.py/txt2img/Checkpoint/visible": true, + "customscript/refiner.py/txt2img/Checkpoint/value": "", + "customscript/refiner.py/txt2img/Switch at/visible": true, + "customscript/refiner.py/txt2img/Switch at/value": 0.8, + "customscript/refiner.py/txt2img/Switch at/minimum": 0.01, + "customscript/refiner.py/txt2img/Switch at/maximum": 1.0, + "customscript/refiner.py/txt2img/Switch at/step": 0.01, + "txt2img/Width/visible": false, + "txt2img/Width/value": 512, + "txt2img/Width/minimum": 64, + "txt2img/Width/maximum": 2048, + "txt2img/Width/step": 8, + "txt2img/Height/visible": false, + "txt2img/Height/value": 512, + "txt2img/Height/minimum": 64, + "txt2img/Height/maximum": 2048, + "txt2img/Height/step": 8, + "txt2img/⇅/visible": true, + "txt2img/Batch count/visible": false, + "txt2img/Batch count/value": 1, + "txt2img/Batch count/minimum": 1, + "txt2img/Batch count/maximum": 100, + "txt2img/Batch count/step": 1, + "txt2img/Batch size/visible": false, + "txt2img/Batch size/value": 1, + "txt2img/Batch size/minimum": 1, + "txt2img/Batch size/maximum": 8, + "txt2img/Batch size/step": 1, + "txt2img/CFG Scale/visible": true, + "txt2img/CFG Scale/value": 7.0, + "txt2img/CFG Scale/minimum": 1.0, + "txt2img/CFG Scale/maximum": 30.0, + "txt2img/CFG Scale/step": 0.5, + "customscript/seed.py/txt2img/Seed/visible": true, + "customscript/seed.py/txt2img/Seed/value": -1, + "txt2img/🎲️/visible": true, + "txt2img/♻️/visible": true, + "customscript/seed.py/txt2img/Extra/visible": false, + "customscript/seed.py/txt2img/Extra/value": false, + "customscript/seed.py/txt2img/Variation seed/visible": true, + "customscript/seed.py/txt2img/Variation seed/value": -1, + "customscript/seed.py/txt2img/Variation strength/visible": true, + "customscript/seed.py/txt2img/Variation strength/value": 0.0, + "customscript/seed.py/txt2img/Variation 
strength/minimum": 0, + "customscript/seed.py/txt2img/Variation strength/maximum": 1, + "customscript/seed.py/txt2img/Variation strength/step": 0.01, + "customscript/seed.py/txt2img/Resize seed from width/visible": true, + "customscript/seed.py/txt2img/Resize seed from width/value": 0, + "customscript/seed.py/txt2img/Resize seed from width/minimum": 0, + "customscript/seed.py/txt2img/Resize seed from width/maximum": 2048, + "customscript/seed.py/txt2img/Resize seed from width/step": 8, + "customscript/seed.py/txt2img/Resize seed from height/visible": true, + "customscript/seed.py/txt2img/Resize seed from height/value": 0, + "customscript/seed.py/txt2img/Resize seed from height/minimum": 0, + "customscript/seed.py/txt2img/Resize seed from height/maximum": 2048, + "customscript/seed.py/txt2img/Resize seed from height/step": 8, + "txt2img/Override settings/value": null, + "txt2img/Script/visible": true, + "txt2img/Script/value": "Accelerate with Qualcomm AI Runtime", + "customscript/prompt_matrix.py/txt2img/Put variable parts at start of prompt/visible": true, + "customscript/prompt_matrix.py/txt2img/Put variable parts at start of prompt/value": false, + "customscript/prompt_matrix.py/txt2img/Use different seed for each picture/visible": true, + "customscript/prompt_matrix.py/txt2img/Use different seed for each picture/value": false, + "customscript/prompt_matrix.py/txt2img/Select prompt/visible": true, + "customscript/prompt_matrix.py/txt2img/Select prompt/value": "positive", + "customscript/prompt_matrix.py/txt2img/Select joining char/visible": true, + "customscript/prompt_matrix.py/txt2img/Select joining char/value": "comma", + "customscript/prompt_matrix.py/txt2img/Grid margins (px)/visible": true, + "customscript/prompt_matrix.py/txt2img/Grid margins (px)/value": 0, + "customscript/prompt_matrix.py/txt2img/Grid margins (px)/minimum": 0, + "customscript/prompt_matrix.py/txt2img/Grid margins (px)/maximum": 500, + "customscript/prompt_matrix.py/txt2img/Grid margins (px)/step": 2, + "customscript/prompts_from_file.py/txt2img/Iterate seed every line/visible": true, + "customscript/prompts_from_file.py/txt2img/Iterate seed every line/value": false, + "customscript/prompts_from_file.py/txt2img/Use same random seed for all lines/visible": true, + "customscript/prompts_from_file.py/txt2img/Use same random seed for all lines/value": false, + "customscript/prompts_from_file.py/txt2img/Insert prompts at the/visible": true, + "customscript/prompts_from_file.py/txt2img/Insert prompts at the/value": "start", + "customscript/prompts_from_file.py/txt2img/List of prompt inputs/visible": true, + "customscript/prompts_from_file.py/txt2img/List of prompt inputs/value": "", + "customscript/xyz_grid.py/txt2img/X type/visible": true, + "customscript/xyz_grid.py/txt2img/X type/value": "Seed", + "customscript/xyz_grid.py/txt2img/X values/visible": true, + "customscript/xyz_grid.py/txt2img/X values/value": "", + "customscript/xyz_grid.py/txt2img/Y type/visible": true, + "customscript/xyz_grid.py/txt2img/Y type/value": "Nothing", + "customscript/xyz_grid.py/txt2img/Y values/visible": true, + "customscript/xyz_grid.py/txt2img/Y values/value": "", + "customscript/xyz_grid.py/txt2img/Z type/visible": true, + "customscript/xyz_grid.py/txt2img/Z type/value": "Nothing", + "customscript/xyz_grid.py/txt2img/Z values/visible": true, + "customscript/xyz_grid.py/txt2img/Z values/value": "", + "customscript/xyz_grid.py/txt2img/Draw legend/visible": true, + "customscript/xyz_grid.py/txt2img/Draw legend/value": true, + 
"customscript/xyz_grid.py/txt2img/Keep -1 for seeds/visible": true, + "customscript/xyz_grid.py/txt2img/Keep -1 for seeds/value": false, + "customscript/xyz_grid.py/txt2img/Vary seeds for X/visible": true, + "customscript/xyz_grid.py/txt2img/Vary seeds for X/value": false, + "customscript/xyz_grid.py/txt2img/Vary seeds for Y/visible": true, + "customscript/xyz_grid.py/txt2img/Vary seeds for Y/value": false, + "customscript/xyz_grid.py/txt2img/Vary seeds for Z/visible": true, + "customscript/xyz_grid.py/txt2img/Vary seeds for Z/value": false, + "customscript/xyz_grid.py/txt2img/Include Sub Images/visible": true, + "customscript/xyz_grid.py/txt2img/Include Sub Images/value": false, + "customscript/xyz_grid.py/txt2img/Include Sub Grids/visible": true, + "customscript/xyz_grid.py/txt2img/Include Sub Grids/value": false, + "customscript/xyz_grid.py/txt2img/Use text inputs instead of dropdowns/visible": true, + "customscript/xyz_grid.py/txt2img/Use text inputs instead of dropdowns/value": false, + "customscript/xyz_grid.py/txt2img/Grid margins (px)/visible": true, + "customscript/xyz_grid.py/txt2img/Grid margins (px)/value": 0, + "customscript/xyz_grid.py/txt2img/Grid margins (px)/minimum": 0, + "customscript/xyz_grid.py/txt2img/Grid margins (px)/maximum": 500, + "customscript/xyz_grid.py/txt2img/Grid margins (px)/step": 2, + "txt2img/Swap X/Y axes/visible": true, + "txt2img/Swap Y/Z axes/visible": true, + "txt2img/Swap X/Z axes/visible": true, + "txt2img/📂/visible": true, + "txt2img/💾/visible": true, + "txt2img/🗃️/visible": true, + "txt2img/🖼️/visible": true, + "txt2img/🎨️/visible": true, + "txt2img/📐/visible": true, + "txt2img/✨/visible": true, + "txt2img/Description/visible": true, + "txt2img/Description/value": "", + "txt2img/Cancel/visible": true, + "txt2img/Replace preview/visible": true, + "txt2img/Save/visible": true, + "txt2img/Preferred VAE/visible": true, + "txt2img/Preferred VAE/value": "None", + "txt2img/Stable Diffusion version/visible": true, + "txt2img/Stable Diffusion version/value": "Unknown", + "txt2img/Activation text/visible": true, + "txt2img/Activation text/value": "", + "txt2img/Preferred weight/visible": true, + "txt2img/Preferred weight/value": 0.0, + "txt2img/Preferred weight/minimum": 0.0, + "txt2img/Preferred weight/maximum": 2.0, + "txt2img/Preferred weight/step": 0.01, + "txt2img/Random prompt/visible": true, + "txt2img/Random prompt/value": "", + "img2img/Prompt/visible": true, + "img2img/Prompt/value": "", + "img2img/Negative prompt/visible": true, + "img2img/Negative prompt/value": "", + "img2img/Interrupt/visible": true, + "img2img/Skip/visible": true, + "img2img/Interrupting.../visible": true, + "img2img/Generate/visible": true, + "img2img/↙️/visible": true, + "img2img/🗑️/visible": true, + "img2img/📋/visible": true, + "img2img/📎/visible": true, + "img2img/📦/visible": true, + "img2img/Styles/visible": true, + "img2img/Styles/value": [], + "img2img/🖌️/visible": true, + "img2img/🔄/visible": true, + "img2img/📝/visible": true, + "img2img/Close/visible": true, + "img2img/Tabs@img2img_extra_tabs/selected": null, + "img2img/Tabs@mode_img2img/selected": null, + "img2img/img2img/visible": true, + "img2img/sketch/visible": true, + "img2img/inpaint/visible": true, + "img2img/inpaint sketch/visible": true, + "img2img/Input directory/visible": true, + "img2img/Input directory/value": "", + "img2img/Output directory/visible": true, + "img2img/Output directory/value": "", + "img2img/Inpaint batch mask directory (required for inpaint batch processing only)/visible": true, + 
"img2img/Inpaint batch mask directory (required for inpaint batch processing only)/value": "", + "img2img/Append png info to prompts/visible": true, + "img2img/Append png info to prompts/value": false, + "img2img/PNG info directory/visible": true, + "img2img/PNG info directory/value": "", + "img2img/Resize mode/visible": true, + "img2img/Resize mode/value": "Just resize", + "img2img/Mask blur/visible": true, + "img2img/Mask blur/value": 4, + "img2img/Mask blur/minimum": 0, + "img2img/Mask blur/maximum": 64, + "img2img/Mask blur/step": 1, + "img2img/Mask transparency/value": 0, + "img2img/Mask transparency/minimum": 0, + "img2img/Mask transparency/maximum": 100, + "img2img/Mask transparency/step": 1, + "img2img/Mask mode/visible": true, + "img2img/Mask mode/value": "Inpaint masked", + "img2img/Masked content/visible": true, + "img2img/Masked content/value": "original", + "img2img/Inpaint area/visible": true, + "img2img/Inpaint area/value": "Whole picture", + "img2img/Only masked padding, pixels/visible": true, + "img2img/Only masked padding, pixels/value": 32, + "img2img/Only masked padding, pixels/minimum": 0, + "img2img/Only masked padding, pixels/maximum": 256, + "img2img/Only masked padding, pixels/step": 4, + "customscript/soft_inpainting.py/img2img/Soft inpainting/visible": true, + "customscript/soft_inpainting.py/img2img/Soft inpainting/value": false, + "customscript/soft_inpainting.py/img2img/Schedule bias/visible": true, + "customscript/soft_inpainting.py/img2img/Schedule bias/value": 1, + "customscript/soft_inpainting.py/img2img/Schedule bias/minimum": 0, + "customscript/soft_inpainting.py/img2img/Schedule bias/maximum": 8, + "customscript/soft_inpainting.py/img2img/Schedule bias/step": 0.1, + "customscript/soft_inpainting.py/img2img/Preservation strength/visible": true, + "customscript/soft_inpainting.py/img2img/Preservation strength/value": 0.5, + "customscript/soft_inpainting.py/img2img/Preservation strength/minimum": 0, + "customscript/soft_inpainting.py/img2img/Preservation strength/maximum": 8, + "customscript/soft_inpainting.py/img2img/Preservation strength/step": 0.05, + "customscript/soft_inpainting.py/img2img/Transition contrast boost/visible": true, + "customscript/soft_inpainting.py/img2img/Transition contrast boost/value": 4, + "customscript/soft_inpainting.py/img2img/Transition contrast boost/minimum": 1, + "customscript/soft_inpainting.py/img2img/Transition contrast boost/maximum": 32, + "customscript/soft_inpainting.py/img2img/Transition contrast boost/step": 0.5, + "customscript/soft_inpainting.py/img2img/Mask influence/visible": true, + "customscript/soft_inpainting.py/img2img/Mask influence/value": 0, + "customscript/soft_inpainting.py/img2img/Mask influence/minimum": 0, + "customscript/soft_inpainting.py/img2img/Mask influence/maximum": 1, + "customscript/soft_inpainting.py/img2img/Mask influence/step": 0.05, + "customscript/soft_inpainting.py/img2img/Difference threshold/visible": true, + "customscript/soft_inpainting.py/img2img/Difference threshold/value": 0.5, + "customscript/soft_inpainting.py/img2img/Difference threshold/minimum": 0, + "customscript/soft_inpainting.py/img2img/Difference threshold/maximum": 8, + "customscript/soft_inpainting.py/img2img/Difference threshold/step": 0.25, + "customscript/soft_inpainting.py/img2img/Difference contrast/visible": true, + "customscript/soft_inpainting.py/img2img/Difference contrast/value": 2, + "customscript/soft_inpainting.py/img2img/Difference contrast/minimum": 0, + 
"customscript/soft_inpainting.py/img2img/Difference contrast/maximum": 8, + "customscript/soft_inpainting.py/img2img/Difference contrast/step": 0.25, + "customscript/sampler.py/img2img/Sampling method/visible": true, + "customscript/sampler.py/img2img/Sampling method/value": "DPM++ 2M", + "customscript/sampler.py/img2img/Schedule type/visible": true, + "customscript/sampler.py/img2img/Schedule type/value": "Automatic", + "customscript/sampler.py/img2img/Sampling steps/visible": true, + "customscript/sampler.py/img2img/Sampling steps/value": 20, + "customscript/sampler.py/img2img/Sampling steps/minimum": 1, + "customscript/sampler.py/img2img/Sampling steps/maximum": 150, + "customscript/sampler.py/img2img/Sampling steps/step": 1, + "customscript/refiner.py/img2img/Refiner/visible": true, + "customscript/refiner.py/img2img/Refiner/value": false, + "customscript/refiner.py/img2img/Checkpoint/visible": true, + "customscript/refiner.py/img2img/Checkpoint/value": "", + "customscript/refiner.py/img2img/Switch at/visible": true, + "customscript/refiner.py/img2img/Switch at/value": 0.8, + "customscript/refiner.py/img2img/Switch at/minimum": 0.01, + "customscript/refiner.py/img2img/Switch at/maximum": 1.0, + "customscript/refiner.py/img2img/Switch at/step": 0.01, + "img2img/Width/visible": true, + "img2img/Width/value": 512, + "img2img/Width/minimum": 64, + "img2img/Width/maximum": 2048, + "img2img/Width/step": 8, + "img2img/Height/visible": true, + "img2img/Height/value": 512, + "img2img/Height/minimum": 64, + "img2img/Height/maximum": 2048, + "img2img/Height/step": 8, + "img2img/⇅/visible": true, + "img2img/📐/visible": true, + "img2img/Scale/visible": true, + "img2img/Scale/value": 1.0, + "img2img/Scale/minimum": 0.05, + "img2img/Scale/maximum": 4.0, + "img2img/Scale/step": 0.05, + "img2img/Unused/visible": true, + "img2img/Unused/value": 0, + "img2img/Unused/minimum": 0, + "img2img/Unused/maximum": 100, + "img2img/Unused/step": 1, + "img2img/Batch count/visible": true, + "img2img/Batch count/value": 1, + "img2img/Batch count/minimum": 1, + "img2img/Batch count/maximum": 100, + "img2img/Batch count/step": 1, + "img2img/Batch size/visible": true, + "img2img/Batch size/value": 1, + "img2img/Batch size/minimum": 1, + "img2img/Batch size/maximum": 8, + "img2img/Batch size/step": 1, + "img2img/CFG Scale/visible": true, + "img2img/CFG Scale/value": 7.0, + "img2img/CFG Scale/minimum": 1.0, + "img2img/CFG Scale/maximum": 30.0, + "img2img/CFG Scale/step": 0.5, + "img2img/Image CFG Scale/value": 1.5, + "img2img/Image CFG Scale/minimum": 0, + "img2img/Image CFG Scale/maximum": 3.0, + "img2img/Image CFG Scale/step": 0.05, + "img2img/Denoising strength/visible": true, + "img2img/Denoising strength/value": 0.75, + "img2img/Denoising strength/minimum": 0.0, + "img2img/Denoising strength/maximum": 1.0, + "img2img/Denoising strength/step": 0.01, + "customscript/seed.py/img2img/Seed/visible": true, + "customscript/seed.py/img2img/Seed/value": -1, + "img2img/🎲️/visible": true, + "img2img/♻️/visible": true, + "customscript/seed.py/img2img/Extra/visible": true, + "customscript/seed.py/img2img/Extra/value": false, + "customscript/seed.py/img2img/Variation seed/visible": true, + "customscript/seed.py/img2img/Variation seed/value": -1, + "customscript/seed.py/img2img/Variation strength/visible": true, + "customscript/seed.py/img2img/Variation strength/value": 0.0, + "customscript/seed.py/img2img/Variation strength/minimum": 0, + "customscript/seed.py/img2img/Variation strength/maximum": 1, + 
"customscript/seed.py/img2img/Variation strength/step": 0.01, + "customscript/seed.py/img2img/Resize seed from width/visible": true, + "customscript/seed.py/img2img/Resize seed from width/value": 0, + "customscript/seed.py/img2img/Resize seed from width/minimum": 0, + "customscript/seed.py/img2img/Resize seed from width/maximum": 2048, + "customscript/seed.py/img2img/Resize seed from width/step": 8, + "customscript/seed.py/img2img/Resize seed from height/visible": true, + "customscript/seed.py/img2img/Resize seed from height/value": 0, + "customscript/seed.py/img2img/Resize seed from height/minimum": 0, + "customscript/seed.py/img2img/Resize seed from height/maximum": 2048, + "customscript/seed.py/img2img/Resize seed from height/step": 8, + "img2img/Override settings/value": null, + "img2img/Script/visible": true, + "img2img/Script/value": "None", + "customscript/img2imgalt.py/img2img/Override `Sampling method` to Euler?(this method is built for it)/visible": true, + "customscript/img2imgalt.py/img2img/Override `Sampling method` to Euler?(this method is built for it)/value": true, + "customscript/img2imgalt.py/img2img/Override `prompt` to the same value as `original prompt`?(and `negative prompt`)/visible": true, + "customscript/img2imgalt.py/img2img/Override `prompt` to the same value as `original prompt`?(and `negative prompt`)/value": true, + "customscript/img2imgalt.py/img2img/Original prompt/visible": true, + "customscript/img2imgalt.py/img2img/Original prompt/value": "", + "customscript/img2imgalt.py/img2img/Original negative prompt/visible": true, + "customscript/img2imgalt.py/img2img/Original negative prompt/value": "", + "customscript/img2imgalt.py/img2img/Override `Sampling Steps` to the same value as `Decode steps`?/visible": true, + "customscript/img2imgalt.py/img2img/Override `Sampling Steps` to the same value as `Decode steps`?/value": true, + "customscript/img2imgalt.py/img2img/Decode steps/visible": true, + "customscript/img2imgalt.py/img2img/Decode steps/value": 50, + "customscript/img2imgalt.py/img2img/Decode steps/minimum": 1, + "customscript/img2imgalt.py/img2img/Decode steps/maximum": 150, + "customscript/img2imgalt.py/img2img/Decode steps/step": 1, + "customscript/img2imgalt.py/img2img/Override `Denoising strength` to 1?/visible": true, + "customscript/img2imgalt.py/img2img/Override `Denoising strength` to 1?/value": true, + "customscript/img2imgalt.py/img2img/Decode CFG scale/visible": true, + "customscript/img2imgalt.py/img2img/Decode CFG scale/value": 1.0, + "customscript/img2imgalt.py/img2img/Decode CFG scale/minimum": 0.0, + "customscript/img2imgalt.py/img2img/Decode CFG scale/maximum": 15.0, + "customscript/img2imgalt.py/img2img/Decode CFG scale/step": 0.1, + "customscript/img2imgalt.py/img2img/Randomness/visible": true, + "customscript/img2imgalt.py/img2img/Randomness/value": 0.0, + "customscript/img2imgalt.py/img2img/Randomness/minimum": 0.0, + "customscript/img2imgalt.py/img2img/Randomness/maximum": 1.0, + "customscript/img2imgalt.py/img2img/Randomness/step": 0.01, + "customscript/img2imgalt.py/img2img/Sigma adjustment for finding noise for image/visible": true, + "customscript/img2imgalt.py/img2img/Sigma adjustment for finding noise for image/value": false, + "customscript/loopback.py/img2img/Loops/visible": true, + "customscript/loopback.py/img2img/Loops/value": 4, + "customscript/loopback.py/img2img/Loops/minimum": 1, + "customscript/loopback.py/img2img/Loops/maximum": 32, + "customscript/loopback.py/img2img/Loops/step": 1, + 
"customscript/loopback.py/img2img/Final denoising strength/visible": true, + "customscript/loopback.py/img2img/Final denoising strength/value": 0.5, + "customscript/loopback.py/img2img/Final denoising strength/minimum": 0, + "customscript/loopback.py/img2img/Final denoising strength/maximum": 1, + "customscript/loopback.py/img2img/Final denoising strength/step": 0.01, + "customscript/loopback.py/img2img/Denoising strength curve/visible": true, + "customscript/loopback.py/img2img/Denoising strength curve/value": "Linear", + "customscript/loopback.py/img2img/Append interrogated prompt at each iteration/visible": true, + "customscript/loopback.py/img2img/Append interrogated prompt at each iteration/value": "None", + "customscript/outpainting_mk_2.py/img2img/Pixels to expand/visible": true, + "customscript/outpainting_mk_2.py/img2img/Pixels to expand/value": 128, + "customscript/outpainting_mk_2.py/img2img/Pixels to expand/minimum": 8, + "customscript/outpainting_mk_2.py/img2img/Pixels to expand/maximum": 256, + "customscript/outpainting_mk_2.py/img2img/Pixels to expand/step": 8, + "customscript/outpainting_mk_2.py/img2img/Mask blur/visible": true, + "customscript/outpainting_mk_2.py/img2img/Mask blur/value": 8, + "customscript/outpainting_mk_2.py/img2img/Mask blur/minimum": 0, + "customscript/outpainting_mk_2.py/img2img/Mask blur/maximum": 64, + "customscript/outpainting_mk_2.py/img2img/Mask blur/step": 1, + "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/visible": true, + "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/value": 1.0, + "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/minimum": 0.0, + "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/maximum": 4.0, + "customscript/outpainting_mk_2.py/img2img/Fall-off exponent (lower=higher detail)/step": 0.01, + "customscript/outpainting_mk_2.py/img2img/Color variation/visible": true, + "customscript/outpainting_mk_2.py/img2img/Color variation/value": 0.05, + "customscript/outpainting_mk_2.py/img2img/Color variation/minimum": 0.0, + "customscript/outpainting_mk_2.py/img2img/Color variation/maximum": 1.0, + "customscript/outpainting_mk_2.py/img2img/Color variation/step": 0.01, + "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/visible": true, + "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/value": 128, + "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/minimum": 8, + "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/maximum": 256, + "customscript/poor_mans_outpainting.py/img2img/Pixels to expand/step": 8, + "customscript/poor_mans_outpainting.py/img2img/Mask blur/visible": true, + "customscript/poor_mans_outpainting.py/img2img/Mask blur/value": 4, + "customscript/poor_mans_outpainting.py/img2img/Mask blur/minimum": 0, + "customscript/poor_mans_outpainting.py/img2img/Mask blur/maximum": 64, + "customscript/poor_mans_outpainting.py/img2img/Mask blur/step": 1, + "customscript/poor_mans_outpainting.py/img2img/Masked content/visible": true, + "customscript/poor_mans_outpainting.py/img2img/Masked content/value": "fill", + "customscript/prompt_matrix.py/img2img/Put variable parts at start of prompt/visible": true, + "customscript/prompt_matrix.py/img2img/Put variable parts at start of prompt/value": false, + "customscript/prompt_matrix.py/img2img/Use different seed for each picture/visible": true, + "customscript/prompt_matrix.py/img2img/Use different seed for 
each picture/value": false, + "customscript/prompt_matrix.py/img2img/Select prompt/visible": true, + "customscript/prompt_matrix.py/img2img/Select prompt/value": "positive", + "customscript/prompt_matrix.py/img2img/Select joining char/visible": true, + "customscript/prompt_matrix.py/img2img/Select joining char/value": "comma", + "customscript/prompt_matrix.py/img2img/Grid margins (px)/visible": true, + "customscript/prompt_matrix.py/img2img/Grid margins (px)/value": 0, + "customscript/prompt_matrix.py/img2img/Grid margins (px)/minimum": 0, + "customscript/prompt_matrix.py/img2img/Grid margins (px)/maximum": 500, + "customscript/prompt_matrix.py/img2img/Grid margins (px)/step": 2, + "customscript/prompts_from_file.py/img2img/Iterate seed every line/visible": true, + "customscript/prompts_from_file.py/img2img/Iterate seed every line/value": false, + "customscript/prompts_from_file.py/img2img/Use same random seed for all lines/visible": true, + "customscript/prompts_from_file.py/img2img/Use same random seed for all lines/value": false, + "customscript/prompts_from_file.py/img2img/Insert prompts at the/visible": true, + "customscript/prompts_from_file.py/img2img/Insert prompts at the/value": "start", + "customscript/prompts_from_file.py/img2img/List of prompt inputs/visible": true, + "customscript/prompts_from_file.py/img2img/List of prompt inputs/value": "", + "customscript/sd_upscale.py/img2img/Tile overlap/visible": true, + "customscript/sd_upscale.py/img2img/Tile overlap/value": 64, + "customscript/sd_upscale.py/img2img/Tile overlap/minimum": 0, + "customscript/sd_upscale.py/img2img/Tile overlap/maximum": 256, + "customscript/sd_upscale.py/img2img/Tile overlap/step": 16, + "customscript/sd_upscale.py/img2img/Scale Factor/visible": true, + "customscript/sd_upscale.py/img2img/Scale Factor/value": 2.0, + "customscript/sd_upscale.py/img2img/Scale Factor/minimum": 1.0, + "customscript/sd_upscale.py/img2img/Scale Factor/maximum": 4.0, + "customscript/sd_upscale.py/img2img/Scale Factor/step": 0.05, + "customscript/sd_upscale.py/img2img/Upscaler/visible": true, + "customscript/sd_upscale.py/img2img/Upscaler/value": "None", + "customscript/xyz_grid.py/img2img/X type/visible": true, + "customscript/xyz_grid.py/img2img/X type/value": "Seed", + "customscript/xyz_grid.py/img2img/X values/visible": true, + "customscript/xyz_grid.py/img2img/X values/value": "", + "customscript/xyz_grid.py/img2img/Y type/visible": true, + "customscript/xyz_grid.py/img2img/Y type/value": "Nothing", + "customscript/xyz_grid.py/img2img/Y values/visible": true, + "customscript/xyz_grid.py/img2img/Y values/value": "", + "customscript/xyz_grid.py/img2img/Z type/visible": true, + "customscript/xyz_grid.py/img2img/Z type/value": "Nothing", + "customscript/xyz_grid.py/img2img/Z values/visible": true, + "customscript/xyz_grid.py/img2img/Z values/value": "", + "customscript/xyz_grid.py/img2img/Draw legend/visible": true, + "customscript/xyz_grid.py/img2img/Draw legend/value": true, + "customscript/xyz_grid.py/img2img/Keep -1 for seeds/visible": true, + "customscript/xyz_grid.py/img2img/Keep -1 for seeds/value": false, + "customscript/xyz_grid.py/img2img/Vary seeds for X/visible": true, + "customscript/xyz_grid.py/img2img/Vary seeds for X/value": false, + "customscript/xyz_grid.py/img2img/Vary seeds for Y/visible": true, + "customscript/xyz_grid.py/img2img/Vary seeds for Y/value": false, + "customscript/xyz_grid.py/img2img/Vary seeds for Z/visible": true, + "customscript/xyz_grid.py/img2img/Vary seeds for Z/value": false, + 
"customscript/xyz_grid.py/img2img/Include Sub Images/visible": true, + "customscript/xyz_grid.py/img2img/Include Sub Images/value": false, + "customscript/xyz_grid.py/img2img/Include Sub Grids/visible": true, + "customscript/xyz_grid.py/img2img/Include Sub Grids/value": false, + "customscript/xyz_grid.py/img2img/Use text inputs instead of dropdowns/visible": true, + "customscript/xyz_grid.py/img2img/Use text inputs instead of dropdowns/value": false, + "customscript/xyz_grid.py/img2img/Grid margins (px)/visible": true, + "customscript/xyz_grid.py/img2img/Grid margins (px)/value": 0, + "customscript/xyz_grid.py/img2img/Grid margins (px)/minimum": 0, + "customscript/xyz_grid.py/img2img/Grid margins (px)/maximum": 500, + "customscript/xyz_grid.py/img2img/Grid margins (px)/step": 2, + "img2img/Swap X/Y axes/visible": true, + "img2img/Swap Y/Z axes/visible": true, + "img2img/Swap X/Z axes/visible": true, + "img2img/📂/visible": true, + "img2img/💾/visible": true, + "img2img/🗃️/visible": true, + "img2img/🖼️/visible": true, + "img2img/🎨️/visible": true, + "img2img/Description/visible": true, + "img2img/Description/value": "", + "img2img/Cancel/visible": true, + "img2img/Replace preview/visible": true, + "img2img/Save/visible": true, + "img2img/Preferred VAE/visible": true, + "img2img/Preferred VAE/value": "None", + "img2img/Stable Diffusion version/visible": true, + "img2img/Stable Diffusion version/value": "Unknown", + "img2img/Activation text/visible": true, + "img2img/Activation text/value": "", + "img2img/Preferred weight/visible": true, + "img2img/Preferred weight/value": 0.0, + "img2img/Preferred weight/minimum": 0.0, + "img2img/Preferred weight/maximum": 2.0, + "img2img/Preferred weight/step": 0.01, + "img2img/Random prompt/visible": true, + "img2img/Random prompt/value": "", + "extras/Tabs@mode_extras/selected": null, + "extras/Input directory/visible": true, + "extras/Input directory/value": "", + "extras/Output directory/visible": true, + "extras/Output directory/value": "", + "extras/Show result images/visible": true, + "extras/Show result images/value": true, + "customscript/postprocessing_upscale.py/extras/Upscale/visible": true, + "customscript/postprocessing_upscale.py/extras/Upscale/value": true, + "customscript/postprocessing_upscale.py/extras/Upscaler 1/visible": true, + "customscript/postprocessing_upscale.py/extras/Upscaler 1/value": "None", + "customscript/postprocessing_upscale.py/extras/Upscaler 2/visible": true, + "customscript/postprocessing_upscale.py/extras/Upscaler 2/value": "None", + "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/visible": true, + "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/value": 0.0, + "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/minimum": 0.0, + "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/maximum": 1.0, + "customscript/postprocessing_upscale.py/extras/Upscaler 2 visibility/step": 0.001, + "extras/Tabs@extras_resize_mode/selected": null, + "customscript/postprocessing_upscale.py/extras/Resize/visible": true, + "customscript/postprocessing_upscale.py/extras/Resize/value": 4, + "customscript/postprocessing_upscale.py/extras/Resize/minimum": 1.0, + "customscript/postprocessing_upscale.py/extras/Resize/maximum": 8.0, + "customscript/postprocessing_upscale.py/extras/Resize/step": 0.05, + "customscript/postprocessing_upscale.py/extras/Max side length/visible": true, + "customscript/postprocessing_upscale.py/extras/Max side length/value": 0.0, + 
"customscript/postprocessing_upscale.py/extras/Width/visible": true, + "customscript/postprocessing_upscale.py/extras/Width/value": 512, + "customscript/postprocessing_upscale.py/extras/Width/minimum": 64, + "customscript/postprocessing_upscale.py/extras/Width/maximum": 8192, + "customscript/postprocessing_upscale.py/extras/Width/step": 8, + "customscript/postprocessing_upscale.py/extras/Height/visible": true, + "customscript/postprocessing_upscale.py/extras/Height/value": 512, + "customscript/postprocessing_upscale.py/extras/Height/minimum": 64, + "customscript/postprocessing_upscale.py/extras/Height/maximum": 8192, + "customscript/postprocessing_upscale.py/extras/Height/step": 8, + "extras/⇅/visible": true, + "customscript/postprocessing_upscale.py/extras/Crop to fit/visible": true, + "customscript/postprocessing_upscale.py/extras/Crop to fit/value": true, + "customscript/postprocessing_gfpgan.py/extras/GFPGAN/visible": true, + "customscript/postprocessing_gfpgan.py/extras/GFPGAN/value": false, + "customscript/postprocessing_gfpgan.py/extras/Visibility/visible": true, + "customscript/postprocessing_gfpgan.py/extras/Visibility/value": 1.0, + "customscript/postprocessing_gfpgan.py/extras/Visibility/minimum": 0.0, + "customscript/postprocessing_gfpgan.py/extras/Visibility/maximum": 1.0, + "customscript/postprocessing_gfpgan.py/extras/Visibility/step": 0.001, + "customscript/postprocessing_codeformer.py/extras/CodeFormer/visible": true, + "customscript/postprocessing_codeformer.py/extras/CodeFormer/value": false, + "customscript/postprocessing_codeformer.py/extras/Visibility/visible": true, + "customscript/postprocessing_codeformer.py/extras/Visibility/value": 1.0, + "customscript/postprocessing_codeformer.py/extras/Visibility/minimum": 0.0, + "customscript/postprocessing_codeformer.py/extras/Visibility/maximum": 1.0, + "customscript/postprocessing_codeformer.py/extras/Visibility/step": 0.001, + "customscript/postprocessing_codeformer.py/extras/Weight (0 = maximum effect, 1 = minimum effect)/visible": true, + "customscript/postprocessing_codeformer.py/extras/Weight (0 = maximum effect, 1 = minimum effect)/value": 0, + "customscript/postprocessing_codeformer.py/extras/Weight (0 = maximum effect, 1 = minimum effect)/minimum": 0.0, + "customscript/postprocessing_codeformer.py/extras/Weight (0 = maximum effect, 1 = minimum effect)/maximum": 1.0, + "customscript/postprocessing_codeformer.py/extras/Weight (0 = maximum effect, 1 = minimum effect)/step": 0.001, + "customscript/postprocessing_split_oversized.py/extras/Split oversized images/visible": true, + "customscript/postprocessing_split_oversized.py/extras/Split oversized images/value": false, + "customscript/postprocessing_split_oversized.py/extras/Threshold/visible": true, + "customscript/postprocessing_split_oversized.py/extras/Threshold/value": 0.5, + "customscript/postprocessing_split_oversized.py/extras/Threshold/minimum": 0.0, + "customscript/postprocessing_split_oversized.py/extras/Threshold/maximum": 1.0, + "customscript/postprocessing_split_oversized.py/extras/Threshold/step": 0.05, + "customscript/postprocessing_split_oversized.py/extras/Overlap ratio/visible": true, + "customscript/postprocessing_split_oversized.py/extras/Overlap ratio/value": 0.2, + "customscript/postprocessing_split_oversized.py/extras/Overlap ratio/minimum": 0.0, + "customscript/postprocessing_split_oversized.py/extras/Overlap ratio/maximum": 0.9, + "customscript/postprocessing_split_oversized.py/extras/Overlap ratio/step": 0.05, + 
"customscript/postprocessing_focal_crop.py/extras/Auto focal point crop/visible": true, + "customscript/postprocessing_focal_crop.py/extras/Auto focal point crop/value": false, + "customscript/postprocessing_focal_crop.py/extras/Focal point face weight/visible": true, + "customscript/postprocessing_focal_crop.py/extras/Focal point face weight/value": 0.9, + "customscript/postprocessing_focal_crop.py/extras/Focal point face weight/minimum": 0.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point face weight/maximum": 1.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point face weight/step": 0.05, + "customscript/postprocessing_focal_crop.py/extras/Focal point entropy weight/visible": true, + "customscript/postprocessing_focal_crop.py/extras/Focal point entropy weight/value": 0.15, + "customscript/postprocessing_focal_crop.py/extras/Focal point entropy weight/minimum": 0.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point entropy weight/maximum": 1.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point entropy weight/step": 0.05, + "customscript/postprocessing_focal_crop.py/extras/Focal point edges weight/visible": true, + "customscript/postprocessing_focal_crop.py/extras/Focal point edges weight/value": 0.5, + "customscript/postprocessing_focal_crop.py/extras/Focal point edges weight/minimum": 0.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point edges weight/maximum": 1.0, + "customscript/postprocessing_focal_crop.py/extras/Focal point edges weight/step": 0.05, + "customscript/postprocessing_focal_crop.py/extras/Create debug image/visible": true, + "customscript/postprocessing_focal_crop.py/extras/Create debug image/value": false, + "customscript/postprocessing_autosized_crop.py/extras/Auto-sized crop/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Auto-sized crop/value": false, + "customscript/postprocessing_autosized_crop.py/extras/Dimension lower bound/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Dimension lower bound/value": 384, + "customscript/postprocessing_autosized_crop.py/extras/Dimension lower bound/minimum": 64, + "customscript/postprocessing_autosized_crop.py/extras/Dimension lower bound/maximum": 2048, + "customscript/postprocessing_autosized_crop.py/extras/Dimension lower bound/step": 8, + "customscript/postprocessing_autosized_crop.py/extras/Dimension upper bound/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Dimension upper bound/value": 768, + "customscript/postprocessing_autosized_crop.py/extras/Dimension upper bound/minimum": 64, + "customscript/postprocessing_autosized_crop.py/extras/Dimension upper bound/maximum": 2048, + "customscript/postprocessing_autosized_crop.py/extras/Dimension upper bound/step": 8, + "customscript/postprocessing_autosized_crop.py/extras/Area lower bound/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Area lower bound/value": 4096, + "customscript/postprocessing_autosized_crop.py/extras/Area lower bound/minimum": 4096, + "customscript/postprocessing_autosized_crop.py/extras/Area lower bound/maximum": 4194304, + "customscript/postprocessing_autosized_crop.py/extras/Area lower bound/step": 1, + "customscript/postprocessing_autosized_crop.py/extras/Area upper bound/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Area upper bound/value": 409600, + "customscript/postprocessing_autosized_crop.py/extras/Area upper bound/minimum": 4096, + 
"customscript/postprocessing_autosized_crop.py/extras/Area upper bound/maximum": 4194304, + "customscript/postprocessing_autosized_crop.py/extras/Area upper bound/step": 1, + "customscript/postprocessing_autosized_crop.py/extras/Resizing objective/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Resizing objective/value": "Maximize area", + "customscript/postprocessing_autosized_crop.py/extras/Error threshold/visible": true, + "customscript/postprocessing_autosized_crop.py/extras/Error threshold/value": 0.1, + "customscript/postprocessing_autosized_crop.py/extras/Error threshold/minimum": 0, + "customscript/postprocessing_autosized_crop.py/extras/Error threshold/maximum": 1, + "customscript/postprocessing_autosized_crop.py/extras/Error threshold/step": 0.01, + "customscript/postprocessing_create_flipped_copies.py/extras/Create flipped copies/visible": true, + "customscript/postprocessing_create_flipped_copies.py/extras/Create flipped copies/value": false, + "customscript/postprocessing_caption.py/extras/Caption/visible": true, + "customscript/postprocessing_caption.py/extras/Caption/value": false, + "extras/Interrupt/visible": true, + "extras/Skip/visible": true, + "extras/Interrupting.../visible": true, + "extras/Generate/visible": true, + "extras/📂/visible": true, + "extras/🖼️/visible": true, + "extras/🎨️/visible": true, + "extras/📐/visible": true, + "pnginfo/Send to txt2img/visible": true, + "pnginfo/Send to img2img/visible": true, + "pnginfo/Send to inpaint/visible": true, + "pnginfo/Send to extras/visible": true, + "modelmerger/Primary model (A)/visible": true, + "modelmerger/Primary model (A)/value": null, + "modelmerger/🔄/visible": true, + "modelmerger/Secondary model (B)/visible": true, + "modelmerger/Secondary model (B)/value": null, + "modelmerger/Tertiary model (C)/visible": true, + "modelmerger/Tertiary model (C)/value": null, + "modelmerger/Custom Name (Optional)/visible": true, + "modelmerger/Custom Name (Optional)/value": "", + "modelmerger/Multiplier (M) - set to 0 to get model A/visible": true, + "modelmerger/Multiplier (M) - set to 0 to get model A/value": 0.3, + "modelmerger/Multiplier (M) - set to 0 to get model A/minimum": 0.0, + "modelmerger/Multiplier (M) - set to 0 to get model A/maximum": 1.0, + "modelmerger/Multiplier (M) - set to 0 to get model A/step": 0.05, + "modelmerger/Interpolation Method/visible": true, + "modelmerger/Interpolation Method/value": "Weighted sum", + "modelmerger/Checkpoint format/visible": true, + "modelmerger/Checkpoint format/value": "safetensors", + "modelmerger/Save as float16/visible": true, + "modelmerger/Save as float16/value": false, + "modelmerger/Copy config from/visible": true, + "modelmerger/Copy config from/value": "A, B or C", + "modelmerger/Bake in VAE/visible": true, + "modelmerger/Bake in VAE/value": "None", + "modelmerger/Discard weights with matching name/visible": true, + "modelmerger/Discard weights with matching name/value": "", + "modelmerger/Save metadata/visible": true, + "modelmerger/Save metadata/value": true, + "modelmerger/Add merge recipe metadata/visible": true, + "modelmerger/Add merge recipe metadata/value": true, + "modelmerger/Copy metadata from merged models/visible": true, + "modelmerger/Copy metadata from merged models/value": true, + "modelmerger/Read metadata from selected checkpoints/visible": true, + "modelmerger/Merge/visible": true, + "train/Tabs@train_tabs/selected": null, + "train/Name/visible": true, + "train/Name/value": "", + "train/Initialization text/visible": true, + 
"train/Initialization text/value": "*", + "train/Number of vectors per token/visible": true, + "train/Number of vectors per token/value": 1, + "train/Number of vectors per token/minimum": 1, + "train/Number of vectors per token/maximum": 75, + "train/Number of vectors per token/step": 1, + "train/Overwrite Old Embedding/visible": true, + "train/Overwrite Old Embedding/value": false, + "train/Create embedding/visible": true, + "train/Enter hypernetwork layer structure/visible": true, + "train/Enter hypernetwork layer structure/value": "1, 2, 1", + "train/Select activation function of hypernetwork. Recommended : Swish / Linear(none)/visible": true, + "train/Select activation function of hypernetwork. Recommended : Swish / Linear(none)/value": "linear", + "train/Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise/visible": true, + "train/Select Layer weights initialization. Recommended: Kaiming for relu-like, Xavier for sigmoid-like, Normal otherwise/value": "Normal", + "train/Add layer normalization/visible": true, + "train/Add layer normalization/value": false, + "train/Use dropout/visible": true, + "train/Use dropout/value": false, + "train/Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15/visible": true, + "train/Enter hypernetwork Dropout structure (or empty). Recommended : 0~0.35 incrementing sequence: 0, 0.05, 0.15/value": "0, 0, 0", + "train/Overwrite Old Hypernetwork/visible": true, + "train/Overwrite Old Hypernetwork/value": false, + "train/Create hypernetwork/visible": true, + "train/Embedding/visible": true, + "train/Embedding/value": null, + "train/🔄/visible": true, + "train/Hypernetwork/visible": true, + "train/Hypernetwork/value": null, + "train/Embedding Learning rate/visible": true, + "train/Embedding Learning rate/value": "0.005", + "train/Hypernetwork Learning rate/visible": true, + "train/Hypernetwork Learning rate/value": "0.00001", + "train/Gradient Clipping/visible": true, + "train/Gradient Clipping/value": "disabled", + "train/Batch size/visible": true, + "train/Batch size/value": 1, + "train/Gradient accumulation steps/visible": true, + "train/Gradient accumulation steps/value": 1, + "train/Dataset directory/visible": true, + "train/Dataset directory/value": "", + "train/Log directory/visible": true, + "train/Log directory/value": "textual_inversion", + "train/Prompt template/visible": true, + "train/Prompt template/value": "style_filewords.txt", + "train/Width/visible": true, + "train/Width/value": 512, + "train/Width/minimum": 64, + "train/Width/maximum": 2048, + "train/Width/step": 8, + "train/Height/visible": true, + "train/Height/value": 512, + "train/Height/minimum": 64, + "train/Height/maximum": 2048, + "train/Height/step": 8, + "train/Do not resize images/visible": true, + "train/Do not resize images/value": false, + "train/Max steps/visible": true, + "train/Max steps/value": 100000, + "train/Save an image to log directory every N steps, 0 to disable/visible": true, + "train/Save an image to log directory every N steps, 0 to disable/value": 500, + "train/Save a copy of embedding to log directory every N steps, 0 to disable/visible": true, + "train/Save a copy of embedding to log directory every N steps, 0 to disable/value": 500, + "train/Use PNG alpha channel as loss weight/visible": true, + "train/Use PNG alpha channel as loss weight/value": false, + "train/Save images with embedding in PNG chunks/visible": true, + "train/Save images with 
embedding in PNG chunks/value": true, + "train/Read parameters (prompt, etc...) from txt2img tab when making previews/visible": true, + "train/Read parameters (prompt, etc...) from txt2img tab when making previews/value": false, + "train/Shuffle tags by ',' when creating prompts./visible": true, + "train/Shuffle tags by ',' when creating prompts./value": false, + "train/Drop out tags when creating prompts./visible": true, + "train/Drop out tags when creating prompts./value": 0, + "train/Drop out tags when creating prompts./minimum": 0, + "train/Drop out tags when creating prompts./maximum": 1, + "train/Drop out tags when creating prompts./step": 0.1, + "train/Choose latent sampling method/visible": true, + "train/Choose latent sampling method/value": "once", + "train/Train Embedding/visible": true, + "train/Interrupt/visible": true, + "train/Train Hypernetwork/visible": true, + "webui/Tabs@tabs/selected": null, + "customscript/qairt_accelerate.py/txt2img/Model to use/visible": true, + "customscript/qairt_accelerate.py/txt2img/Model to use/value": "Stable-Diffusion-1.5", + "customscript/qairt_accelerate.py/img2img/Model to use/visible": true, + "customscript/qairt_accelerate.py/img2img/Model to use/value": "Stable-Diffusion-1.5" +} \ No newline at end of file