Commit 99f5345

support model ProteusV0.2
1 parent 0100df2 commit 99f5345

File tree

5 files changed, +17 -4 lines changed

WebUI/configs/basicconfig.py

Lines changed: 1 addition & 1 deletion

@@ -429,7 +429,7 @@ def generate_prompt_for_imagegen(model_name : str = "", prompt : str = "", image
     new_prompt = ""
     if len(model_name) == 0 or len(prompt) == 0:
         return prompt
-    if model_name == "OpenDalleV1.1":
+    if model_name == "OpenDalleV1.1" or model_name == "ProteusV0.2":
         new_prompt = """
             You need to create prompts for an image generation model based on the user's question. The format of the prompts is the features of the image, separated by commas, with no any other information outputted, for example:
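
For context, this change only widens the prompt-routing branch: ProteusV0.2 now receives the same comma-separated "image features" prompt template that OpenDalleV1.1 already used. A minimal sketch of that routing, assuming a shared template (the set and helper names below are illustrative, not from the repository):

    # Illustrative sketch only; these names are not from the repository.
    SDXL_STYLE_MODELS = {"OpenDalleV1.1", "ProteusV0.2"}

    def uses_shared_imagegen_template(model_name: str) -> bool:
        # Both models are prompted with the same comma-separated feature list.
        return model_name in SDXL_STYLE_MODELS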

WebUI/configs/imagemodels.py

Lines changed: 2 additions & 2 deletions

@@ -123,7 +123,7 @@ def init_image_generation_models(config):
     if not torch.cuda.is_available():
         return None
     if isinstance(config, dict):
-        if config["model_name"] == "OpenDalleV1.1":
+        if config["model_name"] == "OpenDalleV1.1" or config["model_name"] == "ProteusV0.2":
             from diffusers import AutoencoderKL, DiffusionPipeline
             model_id = config["model_path"]
             enable_torch_compile = config["torch_compile"]
@@ -180,7 +180,7 @@ def first_prompt(prompt):

     if len(text_data) and model is not None:
         if isinstance(config, dict):
-            if config["model_name"] == "OpenDalleV1.1":
+            if config["model_name"] == "OpenDalleV1.1" or config["model_name"] == "ProteusV0.2":
                 seed = config["seed"]
                 apply_refiner = False if refiner is None else True
                 if seed == -1:
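
The loading branch that ProteusV0.2 now shares imports AutoencoderKL and DiffusionPipeline from diffusers. A rough standalone sketch of how such an SDXL-style pipeline is typically assembled (local paths, dtype, and the prompt are assumptions for illustration, not the repository's exact init_image_generation_models code):

    # Sketch only: approximates the shared loading branch under stated assumptions.
    import torch
    from diffusers import AutoencoderKL, DiffusionPipeline

    # The UI expects the fp16-fix VAE to be downloaded locally (path assumed).
    vae = AutoencoderKL.from_pretrained(
        "models/imagegeneration/sdxl-vae-fp16-fix", torch_dtype=torch.float16
    )
    pipe = DiffusionPipeline.from_pretrained(
        "models/imagegeneration/ProteusV0.2",  # corresponds to config["model_path"]
        vae=vae,
        torch_dtype=torch.float16,
    ).to("cuda")

    image = pipe("a lighthouse at dusk, dramatic clouds, volumetric light").images[0]
    image.save("proteus_sample.png")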

WebUI/configs/webuiconfig.json

Lines changed: 13 additions & 0 deletions

@@ -137,6 +137,19 @@
                 "cpu_offload": false,
                 "refiner": true
             }
+        },
+        "ProteusV0.2": {
+            "type": "local",
+            "path": "models/imagegeneration/ProteusV0.2",
+            "device": "auto",
+            "loadbits": 16,
+            "Huggingface": "dataautogpt3/ProteusV0.2",
+            "config": {
+                "seed": -1,
+                "torch_compile": false,
+                "cpu_offload": false,
+                "refiner": true
+            }
         }
     },
     "VtoTModel": {

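The new JSON block mirrors the existing OpenDalleV1.1 entry: a local model directory, 16-bit loading, the Hugging Face repo id, and a per-model config dict. A small sketch of how such an entry might be consumed (the helper and argument names are assumptions; key names come from the block above):

    # Sketch only; helper and argument names are illustrative, not from the repository.
    def resolve_image_model(imagegen_models: dict, name: str = "ProteusV0.2") -> dict:
        entry = imagegen_models[name]          # the parent section of webuiconfig.json
        return {
            "path": entry["path"],             # "models/imagegeneration/ProteusV0.2"
            "hf_repo": entry["Huggingface"],   # "dataautogpt3/ProteusV0.2"
            "loadbits": entry["loadbits"],     # 16, presumably fp16 weights
            "seed": entry["config"]["seed"],   # -1 appears to mean "pick a random seed"
            "refiner": entry["config"]["refiner"],  # True enables the refiner pass
        }
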
WebUI/webui_pages/tools_agent/toolsagent.py

Lines changed: 1 addition & 1 deletion

@@ -924,7 +924,7 @@ def tools_agent_page(api: ApiRequest, is_lite: bool = False):
             provider = imagegenmodels[imagegenmodel].get("provider", "")
             pathstr = imagegenmodels[imagegenmodel].get("path")
             if provider != "" or ImageModelExist(pathstr):
-                if imagegenmodel == "OpenDalleV1.1" and ImageModelExist("models/imagegeneration/sdxl-vae-fp16-fix") == False:
+                if (imagegenmodel == "OpenDalleV1.1" or imagegenmodel == "ProteusV0.2") and ImageModelExist("models/imagegeneration/sdxl-vae-fp16-fix") == False:
                     st.error("Please first download the sdxl-vae-fp16-fix model from Hugginface.")
                 else:
                     r = api.change_image_generation_model(current_imagegen_model, imagegenmodel)
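
Both SDXL-based models depend on the separately downloaded sdxl-vae-fp16-fix VAE, so the page now blocks switching to either one until that model is present. The real ImageModelExist implementation is not shown in this diff; a plausible stand-in for such a presence check might look like the following (pure assumption):

    # Assumption: a non-empty-directory check as one plausible ImageModelExist stand-in.
    import os

    def image_model_exist(local_path: str) -> bool:
        # Treat the model as downloaded if its folder exists and contains files.
        return os.path.isdir(local_path) and bool(os.listdir(local_path))

    SDXL_VAE_PATH = "models/imagegeneration/sdxl-vae-fp16-fix"
    if not image_model_exist(SDXL_VAE_PATH):
        print(f"Please download sdxl-vae-fp16-fix into {SDXL_VAE_PATH} first.")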
