Skip to content

Commit

Permalink
Merge pull request #78 from v8hid/develop
Browse files Browse the repository at this point in the history
v1.2 


    Added common prompt prefix and suffix for better user experience.
    Improved frame correction and enhancement with mask_blur and non-inpainting models.
    All main frames will be shown as a gallery view in output.
    Updated default parameter values.
    Fixed bugs related to prompts import.
    Improved UI parameter names and fixed the frame problem (it's gone).
    Refactored code for better maintenance.
  • Loading branch information
v8hid authored May 2, 2023
2 parents 28a1a0f + aecab9c commit d7263b3
Show file tree
Hide file tree
Showing 16 changed files with 1,373 additions and 936 deletions.
1 change: 1 addition & 0 deletions .gitignore
Original file line number Diff line number Diff line change
Expand Up @@ -129,3 +129,4 @@ dmypy.json
.pyre/
.vscode/settings.json
.DS_Store
/.vs
4 changes: 2 additions & 2 deletions iz_helpers/__init__.py
Original file line number Diff line number Diff line change
@@ -1,2 +1,2 @@
from .image import shrink_and_paste_on_blank
from .video import write_video
# from .ui import on_ui_tabs
# from .settings import on_ui_settings
Empty file added iz_helpers/extra.py
Empty file.
126 changes: 126 additions & 0 deletions iz_helpers/helpers.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,126 @@
import math
import os
import modules.shared as shared
import modules.sd_models
import gradio as gr
from scripts import postprocessing_upscale
from .prompt_util import readJsonPrompt
import asyncio


def fix_env_Path_ffprobe():
    """Ensure the configured ffprobe directory is on the process PATH.

    Reads the "infzoom_ffprobepath" option from the WebUI settings and,
    if it is set and not already present, appends it to
    os.environ["PATH"] so ffprobe can be located by subprocesses.
    """
    envpath = os.environ["PATH"]
    ffppath = shared.opts.data.get("infzoom_ffprobepath", "")

    # os.pathsep is ";" on Windows and ":" elsewhere -- no need to
    # special-case os.name by hand.
    if ffppath and ffppath not in envpath:
        os.environ["PATH"] = envpath + os.pathsep + ffppath


def closest_upper_divisible_by_eight(num):
    """Round *num* up to the nearest multiple of 8.

    Uses pure integer arithmetic, so it is exact for arbitrarily large
    ints; the previous math.ceil(num / 8) went through float division,
    which can lose precision for very large values.
    """
    return ((num + 7) // 8) * 8


def load_model_from_setting(model_field_name, progress, progress_desc):
    """Load the SD checkpoint named by the given WebUI setting.

    Parameters:
        model_field_name: key in shared.opts.data holding the checkpoint title.
        progress: optional gradio progress callback, called as progress(0, desc=...).
        progress_desc: prefix for the progress message.

    Raises:
        Exception: the platform exposes no checkpoint registry at all.
        NameError: the setting is empty or names no known checkpoint.
    """
    # Automatic1111 historically misspelled the attribute ("alisases");
    # other forks fixed the typo -- support both spellings.
    if hasattr(modules.sd_models, "checkpoint_alisases"):
        checkPList = modules.sd_models.checkpoint_alisases
    elif hasattr(modules.sd_models, "checkpoint_aliases"):
        checkPList = modules.sd_models.checkpoint_aliases
    else:
        raise Exception(
            "This is not a compatible StableDiffusion Platform, can not access checkpoints"
        )

    model_name = shared.opts.data.get(model_field_name)
    # BUGFIX: the original left `checkinfo` unbound when the setting was
    # empty, producing an UnboundLocalError instead of the clear message.
    checkinfo = None
    if model_name is not None and model_name != "":
        checkinfo = checkPList[model_name]

    if not checkinfo:
        raise NameError(model_field_name + " Does not exist in your models.")

    if progress:
        progress(0, desc=progress_desc + checkinfo.name)

    modules.sd_models.load_model(checkinfo)


def do_upscaleImg(curImg, upscale_do, upscaler_name, upscale_by):
    """Optionally upscale an image via the WebUI postprocessing pipeline.

    Returns curImg unchanged when upscale_do is falsy; otherwise returns
    the upscaled PIL image. Target width/height are bumped to the next
    even number because ffmpeg rejects odd frame dimensions.
    """
    if not upscale_do:
        return curImg

    target_w = round(curImg.width * upscale_by)
    target_h = round(curImg.height * upscale_by)

    # Mode 2 scales by factor, mode 1 scales to an explicit size; switch
    # to mode 1 whenever a dimension would come out odd.
    mode = 2  # upscale_by
    if target_w % 2:
        mode = 1
        target_w += 1
    if target_h % 2:
        mode = 1
        target_h += 1

    if mode == 1:
        print(
            "Infinite Zoom: aligning output size to even width and height: "
            + str(target_w)
            + " x "
            + str(target_h),
            end="\r",
        )

    result = postprocessing_upscale.scripts_postprocessing.PostprocessedImage(curImg)
    postprocessing_upscale.ScriptPostprocessingUpscale().process(
        result,
        upscale_mode=mode,
        upscale_by=upscale_by,
        upscale_to_width=target_w,
        upscale_to_height=target_h,
        upscale_crop=False,
        upscaler_1_name=upscaler_name,
        upscaler_2_name=None,
        upscaler_2_visibility=0.0,
    )
    return result.image

async def showGradioErrorAsync(txt, delay=1):
    """Wait *delay* seconds, then raise gr.Error(txt) to surface it in the UI."""
    await asyncio.sleep(delay)
    raise gr.Error(txt)

def putPrompts(files):
    """Load a prompt JSON file into the UI widgets.

    On success returns updates for [prePrompt, prompt table, postPrompt,
    negPrompt]; on any failure logs a warning and returns no-op updates
    so the user's current prompt table is preserved.
    """
    try:
        with open(files.name, "r") as fh:
            data = readJsonPrompt(fh.read(), False)

        return [
            gr.Textbox.update(data["prePrompt"]),
            gr.DataFrame.update(data["prompts"]),
            gr.Textbox.update(data["postPrompt"]),
            gr.Textbox.update(data["negPrompt"]),
        ]
    except Exception:
        print(
            "[InfiniteZoom:] Loading your prompt failed. It seems to be invalid. Your prompt table is preserved."
        )
        # Raising here (even via asyncio) would break the gradio UI, so
        # we only log and leave every widget untouched.
        return [
            gr.Textbox.update(),
            gr.DataFrame.update(),
            gr.Textbox.update(),
            gr.Textbox.update(),
        ]


def clearPrompts():
    """Reset the prompt table and all three prompt text boxes to defaults."""
    blanks = [gr.Textbox.update("") for _ in range(3)]
    return [gr.DataFrame.update(value=[[0, "Infinite Zoom. Start over"]])] + blanks
67 changes: 67 additions & 0 deletions iz_helpers/prompt_util.py
Original file line number Diff line number Diff line change
@@ -0,0 +1,67 @@
import json
from jsonschema import validate

from .static_variables import (
empty_prompt,
invalid_prompt,
jsonprompt_schemafile,
promptTableHeaders
)

def completeOptionals(j):
    """Fill optional fields of a parsed prompt document with defaults.

    Non-dict input is returned unchanged. Any user-supplied table headers
    are replaced with our canonical ones, and negPrompt / prePrompt /
    postPrompt are defaulted (honoring the legacy commonPromptPrefix /
    commonPromptSuffix field names).
    """
    if not isinstance(j, dict):
        return j

    # Users must not restyle our table headers -- force the canonical set.
    prompts = j.get("prompts")
    if prompts is not None and "headers" in prompts:
        del prompts["headers"]
        prompts["headers"] = promptTableHeaders

    j.setdefault("negPrompt", "")

    if "prePrompt" not in j:
        j["prePrompt"] = j.get("commonPromptPrefix", "")

    if "postPrompt" not in j:
        j["postPrompt"] = j.get("commonPromptSuffix", "")

    return j


def validatePromptJson_throws(data):
    """Validate *data* against the bundled prompt JSON schema.

    Returns the document with optional fields defaulted (see
    completeOptionals). Raises Exception on schema violation; the
    underlying jsonschema error is chained so the real cause (which
    property failed) stays visible in the traceback instead of being
    swallowed by the broad except.
    """
    with open(jsonprompt_schemafile, "r") as s:
        schema = json.load(s)
    try:
        validate(instance=data, schema=schema)
    except Exception as err:
        raise Exception("Your prompts are not schema valid.") from err

    return completeOptionals(data)


def readJsonPrompt(txt, returnFailPrompt=False):
    """Parse and validate a prompt JSON string.

    Parameters:
        txt: raw JSON text; falsy input yields the empty_prompt default.
        returnFailPrompt: when True, return invalid_prompt instead of
            raising on corrupt or schema-invalid input.

    Returns the validated prompt dict with optional fields defaulted.

    Raises:
        ValueError: the text is not valid JSON (and returnFailPrompt is False).
        Exception: schema validation failed (and returnFailPrompt is False).
    """
    if not txt:
        return empty_prompt

    try:
        jpr = json.loads(txt)
    except Exception as err:
        msg = f"Infinite Zoom: Corrupted Json structure: {txt[:24]} ..."
        if returnFailPrompt:
            print(msg)
            return invalid_prompt
        # BUGFIX: the original did `raise (f"...")`, which raises a plain
        # string and fails with TypeError; raise a real exception instead.
        raise ValueError(msg) from err

    try:
        return validatePromptJson_throws(jpr)
    except Exception:
        if returnFailPrompt:
            return invalid_prompt
        # BUGFIX: previously this fell through and returned None,
        # silently hiding validation failures; propagate instead.
        raise

60 changes: 60 additions & 0 deletions iz_helpers/promptschema.json
Original file line number Diff line number Diff line change
@@ -0,0 +1,60 @@
{
"$schema": "http://json-schema.org/draft-07/schema#",
"$id": "1.1",
"type": "object",
"properties": {
"prompts": {
"type": "object",
"properties": {
"data": {
"type": "array",
"items": {
"type": "array",
"items": [
{
"oneOf": [
{
"type": "integer",
"minimum": 0
},
{
"type": "string"
}
]
},
{
"type": "string"
}
],
"minItems": 0,
"maxItems": 999,
"uniqueItems": false
},
"minItems": 0
},
"headers": {
"type": "array",
"items": {
"type": "string"
},
"minItems": 2
}
},
"required": [
"data"
]
},
"negPrompt": {
"type": "string"
},
"prePrompt": {
"type": "string"
},
"postPrompt": {
"type": "string"
}
},
"required": [
"prompts"
]
}
Loading

0 comments on commit d7263b3

Please sign in to comment.