add replicate demo

Author: zsyOAOA
Date: 2024-12-14 16:53:42 +08:00
parent aa56a0aabc
commit 42fe15feb9
2 changed files with 134 additions and 0 deletions

cog.yaml (new file, 43 additions)

@@ -0,0 +1,43 @@
# Configuration for Cog ⚙️
# Reference: https://cog.run/yaml

build:
  # set to true if your model requires a GPU
  gpu: true

  # a list of ubuntu apt packages to install
  system_packages:
    - "libgl1-mesa-glx"
    - "libglib2.0-0"

  # python version in the form '3.11' or '3.11.4'
  python_version: "3.10"

  # a list of packages in the format <package-name>==<version>
  python_packages:
    - "torch==2.4.0"
    - "torchvision==0.19.0"
    - "torchaudio==2.4.0"
    - "git+https://github.com/huggingface/transformers.git@v4.46.0"
    - "xformers==0.0.27.post2"
    - "git+https://github.com/zsyOAOA/InvSR"
    - "scikit-image==0.24.0"
    - "albumentations==1.4.3"
    - "opencv-python==4.10.0.84"
    - "bitsandbytes==0.45.0"
    - "sentencepiece==0.2.0"
    - "protobuf==5.29.1"
    - "python-box==7.3.0"
    - "omegaconf==2.3.0"
    - "loguru==0.7.3"
    - "einops==0.8.0"
    - "pydantic==1.10.11"
    - "accelerate==0.34.2"

  # commands run after the environment is setup
  run:
    #- "pip install git+https://github.com/zsyOAOA/InvSR"
    - "echo env is ready!"

# predict.py defines how predictions are run on your model
predict: "predict.py:Predictor"

predict.py (new file, 91 additions)

@@ -0,0 +1,91 @@
# Prediction interface for Cog ⚙️
# https://cog.run/python

import os
import shutil

import numpy as np
from omegaconf import OmegaConf
from cog import BasePredictor, Input, Path

from utils import util_common
from sampler_invsr import InvSamplerSR
from basicsr.utils.download_util import load_file_from_url


class Predictor(BasePredictor):
    def setup(self) -> None:
        self.configs = OmegaConf.load("./configs/sample-sd-turbo.yaml")

    def set_configs(self, num_steps=1, chopping_size=128, seed=12345):
        # timestep schedule for the chosen number of sampling steps
        if num_steps == 1:
            self.configs.timesteps = [200,]
        elif num_steps == 2:
            self.configs.timesteps = [200, 100]
        elif num_steps == 3:
            self.configs.timesteps = [200, 100, 50]
        elif num_steps == 4:
            self.configs.timesteps = [200, 150, 100, 50]
        elif num_steps == 5:
            self.configs.timesteps = [250, 200, 150, 100, 50]
        else:
            assert num_steps <= 250
            self.configs.timesteps = np.linspace(
                start=250, stop=0, num=num_steps, endpoint=False, dtype=np.int64,
            ).tolist()
        print(f'Setting timesteps for inference: {self.configs.timesteps}')

        # path to save Stable Diffusion
        sd_path = "./weights"
        util_common.mkdir(sd_path, delete=False, parents=True)
        self.configs.sd_pipe.params.cache_dir = sd_path

        # path to save noise predictor
        started_ckpt_name = "noise_predictor_sd_turbo_v5.pth"
        started_ckpt_dir = "./weights"
        util_common.mkdir(started_ckpt_dir, delete=False, parents=True)
        started_ckpt_path = os.path.join(started_ckpt_dir, started_ckpt_name)
        if not os.path.exists(started_ckpt_path):
            load_file_from_url(
                url="https://huggingface.co/OAOA/InvSR/resolve/main/noise_predictor_sd_turbo_v5.pth",
                model_dir=started_ckpt_dir,
                progress=True,
                file_name=started_ckpt_name,
            )
        self.configs.model_start.ckpt_path = started_ckpt_path

        self.configs.bs = 1
        self.configs.seed = seed  # propagate the requested seed (was hard-coded to 12345)
        self.configs.basesr.chopping.pch_size = chopping_size
        if chopping_size == 128:
            self.configs.basesr.chopping.extra_bs = 4
        elif chopping_size == 256:
            self.configs.basesr.chopping.extra_bs = 2
        else:
            self.configs.basesr.chopping.extra_bs = 1

    def predict(
        self,
        in_path: Path = Input(description="Input low-quality image"),
        num_steps: int = Input(
            choices=[1, 2, 3, 4, 5], description="Number of sampling steps.", default=1
        ),
        chopping_size: int = Input(
            choices=[128, 256, 512], description="Chopping resolution", default=128
        ),
        seed: int = Input(description="Random seed", default=12345),
    ) -> Path:
        # setting configurations
        self.set_configs(num_steps, chopping_size, seed)
        sampler = InvSamplerSR(self.configs)

        out_dir = 'invsr_output'
        if os.path.exists(out_dir):
            shutil.rmtree(out_dir)
        sampler.inference(in_path, out_path=out_dir, bs=1)

        out = "/tmp/out.png"
        shutil.copy(os.path.join(out_dir, os.listdir(out_dir)[0]), out)
        return Path(out)
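
For a quick local sanity check outside of Replicate, the predictor can also be exercised directly from Python. The snippet below is a minimal sketch and not part of this commit: it assumes the repository root as the working directory, a GPU environment with the packages from cog.yaml installed, and an example low-quality image at testdata/lq.png (an illustrative path). In normal use the demo is driven through the Cog CLI, e.g. cog predict -i in_path=@your_image.png.

# local_check.py -- hypothetical smoke test, not part of this commit
from predict import Predictor

predictor = Predictor()
predictor.setup()                  # loads ./configs/sample-sd-turbo.yaml
result = predictor.predict(
    in_path="testdata/lq.png",     # illustrative input image path
    num_steps=1,
    chopping_size=128,
    seed=12345,
)
print(result)                      # expected output path: /tmp/out.png

When served by Cog, setup() runs once at container start while predict() runs per request, so the noise-predictor download in set_configs happens on the first prediction.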