# An example of how to convert a given API workflow into its own Replicate model.
# Replace predict.py with this file when building your own workflow.
import os
import mimetypes
import json
import shutil
from typing import List
from cog import BasePredictor, Input, Path
from comfyui import ComfyUI

OUTPUT_DIR = "/tmp/outputs"
INPUT_DIR = "/tmp/inputs"
COMFYUI_TEMP_OUTPUT_DIR = "ComfyUI/temp"
ALL_DIRECTORIES = [OUTPUT_DIR, INPUT_DIR, COMFYUI_TEMP_OUTPUT_DIR]

mimetypes.add_type("image/webp", ".webp")

api_json_file = "workflow_api.json"

# Force HF offline
os.environ["HF_DATASETS_OFFLINE"] = "1"
os.environ["TRANSFORMERS_OFFLINE"] = "1"
os.environ["HF_HUB_DISABLE_TELEMETRY"] = "1"


class Predictor(BasePredictor):
    def setup(self):
        self.comfyUI = ComfyUI("127.0.0.1:8188")
        self.comfyUI.start_server(OUTPUT_DIR, INPUT_DIR)
        self.comfyUI.handle_weights(
            {},
            weights_to_download=[
                "buffalo_l",
                "appearance_feature_extractor.safetensors",
                "landmark.onnx",
                "motion_extractor.safetensors",
                "spade_generator.safetensors",
                "stitching_retargeting_module.safetensors",
                "warping_module.safetensors",
            ],
        )
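        # Note: handle_weights' first argument is normally the parsed workflow
        # dict, which it scans for weights to fetch; passing {} here means only
        # the LivePortrait and InsightFace files listed above are downloaded
        # during setup (a reading of how this helper is used in cog-comfyui,
        # not a documented contract).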

    def filename_with_extension(self, input_file, prefix):
        extension = os.path.splitext(input_file.name)[1]
        return f"{prefix}{extension}"

    def handle_input_file(
        self,
        input_file: Path,
        filename: str = "image.png",
    ):
        shutil.copy(input_file, os.path.join(INPUT_DIR, filename))
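        # For example (hypothetical filenames): an upload at /tmp/upload/selfie.jpg
        # with prefix "face" becomes "face.jpg", which handle_input_file copies to
        # /tmp/inputs/face.jpg. Fixed prefixes keep the filenames that the workflow
        # JSON references stable regardless of what the user uploads.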

    def update_workflow(self, workflow, **kwargs):
        load_video = workflow["8"]["inputs"]
        load_video["video"] = kwargs["driving_filename"]
        load_video["frame_load_cap"] = kwargs["frame_load_cap"]
        load_video["select_every_n_frames"] = kwargs["select_every_n_frames"]

        load_image = workflow["4"]["inputs"]
        load_image["image"] = kwargs["face_filename"]

        live_portrait = workflow["30"]["inputs"]
        live_portrait["dsize"] = kwargs["dsize"]
        live_portrait["scale"] = kwargs["scale"]
        live_portrait["vx_ratio"] = kwargs["vx_ratio"]
        live_portrait["vy_ratio"] = kwargs["vy_ratio"]
        live_portrait["lip_zero"] = kwargs["lip_zero"]
        live_portrait["eye_retargeting"] = kwargs["eye_retargeting"]
        live_portrait["eyes_retargeting_multiplier"] = kwargs[
            "eyes_retargeting_multiplier"
        ]
        live_portrait["lip_retargeting"] = kwargs["lip_retargeting"]
        live_portrait["lip_retargeting_multiplier"] = kwargs[
            "lip_retargeting_multiplier"
        ]
        live_portrait["stitching"] = kwargs["stitching"]
        live_portrait["relative"] = kwargs["relative"]

    def predict(
        self,
        face_image: Path = Input(
            description="An image with a face",
        ),
        driving_video: Path = Input(
            description="A video to drive the animation",
        ),
        video_frame_load_cap: int = Input(
            description="The maximum number of frames to load from the driving video. Set to 0 to use all frames.",
            default=128,
        ),
        video_select_every_n_frames: int = Input(
            description="Select every nth frame from the driving video. Set to 1 to use all frames.",
            default=1,
        ),
        live_portrait_dsize: int = Input(
            description="Size of the output image", default=512, ge=64, le=2048
        ),
        live_portrait_scale: float = Input(
            description="Scaling factor for the face", default=2.3, ge=1.0, le=4.0
        ),
        live_portrait_vx_ratio: float = Input(
            description="Horizontal shift ratio", default=0, ge=-1.0, le=1.0
        ),
        live_portrait_vy_ratio: float = Input(
            description="Vertical shift ratio", default=-0.12, ge=-1.0, le=1.0
        ),
        live_portrait_lip_zero: bool = Input(
            description="Enable lip zero", default=True
        ),
        live_portrait_eye_retargeting: bool = Input(
            description="Enable eye retargeting", default=False
        ),
        live_portrait_eyes_retargeting_multiplier: float = Input(
            description="Multiplier for eye retargeting", default=1.0, ge=0.01, le=10.0
        ),
        live_portrait_lip_retargeting: bool = Input(
            description="Enable lip retargeting", default=False
        ),
        live_portrait_lip_retargeting_multiplier: float = Input(
            description="Multiplier for lip retargeting", default=1.0, ge=0.01, le=10.0
        ),
        live_portrait_stitching: bool = Input(
            description="Enable stitching", default=True
        ),
        live_portrait_relative: bool = Input(
            description="Use relative positioning", default=True
        ),
    ) -> List[Path]:
"""Run a single prediction on the model"""
self.comfyUI.cleanup(ALL_DIRECTORIES)
face_filename = self.filename_with_extension(face_image, "face")
self.handle_input_file(face_image, face_filename)
driving_filename = self.filename_with_extension(driving_video, "driving")
self.handle_input_file(driving_video, driving_filename)
with open(api_json_file, "r") as file:
workflow = json.loads(file.read())
self.update_workflow(
workflow,
face_filename=face_filename,
driving_filename=driving_filename,
frame_load_cap=video_frame_load_cap,
select_every_n_frames=video_select_every_n_frames,
dsize=live_portrait_dsize,
scale=live_portrait_scale,
vx_ratio=live_portrait_vx_ratio,
vy_ratio=live_portrait_vy_ratio,
lip_zero=live_portrait_lip_zero,
eye_retargeting=live_portrait_eye_retargeting,
eyes_retargeting_multiplier=live_portrait_eyes_retargeting_multiplier,
lip_retargeting=live_portrait_lip_retargeting,
lip_retargeting_multiplier=live_portrait_lip_retargeting_multiplier,
stitching=live_portrait_stitching,
relative=live_portrait_relative,
)
self.comfyUI.connect()
self.comfyUI.run_workflow(workflow)
return self.comfyUI.get_files(OUTPUT_DIR, file_extensions=["mp4"])
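
# Example local invocation with the Cog CLI (a sketch; the input names follow
# the predict() signature above, and face.png / driving.mp4 are placeholder
# files you would supply yourself):
#
#   cog predict \
#     -i face_image=@face.png \
#     -i driving_video=@driving.mp4 \
#     -i video_frame_load_cap=128 \
#     -i live_portrait_dsize=512
#
# The prediction returns the mp4 files the workflow writes to /tmp/outputs.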