Spaces · Build error

mayuema committed "updata" · Commit d1820b1 · Parent(s): 4748ebe
__pycache__/inference_followyourpose.cpython-38.pyc (CHANGED)

Binary files a/__pycache__/inference_followyourpose.cpython-38.pyc and b/__pycache__/inference_followyourpose.cpython-38.pyc differ
__pycache__/inference_mmpose.cpython-38.pyc (CHANGED)

Binary files a/__pycache__/inference_mmpose.cpython-38.pyc and b/__pycache__/inference_mmpose.cpython-38.pyc differ
app.py (CHANGED)

@@ -69,7 +69,9 @@ with gr.Blocks(css='style.css') as demo:
 
 
     gr.HTML("""
-    <p>
+    <p>In order to run the demo successfully, we recommend the length of video is about <b>3~5 seconds</b>.
+    The temporal crop offset and sampling stride are used to adjust the starting point and interval of video samples.
+    Alternatively, try our GitHub <a href=https://github.com/mayuelala/FollowYourPose> code </a> on your GPU.
     </p>""")
 
     with gr.Row():
@@ -77,6 +79,12 @@ with gr.Blocks(css='style.css') as demo:
         with gr.Accordion('Input Video', open=True):
             # user_input_video = gr.File(label='Input Source Video')
             user_input_video = gr.Video(label='Input Source Video', source='upload', type='numpy', format="mp4", visible=True).style(height="auto")
+            video_type = gr.Dropdown(
+                label='The type of input video',
+                choices=[
+                    "Raw Video",
+                    "Skeleton Video"
+                ], value="Raw Video")
         with gr.Accordion('Temporal Crop offset and Sampling Stride', open=False):
             n_sample_frame = gr.Slider(label='Number of Frames',
                                        minimum=0,
@@ -88,9 +96,6 @@ with gr.Blocks(css='style.css') as demo:
                                        maximum=20,
                                        step=1,
                                        value=1)
-            start_sample_frame = gr.Number(label='Start frame in the video',
-                                           value=0,
-                                           precision=0)
 
         with gr.Accordion('Spatial Crop offset', open=False):
             left_crop = gr.Number(label='Left crop',
@@ -113,19 +118,10 @@ with gr.Blocks(css='style.css') as demo:
         ]
 
         ImageSequenceDataset_list = [
-            start_sample_frame,
             n_sample_frame,
             stride
         ] + offset_list
 
-        # model_id = gr.Dropdown(
-        #     label='Model ID',
-        #     choices=[
-        #         'CompVis/stable-diffusion-v1-4',
-        #         # add shape editing ckpt here
-        #     ],
-        #     value='CompVis/stable-diffusion-v1-4')
-
 
         with gr.Accordion('Text Prompt', open=True):
 
@@ -155,16 +151,16 @@ with gr.Blocks(css='style.css') as demo:
                                 minimum=0,
                                 maximum=50,
                                 step=0.1,
-                                value=12.
+                                value=12.0)
         with gr.Row():
             from example import style_example
             examples = style_example
-
         inputs = [
             user_input_video,
             target_prompt,
             num_steps,
             guidance_scale,
+            video_type,
             *ImageSequenceDataset_list
         ]
         target_prompt.submit(fn=pipe.run, inputs=inputs, outputs=result)
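For reference, a minimal runnable sketch of how the new video_type dropdown is wired into the submit callback. Only the dropdown's label, choices, and default value come from the diff above; run_stub and the plain-text result box are illustrative stand-ins for the Space's real pipe.run pipeline and its video output.

import gradio as gr

# Stand-in for merge_config_then_run().run; the real pipeline branches on
# video_type to decide whether a pose skeleton must be extracted first.
def run_stub(video, prompt, video_type):
    return f"would run '{prompt}' on a {video_type}"

with gr.Blocks() as demo:
    user_input_video = gr.Video(label='Input Source Video')
    video_type = gr.Dropdown(
        label='The type of input video',
        choices=["Raw Video", "Skeleton Video"],
        value="Raw Video")
    target_prompt = gr.Textbox(label='Target Prompt')
    result = gr.Textbox(label='Result')
    # Same wiring as the diff: submitting the prompt sends every input,
    # including video_type, to the runner.
    target_prompt.submit(fn=run_stub,
                         inputs=[user_input_video, target_prompt, video_type],
                         outputs=result)

demo.launch()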
inference_followyourpose.py (CHANGED)

@@ -35,6 +35,7 @@ class merge_config_then_run():
             target_prompt,
             num_steps,
             guidance_scale,
+            video_type,
             user_input_video=None,
             start_sample_frame=0,
             n_sample_frame=8,
@@ -44,7 +45,8 @@ class merge_config_then_run():
             top_crop=0,
             bottom_crop=0,
             ):
-
+        if video_type == "Raw Video":
+            infer_skeleton(self.mmpose, data_path)
         default_edit_config='./FollowYourPose/configs/pose_sample.yaml'
         Omegadict_default_edit_config = OmegaConf.load(default_edit_config)
 
@@ -75,7 +77,11 @@ class merge_config_then_run():
         # ddim config
         config_now['validation_data']['guidance_scale'] = guidance_scale
         config_now['validation_data']['num_inference_steps'] = num_steps
-
+
+        if video_type == "Raw Video":
+            config_now['skeleton_path'] = './mmpose_result.mp4'
+        else:
+            config_now['skeleton_path'] = data_path
 
         save_path = test(**config_now)
         mp4_path = save_path.replace('_0.gif', '_0_0_0.mp4')
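The same branch on video_type appears twice in run(): once to trigger skeleton extraction, once to pick the skeleton path written into the config. Below is a hedged sketch of the combined logic. infer_skeleton, the "Raw Video"/"Skeleton Video" labels, data_path, and the './mmpose_result.mp4' path are taken from the diff; the helper's name and signature are illustrative, and the import assumes infer_skeleton lives in the Space's inference_mmpose module (whose compiled .pyc is part of this commit).

# Assumed import; not confirmed by the diff itself.
from inference_mmpose import infer_skeleton

def resolve_skeleton_path(video_type, data_path, mmpose_model):
    if video_type == "Raw Video":
        # Raw footage: run MMPose first to render a skeleton video, which
        # the diff expects to appear at ./mmpose_result.mp4.
        infer_skeleton(mmpose_model, data_path)
        return './mmpose_result.mp4'
    # "Skeleton Video": the upload is already a skeleton rendering.
    return data_path

# Inside run(), this would collapse the two branches into one assignment:
# config_now['skeleton_path'] = resolve_skeleton_path(video_type, data_path, self.mmpose)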