Limbicnation committed
Commit 9c0baf5 · verified · 1 parent: ffe2208

Fix: diffusers from git main for Flux2KleinPipeline, low_cpu_mem_usage=False, no cpu_offload

Files changed (1): sprite_lora_resume_v6.py (+5 -9)
sprite_lora_resume_v6.py CHANGED
@@ -2,7 +2,7 @@
 # requires-python = ">=3.10"
 # dependencies = [
 #   "torch",
-#   "diffusers>=0.32.0",
+#   "diffusers @ git+https://github.com/huggingface/diffusers.git",
 #   "transformers",
 #   "accelerate",
 #   "peft",
@@ -211,10 +211,7 @@ def collate_fn(batch):
 
 def train(token):
     """Main training loop."""
-    try:
-        from diffusers import Flux2KleinPipeline as KleinPipeline
-    except ImportError:
-        from diffusers import Flux2Pipeline as KleinPipeline
+    from diffusers import Flux2KleinPipeline
     from peft import LoraConfig, get_peft_model
     from accelerate import Accelerator
 
@@ -234,18 +231,17 @@ def train(token):
 
     # --- Load model ---
     accelerator.print(f"Loading {BASE_MODEL}...")
-    pipe = KleinPipeline.from_pretrained(
+    pipe = Flux2KleinPipeline.from_pretrained(
         BASE_MODEL,
         torch_dtype=torch.bfloat16,
         low_cpu_mem_usage=False,  # Materialize all weights (prevents meta tensor crash)
     )
     # Do NOT call pipe.enable_model_cpu_offload() — accelerator manages device placement.
-    # Move frozen components (VAE, text encoders) to GPU manually.
+    # Move frozen components to GPU manually.
+    # FLUX.2-klein uses Qwen3 text encoder + AutoencoderKLFlux2
     pipe.vae = pipe.vae.to(device)
     if hasattr(pipe, 'text_encoder') and pipe.text_encoder is not None:
         pipe.text_encoder = pipe.text_encoder.to(device)
-    if hasattr(pipe, 'text_encoder_2') and pipe.text_encoder_2 is not None:
-        pipe.text_encoder_2 = pipe.text_encoder_2.to(device)
 
     # --- Apply LoRA ---
     lora_config = LoraConfig(
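
Note on the dependency change: Flux2KleinPipeline is not yet in a tagged diffusers release, so the script's PEP 723 metadata now points at git main, and a PEP 723-aware runner such as uv resolves the git dependency when the script starts. A small sanity check like the sketch below (hypothetical, not part of this script) fails fast if the installed diffusers is too old:

import diffusers

try:
    from diffusers import Flux2KleinPipeline  # noqa: F401
except ImportError as err:
    raise SystemExit(
        f"diffusers {diffusers.__version__} has no Flux2KleinPipeline; "
        "install git main: pip install git+https://github.com/huggingface/diffusers.git"
    ) from err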
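
Note on loading: with cpu_offload removed, the commit settles on loading all weights eagerly and placing the frozen parts on the GPU by hand, leaving the trainable transformer for accelerate. A minimal sketch of that pattern, assuming a single-GPU setup, with BASE_MODEL standing in for the checkpoint id defined elsewhere in the script:

import torch
from accelerate import Accelerator
from diffusers import Flux2KleinPipeline

BASE_MODEL = "..."  # hypothetical placeholder; the real id is set in the script

accelerator = Accelerator()
device = accelerator.device

pipe = Flux2KleinPipeline.from_pretrained(
    BASE_MODEL,
    torch_dtype=torch.bfloat16,
    low_cpu_mem_usage=False,  # materialize weights eagerly, no meta tensors
)

# No enable_model_cpu_offload(): its offload hooks would fight accelerate's
# own device placement. Frozen components (VAE, Qwen3 text encoder) go to
# the GPU once, by hand; the transformer is left for accelerator.prepare().
pipe.vae = pipe.vae.to(device)
if getattr(pipe, "text_encoder", None) is not None:
    pipe.text_encoder = pipe.text_encoder.to(device)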
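
Note on the LoRA setup: the diff ends at the LoraConfig call, so the script's actual hyperparameters are not shown. The sketch below uses assumed values (rank, alpha, target modules) purely to illustrate the shape of a typical config for a diffusers transformer:

from peft import LoraConfig, get_peft_model

lora_config = LoraConfig(
    r=16,           # assumed rank, not the script's value
    lora_alpha=16,  # assumed scaling factor
    lora_dropout=0.0,
    target_modules=["to_q", "to_k", "to_v", "to_out.0"],  # assumed attention projections
)
pipe.transformer = get_peft_model(pipe.transformer, lora_config)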