Example code

import torch
from diffusers import AutoPipelineForText2Image

# peft must be installed for diffusers to load LoRA weights,
# but it does not need to be imported directly in this script

# Select the device (GPU if available)
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")

# Load the base FLUX.1-dev pipeline
print("Loading base model...")
pipe = AutoPipelineForText2Image.from_pretrained(
    "black-forest-labs/FLUX.1-dev",
    torch_dtype=torch.bfloat16  # FLUX.1-dev weights are published in bfloat16
)
pipe.to(device)

# Load the LoRA adapter
print("Loading LoRA weights...")
pipe.load_lora_weights(
    'Heartsync/Flux-NSFW-uncensored',
    weight_name='lora.safetensors',
    adapter_name="uncensored"
)

# Generate an image
prompt = "A woman in a sheer white dress standing on a beach at sunset"

seed = 42
generator = torch.Generator(device=device).manual_seed(seed)

image = pipe(
    prompt=prompt,
    guidance_scale=7.0,
    num_inference_steps=28,
    width=1024,
    height=1024,
    generator=generator,
).images[0]

image.save("generated_image.png")
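
To tune how strongly the adapter influences the output, or to drop it again, diffusers exposes adapter controls on the pipeline object. The lines below are a minimal sketch assuming the pipe and the "uncensored" adapter name from the example above; the 0.8 weight is only an illustrative value, not a tuned recommendation.

# Re-weight the loaded LoRA (0.8 is an illustrative value)
pipe.set_adapters(["uncensored"], adapter_weights=[0.8])

# On memory-constrained GPUs, offload idle submodules to CPU between forward passes
# (use this instead of pipe.to(device))
pipe.enable_model_cpu_offload()

# Remove the LoRA entirely to restore the base model
pipe.unload_lora_weights()

set_adapters also accepts a list of several adapter names if more than one LoRA has been loaded.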