Commit a6e2f3b

Merge branch 'add_safety_checker' of https://github.com/patrickvonplaten/stable-diffusion into patrickvonplaten-add_safety_checker

2 parents 7b8c883 + b985178

1 file changed (+28 −2 lines)

scripts/txt2img.py (+28 −2)
```diff
@@ -16,12 +16,31 @@
 from ldm.models.diffusion.ddim import DDIMSampler
 from ldm.models.diffusion.plms import PLMSSampler
 
+from diffusers.pipelines.stable_diffusion.safety_checker import StableDiffusionSafetyChecker
+from transformers import AutoFeatureExtractor
+
+# load safety model
+safety_model_id = "CompVis/stable-diffusion-v-1-3"
+safety_feature_extractor = AutoFeatureExtractor.from_pretrained(safety_model_id, use_auth_token=True)
+safety_checker = StableDiffusionSafetyChecker.from_pretrained(safety_model_id, use_auth_token=True)
 
 def chunk(it, size):
     it = iter(it)
     return iter(lambda: tuple(islice(it, size)), ())
 
 
+def numpy_to_pil(images):
+    """
+    Convert a numpy image or a batch of images to a PIL image.
+    """
+    if images.ndim == 3:
+        images = images[None, ...]
+    images = (images * 255).round().astype("uint8")
+    pil_images = [Image.fromarray(image) for image in images]
+
+    return pil_images
+
+
 def load_model_from_config(config, ckpt, verbose=False):
     print(f"Loading model from {ckpt}")
     pl_sd = torch.load(ckpt, map_location="cpu")
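```

The first hunk loads the safety checker and its feature extractor once at module level and adds a `numpy_to_pil` helper that the checker's preprocessing relies on. Below is a minimal, self-contained sketch (not part of the commit) of how that helper behaves, assuming float images in `[0, 1]` in NHWC layout; the dummy batch and the print are illustrative only.

```python
# Illustrative sketch only (not from the commit): exercising the new
# numpy_to_pil helper on a dummy NHWC float batch in [0, 1].
import numpy as np
from PIL import Image


def numpy_to_pil(images):
    """
    Convert a numpy image or a batch of images to a PIL image.
    """
    if images.ndim == 3:
        images = images[None, ...]  # promote a single HWC image to a batch
    images = (images * 255).round().astype("uint8")
    return [Image.fromarray(image) for image in images]


batch = np.random.rand(2, 64, 64, 3).astype(np.float32)  # hypothetical 2-image batch
pils = numpy_to_pil(batch)
print(len(pils), pils[0].size)  # -> 2 (64, 64)
```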
```diff
@@ -247,16 +266,23 @@ def main():
 
                         x_samples_ddim = model.decode_first_stage(samples_ddim)
                         x_samples_ddim = torch.clamp((x_samples_ddim + 1.0) / 2.0, min=0.0, max=1.0)
+                        x_samples_ddim = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy()
+
+                        x_image = x_samples_ddim
+                        safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")
+                        x_checked_image, has_nsfw_concept = safety_checker(images=x_image, clip_input=safety_checker_input.pixel_values)
+
+                        x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)
 
                         if not opt.skip_save:
-                            for x_sample in x_samples_ddim:
+                            for x_sample in x_checked_image_torch:
                                 x_sample = 255. * rearrange(x_sample.cpu().numpy(), 'c h w -> h w c')
                                 Image.fromarray(x_sample.astype(np.uint8)).save(
                                     os.path.join(sample_path, f"{base_count:05}.png"))
                                 base_count += 1
 
                         if not opt.skip_grid:
-                            all_samples.append(x_samples_ddim)
+                            all_samples.append(x_checked_image_torch)
 
                 if not opt.skip_grid:
                     # additionally, save as grid
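```

The second hunk converts the decoded samples from NCHW torch tensors to NHWC numpy, runs them through the safety checker, and converts the (possibly blacked-out) result back to NCHW so the existing save and grid code works unchanged. The sketch below restates that flow as a standalone helper for readability; it assumes the module-level `safety_feature_extractor`, `safety_checker`, and `numpy_to_pil` objects added in the first hunk, and the name `run_safety_check` is illustrative, not from the commit.

```python
# Sketch only: the safety-check flow from the second hunk, factored into a
# helper. Assumes safety_feature_extractor, safety_checker, and numpy_to_pil
# exist at module level (first hunk); run_safety_check is a hypothetical name.
import torch


def run_safety_check(x_samples_ddim):
    # x_samples_ddim: torch tensor in [0, 1], shape (N, C, H, W)
    x_image = x_samples_ddim.cpu().permute(0, 2, 3, 1).numpy()  # -> NHWC numpy

    # CLIP-style preprocessing expected by the checker
    safety_checker_input = safety_feature_extractor(numpy_to_pil(x_image), return_tensors="pt")

    # returns the images (blacked out where flagged) plus a per-image NSFW flag
    x_checked_image, has_nsfw_concept = safety_checker(
        images=x_image, clip_input=safety_checker_input.pixel_values
    )

    # back to NCHW torch so the existing save/grid code keeps working
    x_checked_image_torch = torch.from_numpy(x_checked_image).permute(0, 3, 1, 2)
    return x_checked_image_torch, has_nsfw_concept
```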
