From 06210c12e19ef7cb442977e3fc47d5cbc9787a7d Mon Sep 17 00:00:00 2001
From: Ren Pang
Date: Mon, 17 Mar 2025 14:17:01 -0400
Subject: [PATCH 1/2] Update shieldgemma2.md

---
 docs/source/en/model_doc/shieldgemma2.md | 22 ++++++++++++----------
 1 file changed, 12 insertions(+), 10 deletions(-)

diff --git a/docs/source/en/model_doc/shieldgemma2.md b/docs/source/en/model_doc/shieldgemma2.md
index cd253ed50a3e..71e417222ff8 100644
--- a/docs/source/en/model_doc/shieldgemma2.md
+++ b/docs/source/en/model_doc/shieldgemma2.md
@@ -35,37 +35,39 @@ This model was contributed by [Ryan Mullins](https://huggingface.co/RyanMullins)
 - You can extend ShieldGemma's built-in policies with the `custom_policies` argument to the Processor. Using the same key as one of the built-in policies will overwrite that policy with your custom definition.
 - ShieldGemma 2 does not support the image cropping capabilities used by Gemma 3.
 
-### Classificaiton against Built-in Policies
+### Classification against Built-in Policies
 
 ```python
+from PIL import Image
+import requests
 from transformers import AutoProcessor, ShieldGemma2ForImageClassification
 
 model_id = "google/shieldgemma-2-4b-it"
 model = ShieldGemma2ForImageClassification.from_pretrained(model_id, device_map="auto")
 processor = AutoProcessor.from_pretrained(model_id)
 
-image_1 = # An image you have loaded
-image_2 = # An image you have loaded
-image_3 = # An image you have loaded
+url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
 
-inputs = processor(images=[image_1, image_2, image_3]).to(model.device)
+inputs = processor(images=[image], return_tensors="pt").to(model.device)
 
 output = model(**inputs)
 print(output.probabilities)
 ```
 
-### Classificaiton against Custom Policies
+### Classification against Custom Policies
 
 ```python
+from PIL import Image
+import requests
 from transformers import AutoProcessor, ShieldGemma2ForImageClassification
 
 model_id = "google/shieldgemma-2-4b-it"
 model = ShieldGemma2ForImageClassification.from_pretrained(model_id, device_map="auto")
 processor = AutoProcessor.from_pretrained(model_id)
 
-image_1 = # An image you have loaded
-image_2 = # An image you have loaded
-image_3 = # An image you have loaded
+url = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/bee.jpg"
+image = Image.open(requests.get(url, stream=True).raw)
 
 custom_policies = {
     "key_a": "description_a",
@@ -73,7 +75,7 @@ custom_policies = {
 }
 
 inputs = processor(
-    images=[image_1, image_2, image_3],
+    images=[image],
     custom_policies=custom_policies,
     policies=["dangerous", "key_a", "key_b"],
 ).to(model.device)

From ed6c1d1cb3ff432798bcec6e5808280b7e76038f Mon Sep 17 00:00:00 2001
From: Ren Pang
Date: Mon, 17 Mar 2025 14:25:08 -0400
Subject: [PATCH 2/2] Update shieldgemma2.md

---
 docs/source/en/model_doc/shieldgemma2.md | 1 +
 1 file changed, 1 insertion(+)

diff --git a/docs/source/en/model_doc/shieldgemma2.md b/docs/source/en/model_doc/shieldgemma2.md
index 71e417222ff8..a45b3a095534 100644
--- a/docs/source/en/model_doc/shieldgemma2.md
+++ b/docs/source/en/model_doc/shieldgemma2.md
@@ -78,6 +78,7 @@ inputs = processor(
     images=[image],
     custom_policies=custom_policies,
     policies=["dangerous", "key_a", "key_b"],
+    return_tensors="pt",
 ).to(model.device)
 
 output = model(**inputs)
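For anyone trying the updated snippets end to end, here is a minimal sketch of how the classification result might be inspected after `output = model(**inputs)`. It assumes `output.probabilities` holds one row per requested policy, with column 0 corresponding to the "Yes" (policy violated) answer; neither the row order nor the column layout is established by this patch, so check the ShieldGemma 2 model card and docs before relying on it.

```python
# Hypothetical follow-up to the custom-policies snippet above.
# ASSUMPTIONS (not guaranteed by this patch): one probability row per
# policy, in the order passed to the processor, laid out as [Yes, No].
policies = ["dangerous", "key_a", "key_b"]

for policy, probs in zip(policies, output.probabilities):
    yes_prob = probs[0].item()  # assumed: column 0 = "Yes" (violation)
    print(f"{policy}: violation probability ~ {yes_prob:.3f}")
```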