Tool
v0.3.0
FLUX.1-schnell
Tool ID
flux1_schnell
Creator
@sweetbrulee
Downloads
370+
Image generation with FLUX.1-schnell based on Hugging Face's ZeroGPU API.
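Under the hood the tool calls the public black-forest-labs/FLUX.1-schnell Space on ZeroGPU through gradio_client. A minimal sketch of that direct call is shown below; the token and prompt are placeholder values, and the argument order and the /infer endpoint mirror the tool code further down.

# Minimal sketch (assumed values): call the FLUX.1-schnell Space directly via gradio_client.
# "hf_..." is a placeholder Hugging Face access token; prompt and sizes are example inputs.
from gradio_client import Client

client = Client("black-forest-labs/FLUX.1-schnell", hf_token="hf_...")
result = client.predict(
    "a lighthouse on a cliff at sunset",  # prompt_in_english
    0,      # seed (ignored when randomize_seed is True)
    True,   # randomize_seed
    1280,   # width
    720,    # height
    4,      # num_inference_steps
    api_name="/infer",
)
print(result[0])  # local path of the generated image downloaded by gradio_client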
README
No README available
Tool Code
""" title: FLUX.1-schnell author: sweetbrulee description: Image generation with FLUX.1-schnell based on Hugging Face's ZeroGPU API. requirements: gradio_client version: 0.3.0 licence: MIT """ # 0.1.0 -> 0.2.0 migration tips: # - Reconfigure hf_token in the Valves panel. import asyncio import shutil import traceback from pathlib import Path from gradio_client import Client from pydantic import BaseModel, Field from open_webui.routers.images import IMAGE_CACHE_DIR from open_webui.main import WEBUI_URL class Tools: class Valves(BaseModel): hf_token: str = Field( default="", description="Hugging Face Access Token", ) def __init__(self): self.valves = self.Valves() async def generate_images( self, prompt_in_english: str, seed: float, randomize_seed: bool, width: float, height: float, num_inference_steps: float, __event_emitter__=None, ): """ Generate an image by providing arguments. :param prompt_in_english: prompt to use for image generation in English. :param seed: seed for random number generation. 0 if randomize_seed is True. :param randomize_seed: whether to randomize the seed. Default is True. :param width: width of the generated image. Default is 1280. :param height: height of the generated image. Default is 720. :param num_inference_steps: number of inference steps for the image generation. Default is 4. """ try: # return all arguments with \n separator # return "\n".join([f"{k}: {v}" for k, v in locals().items()]) await __event_emitter__( { "type": "status", "data": { "description": "Generating an image", "done": False, }, } ) image_source = await predict_async( prompt_in_english, seed, randomize_seed, width, height, num_inference_steps, hf_token=self.valves.hf_token, ) # copy the image folder tool_dir = IMAGE_CACHE_DIR.joinpath("./tools_flux1_schnell/") tool_dir.mkdir(parents=True, exist_ok=True) image_destination = ( tool_dir / Path(image_source).parent.name / Path(image_source).name ) print("image_destination:", image_destination) image_destination.parent.mkdir(parents=True, exist_ok=True) shutil.copy2(image_source, image_destination) await __event_emitter__( { "type": "status", "data": { "description": "Generated an image", "done": True, }, } ) await __event_emitter__( { "type": "message", "data": { "content": f"})" }, # the file route need to be relative to /app/backend/data, so that it can be accessed by the frontend (which will be appended to the fronend base url [WEBUI_URL]) } ) return "Notify the user that the task has been already done." except Exception as e: await __event_emitter__( { "type": "status", "data": {"description": f"An error occured: {e}", "done": True}, } ) return ( f"Tell the user: \n```unhandled error\n{traceback.format_exc()}\n```\n" ) def predict_sync( prompt_in_english, seed, randomize_seed, width, height, num_inference_steps, hf_token, ): client = Client("black-forest-labs/FLUX.1-schnell", hf_token=hf_token) result = client.predict( prompt_in_english, seed, randomize_seed, width, height, num_inference_steps, api_name="/infer", ) return result[0] async def predict_async( prompt_in_english, seed, randomize_seed, width, height, num_inference_steps, *, hf_token, ): loop = asyncio.get_running_loop() image_source = await loop.run_in_executor( None, predict_sync, prompt_in_english, seed, randomize_seed, width, height, num_inference_steps, hf_token, ) return image_source