added files

This commit is contained in:
Julian Kiedaisch 2025-04-03 19:38:12 +00:00
parent 1049ba4f07
commit f99a88d570
8 changed files with 409 additions and 0 deletions

5
requirements.txt Normal file
View File

@@ -0,0 +1,5 @@
websocket-client
requests
pydantic
gradio
ollama

BIN
text2image/images/test.png Normal file

Binary file not shown.

After

Width:  |  Height:  |  Size: 816 KiB

34
text2image/main.py Normal file
View File

@@ -0,0 +1,34 @@
from text2image import GenerateImage, Nodes, ComfyUIGenerateImageForm, load_workflow
import random
if __name__ == "__main__":
    # ComfyUI server endpoint and where the finished image is written.
    base_url = "http://172.22.0.29:8188"
    client_id = "test_client"
    image_file_name = "test.png"
    image_file_folder = "CodeNight/text2image/images"
    # Raw workflow JSON text; GenerateImage parses and patches it.
    workflow_json = load_workflow("CodeNight/text2image/malerisch.json")
    # Ids of the nodes inside malerisch.json that receive the overrides
    # below (node 5 carries both width and height inputs).
    nodes = Nodes(
        prompt=6,
        negative_prompt=7,
        width=5,
        height=5,
        seed=3,
        steps=3
    )
    payload = ComfyUIGenerateImageForm(
        workflow=workflow_json,
        nodes=nodes,
        prompt="A goat with sun glasses on a skateboard. realistic. skatepark, sunny day. 35mm",
        width=768,
        height=768,
        negative_prompt=" (octane render, render, drawing, anime, bad photo, bad photography:1.3), (worst quality, low quality, blurry:1.2), (bad teeth, deformed teeth, deformed lips), (bad anatomy, bad proportions:1.1), (deformed iris, deformed pupils), (deformed eyes, bad eyes), (deformed face, ugly face, bad face), (deformed hands, bad hands, fused fingers), morbid, mutilated, mutation, disfigured",
        # Fresh random seed on every run so repeated runs differ.
        seed=random.randint(0, 1125899906842624),
        steps=30
    )
    # Constructing GenerateImage runs the whole pipeline and saves the file.
    GenerateImage(payload, client_id, base_url, image_file_folder, image_file_name)

53
text2image/main_gradio.py Normal file
View File

@@ -0,0 +1,53 @@
from text2image import GenerateImage, Nodes, ComfyUIGenerateImageForm, load_workflow
import random, os, gradio
def generate(prompt: str) -> str:
    """Render *prompt* on the ComfyUI server and return the saved image path.

    Gradio callback: receives the textbox content and returns a local file
    path that the ``gradio.Image`` component can display.
    """
    # ComfyUI server endpoint and where the finished image is written.
    base_url = "http://172.22.0.29:8188"
    client_id = "test_client"
    image_file_name = "test.png"
    image_file_folder = "CodeNight/text2image/images"
    workflow_json = load_workflow("CodeNight/text2image/malerisch.json")
    # Ids of the nodes inside malerisch.json that receive the overrides
    # below (node 5 carries both width and height inputs).
    nodes = Nodes(
        prompt=6,
        negative_prompt=7,
        width=5,
        height=5,
        seed=3,
        steps=3
    )
    payload = ComfyUIGenerateImageForm(
        workflow=workflow_json,
        nodes=nodes,
        prompt=prompt,
        width=768,
        height=768,
        negative_prompt=" (octane render, render, drawing, anime, bad photo, bad photography:1.3), (worst quality, low quality, blurry:1.2), (bad teeth, deformed teeth, deformed lips), (bad anatomy, bad proportions:1.1), (deformed iris, deformed pupils), (deformed eyes, bad eyes), (deformed face, ugly face, bad face), (deformed hands, bad hands, fused fingers), morbid, mutilated, mutation, disfigured",
        # Fresh random seed per call so identical prompts yield new images.
        seed=random.randint(0, 1125899906842624),
        steps=30
    )
    # Constructing GenerateImage runs the whole pipeline and saves the file.
    GenerateImage(payload, client_id, base_url, image_file_folder, image_file_name)
    return os.path.join(image_file_folder, image_file_name)
# Minimal web front-end: one prompt box, a run button, and the result image.
with gradio.Blocks() as demo:
    gradio.Markdown("# Meine erste Webanwendung")
    with gradio.Row(equal_height=True):
        prompt_box = gradio.Textbox(lines=1, show_label=False, placeholder="Gib hier deinen Prompt ein")
        run_button = gradio.Button("Generate", variant="primary")
    result_image = gradio.Image(height=768)
    # Clicking the button feeds the textbox into generate() and shows the file.
    run_button.click(generate, inputs=prompt_box, outputs=result_image)

if __name__ == "__main__":
    demo.launch(share=True)

107
text2image/malerisch.json Normal file
View File

@@ -0,0 +1,107 @@
{
"3": {
"inputs": {
"seed": 766408214160696,
"steps": 30,
"cfg": 2,
"sampler_name": "dpmpp_2m_sde_gpu",
"scheduler": "karras",
"denoise": 1,
"model": [
"4",
0
],
"positive": [
"6",
0
],
"negative": [
"7",
0
],
"latent_image": [
"5",
0
]
},
"class_type": "KSampler",
"_meta": {
"title": "KSampler"
}
},
"4": {
"inputs": {
"ckpt_name": "dreamshaper/realvis5xl.safetensors"
},
"class_type": "CheckpointLoaderSimple",
"_meta": {
"title": "Load Checkpoint"
}
},
"5": {
"inputs": {
"width": 512,
"height": 512,
"batch_size": 1
},
"class_type": "EmptyLatentImage",
"_meta": {
"title": "Empty Latent Image"
}
},
"6": {
"inputs": {
"text": "In the vast, untouched wilderness of Kanadensische Alpen, der Sonnenaufgang über einen Schneelandschaft mit Bergrücken und tiefen Schluchten warf einen goldenen Lichtschein, während die unberührte Natur in Stille und Frieden schlug.",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"7": {
"inputs": {
"text": "text, watermarks, pornography",
"clip": [
"4",
1
]
},
"class_type": "CLIPTextEncode",
"_meta": {
"title": "CLIP Text Encode (Prompt)"
}
},
"8": {
"inputs": {
"samples": [
"3",
0
],
"vae": [
"4",
2
]
},
"class_type": "VAEDecode",
"_meta": {
"title": "VAE Decode"
}
},
"9": {
"inputs": {
"filename_prefix": "ComfyUI",
"images": [
"8",
0
]
},
"class_type": "SaveImage",
"_meta": {
"title": "Save Image"
}
}
}

134
text2image/text2image.py Normal file
View File

@@ -0,0 +1,134 @@
import asyncio
import json
import random
import requests
import websocket
import os
from pydantic import BaseModel
from typing import Optional
class Nodes(BaseModel):
    """Ids of the ComfyUI workflow nodes whose inputs get patched.

    Each field names the node (by its integer id in the workflow JSON)
    whose matching input receives the corresponding value from
    ComfyUIGenerateImageForm — see GenerateImage.comfyui_generate_image.
    """
    prompt: int  # node whose "text" input receives the positive prompt
    width: int  # node whose "width" input is overridden
    height: int  # node whose "height" input is overridden (may equal `width`'s node)
    negative_prompt: Optional[int] = None  # node whose "text" input gets the negative prompt
    seed: Optional[int] = None  # node whose "seed" input is overridden
    steps: Optional[int] = None  # node whose "steps" input is overridden
class ComfyUIGenerateImageForm(BaseModel):
    """Request payload for GenerateImage: a workflow plus override values."""
    workflow: str  # raw workflow JSON text (parsed with json.loads before patching)
    prompt: str  # positive prompt text written into nodes.prompt
    nodes: Nodes  # which workflow nodes to patch with the values below
    negative_prompt: Optional[str] = None  # applied only when nodes.negative_prompt is set
    width: int  # image width written into nodes.width
    height: int  # image height written into nodes.height
    steps: Optional[int] = None  # sampler steps, applied only when nodes.steps is set
    seed: Optional[int] = None  # sampler seed, applied only when nodes.seed is set
class GenerateImage:
    """Drives a ComfyUI server to render one image and saves it to disk.

    Instantiating the class runs the whole pipeline synchronously: patch
    the workflow with the payload values, queue it, wait for completion
    over a websocket, then download the first output image.
    """

    def __init__(self, payload, client_id, base_url, image_file_folder, image_file_name, api_key=""):
        """Store the configuration and immediately run the generation.

        payload: ComfyUIGenerateImageForm with the workflow and overrides.
        client_id: id sent to ComfyUI so websocket progress can be routed.
        base_url: ComfyUI HTTP endpoint, e.g. "http://host:8188".
        image_file_folder / image_file_name: where the result is saved.
        api_key: optional bearer token; the default empty string matches
            the previously hard-coded "Bearer " header.
        """
        self.payload = payload
        self.client_id = client_id
        self.base_url = base_url
        self.image_file_folder = image_file_folder
        self.image_file_name = image_file_name
        self.api_key = api_key
        # Run the async pipeline to completion right away; the instance is
        # only a namespace for the configuration above.
        asyncio.run(self.__generate())

    def save_image(self, image_url):
        """Download *image_url* and store it under the configured folder/name."""
        response = requests.get(image_url, stream=True)
        if response.status_code == 200:
            # Create the target folder if it does not exist yet.
            os.makedirs(self.image_file_folder, exist_ok=True)
            file_path = os.path.join(self.image_file_folder, self.image_file_name)
            with open(file_path, "wb") as file:
                for chunk in response.iter_content(1024):
                    file.write(chunk)
            print(f"Bild gespeichert unter: {file_path}")
        else:
            print(f"Fehler beim Download des Bildes: {response.status_code}")

    def get_image_url(self, filename, subfolder, img_type):
        """Build the /view download URL for a generated image.

        Bug fix: *filename* was previously not interpolated into the query
        string, so every download URL pointed at a non-existent file.
        """
        return f"{self.base_url}/view?filename={filename}&subfolder={subfolder}&type={img_type}"

    def queue_prompt(self):
        """Submit the patched workflow to ComfyUI and return its JSON reply."""
        response = requests.post(
            f"{self.base_url}/prompt",
            json={"prompt": self.workflow, "client_id": self.client_id},
            headers={"Authorization": f"Bearer {self.api_key}"},
        )
        return response.json()

    def get_history(self, prompt_id):
        """Fetch the execution history (including outputs) for *prompt_id*."""
        response = requests.get(
            f"{self.base_url}/history/{prompt_id}",
            headers={"Authorization": f"Bearer {self.api_key}"},
        )
        return response.json()

    def get_images(self, ws):
        """Queue the prompt, wait on *ws* until done, and collect image URLs."""
        prompt_id = self.queue_prompt()["prompt_id"]
        output_images = []
        while True:
            out = ws.recv()
            if not isinstance(out, str):
                continue  # binary frames are previews — ignore them
            message = json.loads(out)
            if message["type"] == "executing":
                data = message["data"]
                # The server signals completion with node=None for our prompt.
                if data["node"] is None and data["prompt_id"] == prompt_id:
                    break
        history = self.get_history(prompt_id)[prompt_id]
        for node_output in history["outputs"].values():
            for image in node_output.get("images", []):
                url = self.get_image_url(
                    image["filename"], image["subfolder"], image["type"]
                )
                output_images.append({"url": url})
        return {"data": output_images}

    async def comfyui_generate_image(self):
        """Patch the workflow, run it on the server, and return the image URLs.

        Returns {"data": [{"url": ...}, ...]} on success, or None when the
        websocket connection or the generation itself fails (best effort).
        """
        ws_url = self.base_url.replace("http://", "ws://").replace("https://", "wss://")
        self.workflow = json.loads(self.payload.workflow)
        nodes = self.payload.nodes
        # Write the user-supplied values into the workflow graph.
        self.workflow[str(nodes.prompt)]["inputs"]["text"] = self.payload.prompt
        self.workflow[str(nodes.width)]["inputs"]["width"] = self.payload.width
        self.workflow[str(nodes.height)]["inputs"]["height"] = self.payload.height
        # Use "is not None" so legitimate falsy values such as seed=0 apply.
        if self.payload.seed is not None and nodes.seed is not None:
            self.workflow[str(nodes.seed)]["inputs"]["seed"] = self.payload.seed
        if self.payload.steps is not None and nodes.steps is not None:
            self.workflow[str(nodes.steps)]["inputs"]["steps"] = self.payload.steps
        if self.payload.negative_prompt and nodes.negative_prompt is not None:
            self.workflow[str(nodes.negative_prompt)]["inputs"]["text"] = self.payload.negative_prompt
        try:
            ws = websocket.WebSocket()
            ws.connect(
                f"{ws_url}/ws?clientId={self.client_id}",
                header={"Authorization": f"Bearer {self.api_key}"},
            )
        except Exception as e:
            # Best effort: report instead of silently returning None.
            print(f"ComfyUI websocket connection failed: {e}")
            return None
        try:
            # get_images blocks on ws.recv(), so run it off the event loop.
            images = await asyncio.to_thread(self.get_images, ws)
        except Exception as e:
            print(f"ComfyUI image generation failed: {e}")
            images = None
        finally:
            ws.close()
        return images

    async def __generate(self):
        """Run the full pipeline and save the first returned image, if any."""
        images = await self.comfyui_generate_image()
        if images and images.get("data"):
            self.save_image(images["data"][0]["url"])
def load_workflow(file_path):
    """Return the raw text content of the workflow JSON at *file_path*."""
    with open(file_path, mode="r", encoding="utf-8") as handle:
        return handle.read()

22
text2text/main.py Normal file
View File

@@ -0,0 +1,22 @@
from ollama import Client

# Model and sampling configuration for a single one-shot generation.
model = 'llama3.2'
prompt = "Wieso ist der Himmel blau?"
top_k = 40
top_p = 0.9
temp = 0.8

client = Client(host='http://172.22.0.29:11434')
response = client.generate(
    model=model,
    prompt=prompt,
    stream=False,
    options={
        # Bug fix: 'temperature' and 'top_p' were previously swapped, so
        # the model actually sampled with temperature=0.9 and top_p=0.8.
        'top_k': top_k,
        'top_p': top_p,
        'temperature': temp,
    },
)
print(response.response)

54
text2text/main_gradio.py Normal file
View File

@@ -0,0 +1,54 @@
from ollama import Client
import json
import gradio as gr
# Model served by the Ollama instance configured below.
model = 'llama3.2'
# Conversation context returned by Ollama, carried across chat turns
# (updated in chat() via generate()).
context = []
# Shared client for all requests to the Ollama server.
client = Client(
    host='http://172.22.0.29:11434',
)
def generate(prompt, context, top_k, top_p, temp):
    """Send *prompt* (with conversation *context*) to Ollama.

    Returns a tuple of (answer text, updated context) so the caller can
    keep the conversation going.
    """
    # Note: the read-only 'global client' declaration was removed — module
    # globals are readable without it.
    response = client.generate(
        model=model,
        prompt=prompt,
        context=context,
        stream=False,
        options={
            # Bug fix: 'temperature' and 'top_p' were previously swapped,
            # so the slider values were applied to the wrong options.
            'top_k': top_k,
            'top_p': top_p,
            'temperature': temp,
        },
    )
    return response.response, response.context
def chat(input, chat_history, top_k, top_p, temp):
    """Gradio callback: append one (question, answer) turn to the history.

    Returns the history twice — once for the chatbot display and once for
    the gr.State that feeds it back in on the next call.
    """
    global context
    history = chat_history or []
    answer, context = generate(input, context, top_k, top_p, temp)
    history.append((input, answer))
    return history, history
#########################Gradio Code##########################
# Chat UI: a chatbot display, a text input, hidden history state, and the
# three Ollama sampling sliders, all forwarded to chat() on every send.
block = gr.Blocks()
with block:
    gr.Markdown("""<h1><center> Mein Assistent </center></h1>""")
    chatbot = gr.Chatbot()
    message = gr.Textbox(placeholder="Type here")
    # Carries the chat history between calls (mirrors the chatbot content).
    state = gr.State()
    with gr.Row():
        top_k = gr.Slider(0.0,100.0, label="top_k", value=40, info="Reduces the probability of generating nonsense. A higher value (e.g. 100) will give more diverse answers, while a lower value (e.g. 10) will be more conservative. (Default: 40)")
        top_p = gr.Slider(0.0,1.0, label="top_p", value=0.9, info=" Works together with top-k. A higher value (e.g., 0.95) will lead to more diverse text, while a lower value (e.g., 0.5) will generate more focused and conservative text. (Default: 0.9)")
        temp = gr.Slider(0.0,2.0, label="temperature", value=0.8, info="The temperature of the model. Increasing the temperature will make the model answer more creatively. (Default: 0.8)")
    submit = gr.Button("Send")
    # Wire the send button: inputs match chat()'s parameter order exactly.
    submit.click(chat, inputs=[message, state, top_k, top_p, temp], outputs=[chatbot, state])

block.launch(debug=True)