# -- coding: utf-8 -- # --- IMPORT LIBRARY UTAMA & INTI --- import gradio as gr import torch import numpy as np from diffusers import DiffusionPipeline import random import time import os from datetime import datetime, timedelta # <-- KESALAHAN SUDAH DIPERBAIKI DI SINI import csv import pandas as pd import threading from PIL import Image, ImageEnhance from pathlib import Path import uuid # Library untuk ID unik chat import urllib.parse import json # Library baru untuk menyimpan data share import string # Library baru untuk generate ID random # --- LIBRARY BARU UNTUK FITUR UPGRADE --- try: import psutil import platform from transformers import Swin2SRForImageSuperResolution, Swin2SRImageProcessor print("✅ Library tambahan (psutil, transformers) berhasil diimpor.") except ImportError: print("❌ Peringatan: Library 'psutil' atau 'transformers' tidak ditemukan. Fitur System Monitor & Upscaler tidak akan berfungsi.") psutil = None platform = None Swin2SRForImageSuperResolution = None Swin2SRImageProcessor = None try: import google.generativeai as genai print("✅ Library 'google-generativeai' berhasil diimpor.") except ImportError: print("❌ Peringatan: Library 'google-generativeai' tidak ditemukan. Fitur Chatbot & Prompt Enhancer tidak akan berfungsi.") genai = None # --- HEAD HTML & CSS (Tampilan Profesional dengan Upgrade untuk Chat) --- HEAD_HTML = """ RenXploit's Creative AI Suite """ # --- PENGATURAN & LOGIC GLOBAL --- VISITOR_LOG_FILE = "visitor_log.csv" HISTORY_LOG_FILE = "generation_log.csv" IMAGE_HISTORY_DIR = Path("generated_images") SHARE_DATA_DIR = Path("shared_data") file_lock = threading.Lock() device = "cuda" if torch.cuda.is_available() else "cpu" print(f"âžĄī¸ Menggunakan device: {device.upper()}") # --- Inisialisasi File Log & Direktori --- def initialize_environment(): if not os.path.exists(VISITOR_LOG_FILE): with file_lock: if not os.path.exists(VISITOR_LOG_FILE): with open(VISITOR_LOG_FILE, mode='w', newline='', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(["Timestamp", "IP Address", "User Agent"]) print(f"✅ File log '{VISITOR_LOG_FILE}' berhasil dibuat.") IMAGE_HISTORY_DIR.mkdir(exist_ok=True) SHARE_DATA_DIR.mkdir(exist_ok=True) if not os.path.exists(HISTORY_LOG_FILE): with file_lock: if not os.path.exists(HISTORY_LOG_FILE): with open(HISTORY_LOG_FILE, mode='w', newline='', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow(["Timestamp", "Filename", "Prompt", "NegativePrompt", "Seed", "Steps"]) print(f"✅ File log riwayat '{HISTORY_LOG_FILE}' dan direktori '{IMAGE_HISTORY_DIR}' siap.") initialize_environment() # --- PEMUATAN MODEL-MODEL AI --- print("âžĄī¸ Memuat model SDXL-Turbo...") pipe = DiffusionPipeline.from_pretrained( "stabilityai/sdxl-turbo", torch_dtype=torch.float16 if device == "cuda" else torch.float32, variant="fp16" if device == "cuda" else None, use_safetensors=True ).to(device) if torch.cuda.is_available(): pipe.enable_xformers_memory_efficient_attention() print("✅ Model SDXL-Turbo berhasil dimuat.") upscaler_model = None upscaler_processor = None if Swin2SRForImageSuperResolution: try: print("âžĄī¸ Memuat model AI Upscaler (Swin2SR)...") upscaler_model = Swin2SRForImageSuperResolution.from_pretrained("caidas/swin2sr-realworld-sr-x4-64-bsrgan-psnr").to(device) upscaler_processor = Swin2SRImageProcessor.from_pretrained("caidas/swin2sr-realworld-sr-x4-64-bsrgan-psnr") print("✅ Model AI Upscaler berhasil dimuat.") except Exception as e: print(f"❌ Gagal memuat model Upscaler: {e}. Fitur upscale akan dinaktifkan.") # --- KELAS UNTUK CHATBOT GEMINI --- class GeminiChat: def __init__(self): self.api_keys = [] self.is_configured = False if not genai: return i = 1 while True: key = os.getenv(f"GEMINI_API_KEY_{i}") if key: self.api_keys.append(key) i += 1 else: break if self.api_keys: print(f"✅ Berhasil memuat {len(self.api_keys)} API Key Gemini. Sistem rotasi aktif.") self.is_configured = True else: print("❌ PERINGATAN: Tidak ada API Key Gemini yang ditemukan. Fitur AI Chat & Prompt Enhancer tidak akan berfungsi.") def chat(self, message, history, system_prompt=None): if not self.is_configured: return "Maaf, fitur ini tidak terkonfigurasi karena tidak ada API Key." try: selected_key = random.choice(self.api_keys) genai.configure(api_key=selected_key) model = genai.GenerativeModel('gemini-2.5-flash') chat_history_for_api = [] for user_msg, model_msg in history: if user_msg: chat_history_for_api.append({"role": "user", "parts": [user_msg]}) if model_msg: chat_history_for_api.append({"role": "model", "parts": [model_msg]}) chat_session = model.start_chat(history=chat_history_for_api) full_prompt = message if system_prompt and not history: full_prompt = f"{system_prompt}\n\nUser query: {message}" response = chat_session.send_message(full_prompt) return response.text except Exception as e: print(f"❌ Terjadi error pada API Key Gemini: {e}") return "Terjadi kesalahan saat menghubungi API AI. Mungkin salah satu API Key tidak valid atau ada masalah jaringan. Silakan coba lagi." gemini_bot = GeminiChat() # --- FUNGSI-FUNGSI INTI (GENERATOR & LAINNYA) --- def log_visitor(request: gr.Request): timestamp = datetime.now().strftime("%Y-%m-%d %H:%M:%S") ip_address = request.client.host if request else "N/A" user_agent = request.headers.get("user-agent", "Unknown") if request else "N/A" with file_lock: with open(VISITOR_LOG_FILE, mode='a', newline='', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow([timestamp, ip_address, user_agent]) print(f"✅ Pengunjung baru tercatat: IP {ip_address}") def generate_images(prompt, negative_prompt, steps, seed, num_images): if not prompt: raise gr.Error("Prompt tidak boleh kosong!") if seed == -1: seed = random.randint(0, 2**32 - 1) generator = torch.manual_seed(seed) images = pipe(prompt=prompt, negative_prompt=negative_prompt, generator=generator, num_inference_steps=steps, guidance_scale=0.0, num_images_per_prompt=num_images).images return images, seed def genie_wrapper(prompt, negative_prompt, steps, seed, num_images): yield gr.update(visible=False), gr.update(visible=True, value="
AI sedang melukis mahakarya Anda...
"), gr.update(interactive=False), gr.update(visible=False) start_time = time.time() images, used_seed = generate_images(prompt, negative_prompt, int(steps), int(seed), int(num_images)) end_time = time.time() timestamp_str = datetime.now().strftime("%Y-%m-%d %H:%M:%S") for i, img in enumerate(images): filename = f"{int(time.time())}_{used_seed}_{i}.png" filepath = IMAGE_HISTORY_DIR / filename img.save(filepath) with file_lock: with open(HISTORY_LOG_FILE, mode='a', newline='', encoding='utf-8') as f: writer = csv.writer(f) writer.writerow([timestamp_str, filename, prompt, negative_prompt, used_seed, int(steps)]) generation_time = end_time - start_time info_text = f"Seed yang digunakan: {used_seed}\nTotal waktu generasi: {generation_time:.2f} detik" yield gr.update(value=images, visible=True), gr.update(visible=False), gr.update(interactive=True), gr.update(value=info_text, visible=True) def submit_report(name, email, message): if not name or not message: gr.Warning("Nama dan Pesan tidak boleh kosong!") return gr.update(visible=False) report_time = datetime.now().strftime("%Y-%m-%d %H:%M:%S") report_content = f"--- Laporan Baru ({report_time}) ---\nNama: {name}\nEmail: {email}\nPesan: {message}\n\n" with open("reports.log", "a", encoding="utf-8") as f: f.write(report_content) print("✅ Laporan baru telah disimpan ke reports.log") return gr.update(value="✅ Terima kasih! Laporan Anda telah kami terima.", visible=True) # --- FUNGSI VISITOR MONITOR --- def update_visitor_monitor(time_filter: str): try: with file_lock: if not os.path.exists(VISITOR_LOG_FILE) or os.path.getsize(VISITOR_LOG_FILE) == 0: return "## 📈 0", pd.DataFrame({"Timestamp": [], "Total Pengunjung": []}) column_names = ["Timestamp", "IP Address", "User Agent"] df = pd.read_csv(VISITOR_LOG_FILE, header=None, names=column_names) if not df.empty and df.iloc[0]['Timestamp'] == 'Timestamp': df = df.iloc[1:].reset_index(drop=True) if df.empty: return "## 📈 0", pd.DataFrame({"Timestamp": [], "Total Pengunjung": []}) df['Timestamp'] = pd.to_datetime(df['Timestamp'], errors='coerce') df.dropna(subset=['Timestamp'], inplace=True) if df.empty: return "## 📈 0", pd.DataFrame({"Timestamp": [], "Total Pengunjung": []}) total_overall_visitors = len(df) total_visitors_formatted = f"## 📈 {total_overall_visitors:,}" df['Total Pengunjung'] = np.arange(1, len(df) + 1) now = datetime.now() if time_filter == "1 Minggu Terakhir": df_plot = df[df['Timestamp'] >= now - timedelta(weeks=1)] elif time_filter == "2 Minggu Terakhir": df_plot = df[df['Timestamp'] >= now - timedelta(weeks=2)] elif time_filter == "3 Bulan Terakhir": df_plot = df[df['Timestamp'] >= now - timedelta(days=90)] else: df_plot = df if df_plot.empty: return total_visitors_formatted, pd.DataFrame({"Timestamp": [], "Total Pengunjung": []}) return total_visitors_formatted, df_plot except Exception as e: error_message = f"Error saat memperbarui monitor: {e}" print(f"❌ {error_message}") return f"## âš ī¸ Error: {e}", pd.DataFrame({"Error": [error_message]}) # --- FUNGSI-FUNGSI FITUR BARU --- def enhance_prompt(simple_prompt): if not simple_prompt: gr.Warning("Tolong masukkan ide Anda terlebih dahulu.") return "" if not gemini_bot.is_configured: gr.Error("Fitur Prompt Enhancer tidak aktif karena API Key Gemini tidak diatur.") return "Fitur tidak aktif." system_instruction = ( "Anda adalah seorang ahli prompt engineering untuk model AI text-to-image seperti Stable Diffusion. " "Tugas Anda adalah mengubah ide sederhana dari pengguna menjadi prompt yang kaya, deskriptif, dan artistik. " "Fokus pada detail visual: subjek, setting, pencahayaan, gaya seni, komposisi, dan kualitas. " "Hasilkan HANYA prompt-nya saja dalam format teks panjang, tanpa penjelasan atau kalimat pembuka/penutup." ) yield "🧠 AI sedang meracik prompt ajaib untuk Anda..." enhanced_prompt = gemini_bot.chat(simple_prompt, [], system_prompt=system_instruction) yield enhanced_prompt def upscale_image(image_to_upscale, clarity_strength): if image_to_upscale is None: raise gr.Error("Silakan unggah gambar terlebih dahulu.") if upscaler_model is None or upscaler_processor is None: raise gr.Error("Fitur Upscaler tidak aktif karena model gagal dimuat.") yield None, "🚀 Memproses peningkatan resolusi 4x oleh AI..." try: with torch.no_grad(): inputs = upscaler_processor(image_to_upscale, return_tensors="pt").to(device) outputs = upscaler_model(**inputs) output_image = outputs.reconstruction.data.squeeze().float().cpu().clamp_(0, 1).numpy() output_image = np.moveaxis(output_image, source=0, destination=-1) output_image = (output_image * 255.0).round().astype(np.uint8) final_image = Image.fromarray(output_image) if clarity_strength > 1.0: yield final_image, f"✨ Menerapkan peningkatan kejernihan (Strength: {clarity_strength:.2f})..." enhancer = ImageEnhance.Sharpness(final_image) final_image = enhancer.enhance(clarity_strength) yield final_image, f"✅ Gambar berhasil ditingkatkan! Resolusi akhir: {final_image.width}x{final_image.height}px." except Exception as e: print(f"❌ Error saat upscaling: {e}") yield None, f"âš ī¸ Terjadi error saat upscaling: {e}" def update_system_info(): if not psutil or not platform: return "Informasi sistem tidak tersedia (library psutil tidak ditemukan)." cpu_percent = psutil.cpu_percent(interval=None) ram = psutil.virtual_memory() gpu_info = "Tidak terdeteksi (PyTorch tidak menemukan CUDA)" if torch.cuda.is_available(): gpu_name = torch.cuda.get_device_name(0) gpu_mem_used_gb = torch.cuda.memory_allocated(0) / (1024**3) gpu_mem_total_gb = torch.cuda.get_device_properties(0).total_memory / (1024**3) gpu_info = f"GPU: {gpu_name}\nVRAM Terpakai: {gpu_mem_used_gb:.2f} GB / {gpu_mem_total_gb:.2f} GB" sys_info = f"**Platform:** `{platform.system()} {platform.release()}`" return (f"**CPU Terpakai:** `{cpu_percent:.1f}%`\n" f"**RAM Terpakai:** `{ram.percent:.1f}% ({ram.used / (1024**3):.2f} GB / {ram.total / (1024**3):.2f} GB)`\n" f"{gpu_info}\n---\n{sys_info}") # --- FUNGSI-FUNGSI UNTUK MENU BARU & FITUR SHARE (DIPERBAIKI TOTAL) --- def generate_random_id(length=10): """Menghasilkan ID acak dari huruf dan angka.""" return ''.join(random.choices(string.ascii_lowercase + string.digits, k=length)) def share_image_history(selected_index: int, history_df: pd.DataFrame, request: gr.Request): """Membuat URL share untuk detail gambar.""" if selected_index is None or history_df.empty: gr.Warning("Pilih gambar terlebih dahulu sebelum membagikan.") return gr.update(visible=False) try: selected_row = history_df.iloc[selected_index] base_url = "https://floodd-ai2.hf.space/" share_data = { 'prompt': selected_row['Prompt'], 'neg': selected_row.get('NegativePrompt', ''), 'seed': int(selected_row['Seed']), 'steps': int(selected_row['Steps']) } share_id = generate_random_id() with open(SHARE_DATA_DIR / f"image_{share_id}.json", 'w') as f: json.dump(share_data, f) share_url = f"{base_url}?share_image_id={share_id}" share_content = f"✅ **URL Share Berhasil Dibuat!**\nBagikan link ini untuk mengisi ulang generator:\n\n`{share_url}`" return gr.update(value=share_content, visible=True) except Exception as e: print(f"❌ Error saat membuat URL share gambar: {e}") return gr.update(value=f"âš ī¸ Gagal membuat URL: {e}", visible=True) def share_chat_history(chat_id, all_chats_history, request: gr.Request): """Membuat URL share untuk riwayat obrolan.""" if not chat_id or chat_id not in all_chats_history: gr.Warning("Pilih obrolan yang valid untuk dibagikan.") return gr.update(visible=False) try: chat_to_share = all_chats_history[chat_id] if not chat_to_share.get('history'): gr.Warning("Obrolan ini kosong dan tidak bisa dibagikan.") return gr.update(visible=False) base_url = "https://floodd-ai2.hf.space/" with open(SHARE_DATA_DIR / f"chat_{chat_id}.json", 'w') as f: json.dump(chat_to_share, f) share_url = f"{base_url}?share_chat_id={chat_id}" share_content = f"✅ **URL Share Berhasil Dibuat!**\nBagikan link ini untuk melihat obrolan:\n\n`{share_url}`" return gr.update(value=share_content, visible=True) except Exception as e: print(f"❌ Error saat membuat URL share chat: {e}") return gr.update(value=f"âš ī¸ Gagal membuat URL: {e}", visible=True) def handle_shared_url(request: gr.Request): """Fungsi ini berjalan saat aplikasi dimuat untuk memeriksa parameter URL.""" # Mengembalikan tuple dengan panjang yang konsisten untuk semua return path default_chat_return = (gr.State.IDENTITY, None, [], gr.update(), gr.update()) default_image_return = (gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY, gr.update()) if not request: return default_chat_return, default_image_return params = request.query_params if 'share_chat_id' in params: chat_id = params['share_chat_id'] filepath = SHARE_DATA_DIR / f"chat_{chat_id}.json" if filepath.exists(): print(f"✅ Memuat chat dari URL share: {chat_id}") with open(filepath, 'r') as f: shared_chat_data = json.load(f) initial_chat_state = {chat_id: shared_chat_data} chat_return = ( initial_chat_state, chat_id, shared_chat_data.get('history', []), gr.update(choices=[shared_chat_data.get('title', 'Obrolan Dibagikan')], value=shared_chat_data.get('title', 'Obrolan Dibagikan')), gr.Tabs(selected=1) ) return chat_return, default_image_return else: print(f"âš ī¸ ID share chat tidak ditemukan: {chat_id}") elif 'share_image_id' in params: image_id = params['share_image_id'] filepath = SHARE_DATA_DIR / f"image_{image_id}.json" if filepath.exists(): print(f"✅ Memuat detail gambar dari URL share: {image_id}") with open(filepath, 'r') as f: shared_image_data = json.load(f) image_return = ( shared_image_data.get('prompt', ''), shared_image_data.get('neg', ''), shared_image_data.get('seed', -1), shared_image_data.get('steps', 2), gr.Tabs(selected=0) ) return default_chat_return, image_return else: print(f"âš ī¸ ID share gambar tidak ditemukan: {image_id}") return default_chat_return, default_image_return def load_history(): try: with file_lock: if not os.path.exists(HISTORY_LOG_FILE): return [], pd.DataFrame(), "### 📂 Riwayat Kosong\nBelum ada gambar yang dihasilkan." df = pd.read_csv(HISTORY_LOG_FILE) if df.empty: return [], df, "### 📂 Riwayat Kosong\nBelum ada gambar yang dihasilkan." df_sorted = df.sort_values(by="Timestamp", ascending=False) image_paths = [str(IMAGE_HISTORY_DIR / fname) for fname in df_sorted['Filename'] if (IMAGE_HISTORY_DIR / fname).exists()] return image_paths, df_sorted, "### ⓘ Detail Gambar\nKlik pada sebuah gambar untuk melihat detailnya." except Exception as e: print(f"❌ Error memuat riwayat: {e}") return [], pd.DataFrame(), f"### âš ī¸ Error\nTidak dapat memuat riwayat: {e}" def show_history_details(evt: gr.SelectData, history_df: pd.DataFrame): if not evt.selected or history_df.empty: return "### ⓘ Detail Gambar\nKlik gambar untuk melihat detailnya.", gr.update(visible=False), gr.update(visible=False), None selected_index = evt.index selected_row = history_df.iloc[selected_index] details = f""" **Prompt:** `{selected_row['Prompt']}` **Negative Prompt:** `{selected_row.get('NegativePrompt', 'N/A')}` --- **Seed:** `{selected_row['Seed']}` | **Steps:** `{selected_row['Steps']}` **File:** `{selected_row['Filename']}` | **Dibuat:** `{selected_row['Timestamp']}` """ return details, gr.update(visible=True), gr.update(visible=False), selected_index def send_history_to_generator(selected_index: int, history_df: pd.DataFrame): if selected_index is None or history_df.empty: return gr.update(), gr.update(), gr.update(), gr.update(), gr.update() selected_row = history_df.iloc[selected_index] return ( selected_row['Prompt'], selected_row.get('NegativePrompt', ''), selected_row['Seed'], selected_row['Steps'], gr.Tabs(selected=0) ) def send_history_to_editor(selected_index: int, history_df: pd.DataFrame): if selected_index is None or history_df.empty: return gr.update(), gr.update() selected_row = history_df.iloc[selected_index] image_path = str(IMAGE_HISTORY_DIR / selected_row['Filename']) return Image.open(image_path), gr.Tabs(selected=5) def apply_image_edits(image, brightness, contrast, saturation, sharpness, filter_choice): if image is None: return None output_image = image.copy() if filter_choice == "Grayscale": output_image = output_image.convert("L").convert("RGB") elif filter_choice == "Sepia": pixels = output_image.load() width, height = output_image.size for y in range(height): for x in range(width): r, g, b = output_image.getpixel((x, y)) tr, tg, tb = int(0.393*r + 0.769*g + 0.189*b), int(0.349*r + 0.686*g + 0.168*b), int(0.272*r + 0.534*g + 0.131*b) pixels[x, y] = (min(255, tr), min(255, tg), min(255, tb)) enhancer = ImageEnhance.Brightness(output_image); output_image = enhancer.enhance(brightness) enhancer = ImageEnhance.Contrast(output_image); output_image = enhancer.enhance(contrast) enhancer = ImageEnhance.Color(output_image); output_image = enhancer.enhance(saturation) enhancer = ImageEnhance.Sharpness(output_image); output_image = enhancer.enhance(sharpness) return output_image # --- FUNGSI LOGIC CHATBOT BARU --- def add_new_chat(all_chats_history): new_chat_id = str(uuid.uuid4()) all_chats_history[new_chat_id] = { "title": "Percakapan Baru", "history": [(None, "Halo! Saya Flood, asisten AI dari RenXploit. Ada yang bisa saya bantu?")] } history_titles = [info['title'] for info in all_chats_history.values()] new_title = all_chats_history[new_chat_id]['title'] return all_chats_history, new_chat_id, all_chats_history[new_chat_id]['history'], gr.update(choices=history_titles, value=new_title) def handle_user_message(user_message, chat_id, all_chats_history): if not chat_id or chat_id not in all_chats_history: all_chats_history, chat_id, current_history, _ = add_new_chat({}) current_history = all_chats_history[chat_id]['history'] if len(current_history) == 1 and current_history[0][0] is None: current_history = [] current_history.append((user_message, None)) all_chats_history[chat_id]['history'] = current_history if len(current_history) == 1: new_title = user_message[:30] + '...' if len(user_message) > 30 else user_message all_chats_history[chat_id]['title'] = new_title history_titles = [info['title'] for info in all_chats_history.values()] yield all_chats_history, current_history, gr.update(choices=history_titles, value=all_chats_history[chat_id]['title']), gr.update(value=""), gr.update(visible=False) bot_response = gemini_bot.chat(user_message, [h for h in current_history if h[0] is not None]) current_history[-1] = (user_message, bot_response) all_chats_history[chat_id]['history'] = current_history yield all_chats_history, current_history, gr.update(), gr.update(), gr.update(visible=True) def switch_chat(selected_title, all_chats_history): if not selected_title: return None, [], gr.update(visible=False) selected_id = None for chat_id, info in all_chats_history.items(): if info['title'] == selected_title: selected_id = chat_id break if selected_id: return selected_id, all_chats_history[selected_id]['history'], gr.update(visible=True) return None, [], gr.update(visible=False) # --- ANTARMUKA PENGGUNA (GRADIO UI) --- with gr.Blocks(theme=gr.themes.Base(), head=HEAD_HTML) as demo: gr.Markdown("# 🚀 RenXploit's Creative AI Suite 🌌", elem_id="main-title") gr.Markdown("Sebuah platform lengkap untuk kreativitas Anda, ditenagai oleh AI.", elem_id="main-subtitle") chat_history_state = gr.State({}) current_chat_id_state = gr.State(None) selected_history_index_state = gr.State(None) with gr.Tabs() as tabs: with gr.TabItem("🎨 Image Generator", id=0): with gr.Row(variant='panel', equal_height=False): with gr.Column(scale=1): gr.Markdown("### 📝 **Masukan Perintah Anda**") prompt_input = gr.Textbox(label="Prompt", placeholder="Contoh: Cinematic photo, seekor rubah merah...", lines=3, info="Jadilah sangat spesifik! Atau gunakan Prompt Enhancer.") negative_prompt_input = gr.Textbox(label="Prompt Negatif", placeholder="Contoh: blurry, low quality, bad hands...", lines=2, info="Hal-hal yang TIDAK Anda inginkan.") num_images_slider = gr.Slider(minimum=1, maximum=8, value=2, step=1, label="Jumlah Gambar") generate_btn = gr.Button("✨ Hasilkan Gambar!", variant="primary") with gr.Accordion("âš™ī¸ Opsi Lanjutan", open=False): steps_slider = gr.Slider(minimum=1, maximum=5, value=2, step=1, label="Langkah Iterasi (Kualitas vs Kecepatan)") with gr.Row(): seed_input = gr.Number(label="Seed", value=-1, precision=0, info="Gunakan -1 untuk acak.") random_seed_btn = gr.Button("🎲 Acak", variant="secondary") with gr.Column(scale=2): gr.Markdown("### đŸ–ŧī¸ **Hasil Generasi**") output_gallery = gr.Gallery(label="Hasil Gambar", show_label=False, elem_id="gallery", columns=2, object_fit="contain", height="auto") loader_html = gr.HTML(visible=False) info_box = gr.Textbox(label="Informasi Generasi", visible=False, interactive=False, lines=2) with gr.TabItem("đŸ’Ŧ Chat with AI", id=1): with gr.Row(variant='panel'): with gr.Column(scale=1, min_width=250) as sidebar: gr.Markdown("### đŸ—‚ī¸ Riwayat Chat") new_chat_btn = gr.Button("➕ Obrolan Baru", variant="primary") with gr.Group(elem_id="chat-history-sidebar"): chat_history_list = gr.Radio(label="Pilih Obrolan", choices=[], interactive=True) with gr.Row(visible=False) as chat_action_buttons: share_chat_btn = gr.Button("🔗 Bagikan Chat") share_chat_output = gr.Textbox(label="URL untuk Dibagikan", lines=4, interactive=True, show_copy_button=True, visible=False) with gr.Column(scale=3): gr.Markdown("### 🤖 **Asisten AI Flood**") if not gemini_bot.is_configured: gr.Warning("Fitur Chatbot dinonaktifkan. API Key Gemini tidak terkonfigurasi.") else: with gr.Column(elem_id="chatbot-container"): chatbot_display = gr.Chatbot(elem_id="chatbot-display", label="Flood AI", bubble_full_width=False) with gr.Row(): user_chat_input = gr.Textbox(show_label=False, placeholder="Ketik pesan Anda di sini...", scale=5) send_chat_btn = gr.Button("Kirim", variant="secondary", scale=1) with gr.TabItem("✨ Prompt Enhancer", id=2): with gr.Row(variant='panel'): with gr.Column(): gr.Markdown("### đŸĒ„ **Ubah Ide Jadi Prompt Ajaib**\nCukup tulis ide sederhana, dan biarkan AI menyempurnakannya menjadi prompt yang detail dan artistik.") simple_prompt_input = gr.Textbox(label="Ide Sederhana Anda", placeholder="Contoh: seekor astronot di hutan alien", lines=3) enhance_btn = gr.Button("Buat Prompt Ajaib!", variant="primary") enhanced_prompt_output = gr.Textbox(label="Prompt yang Disempurnakan", lines=5, interactive=True, show_copy_button=True) send_to_gen_btn = gr.Button("âžĄī¸ Kirim & Pindah ke Generator") with gr.TabItem("🚀 AI Image Upscaler", id=3): with gr.Row(variant='panel', equal_height=False): with gr.Column(): gr.Markdown("### **Tingkatkan Resolusi Gambar**\nUnggah gambar untuk meningkatkan kualitas dan ukurannya hingga 4x lipat menggunakan AI.") image_to_upscale_input = gr.Image(type="pil", label="Unggah Gambar Anda di Sini") clarity_slider = gr.Slider(minimum=1.0, maximum=3.0, value=1.0, step=0.1, label="Tingkat Peningkatan Kejernihan", info="Setelah di-upscale 4x, atur kejernihan gambar di sini. 1.0 = Tanpa efek.") upscale_btn = gr.Button("Tingkatkan Resolusi!", variant="primary") with gr.Column(): gr.Markdown("### **Hasil Peningkatan Resolusi**") upscaled_image_output = gr.Image(label="Gambar Hasil Upscale", interactive=False, show_download_button=True) upscale_status_text = gr.Markdown("Status: Menunggu gambar...") with gr.TabItem("đŸ–ŧī¸ Galeri & Riwayat", id=4) as history_tab: with gr.Row(variant='panel'): with gr.Column(scale=2): gr.Markdown("### **Galeri Hasil Generasi Anda**") history_gallery = gr.Gallery(label="Riwayat Gambar", show_label=False, columns=4, object_fit="contain", height="auto", elem_id="history_gallery") history_df_state = gr.State() with gr.Column(scale=1): gr.Markdown("### **Detail & Aksi**") history_details_md = gr.Markdown("### ⓘ Detail Gambar\nKlik pada sebuah gambar untuk melihat detailnya.") refresh_history_btn = gr.Button("🔄 Segarkan Galeri", variant="secondary") with gr.Row(visible=False) as history_action_buttons: history_to_gen_btn = gr.Button("Kirim ke Generator") history_to_editor_btn = gr.Button("Kirim ke Editor") share_history_btn = gr.Button("🔗 Bagikan Detail") share_history_output = gr.Textbox(label="URL untuk Dibagikan", lines=4, interactive=True, show_copy_button=True, visible=False) with gr.TabItem("🎨 Image Editor", id=5): with gr.Row(variant='panel'): with gr.Column(scale=1): gr.Markdown("### **Toolkit Pasca-Produksi**") editor_input_image = gr.Image(type="pil", label="Unggah Gambar atau Kirim dari Riwayat") with gr.Accordion("Penyesuaian", open=True): brightness_slider = gr.Slider(minimum=0.5, maximum=1.5, value=1.0, step=0.05, label="Kecerahan") contrast_slider = gr.Slider(minimum=0.5, maximum=1.5, value=1.0, step=0.05, label="Kontras") saturation_slider = gr.Slider(minimum=0.0, maximum=2.0, value=1.0, step=0.05, label="Saturasi Warna") sharpness_slider = gr.Slider(minimum=0.0, maximum=3.0, value=1.0, step=0.1, label="Ketajaman") with gr.Accordion("Filter Cepat", open=True): filter_radio = gr.Radio(["None", "Grayscale", "Sepia"], label="Pilih Filter", value="None") with gr.Column(scale=1): gr.Markdown("### **Hasil Editing**") editor_output_image = gr.Image(label="Hasil Akhir", interactive=False, show_download_button=True) with gr.TabItem("📊 Visitor Monitor", id=6): with gr.Row(variant='panel'): with gr.Column(): gr.Markdown("### 📈 **Live Visitor Monitor**\nPantau jumlah total pengunjung aplikasi Anda secara real-time.") with gr.Row(): with gr.Column(scale=3): visitor_count_display = gr.Markdown("## 📈 Memuat data...") with gr.Column(scale=2): time_filter_radio = gr.Radio(["Semua Waktu", "1 Minggu Terakhir", "2 Minggu Terakhir", "3 Bulan Terakhir"], label="Tampilkan data untuk", value="Semua Waktu") refresh_btn = gr.Button("🔄 Segarkan Manual", variant="secondary") visitor_plot = gr.LinePlot(x="Timestamp", y="Total Pengunjung", title="Grafik Pertumbuhan Pengunjung", tooltip=['Timestamp', 'Total Pengunjung'], height=500, interactive=True) with gr.TabItem("âš™ī¸ System & Settings", id=7): with gr.Row(variant='panel'): with gr.Column(): gr.Markdown("### **Live System Monitor**") system_info_md = gr.Markdown("Memuat info sistem...") system_info_trigger_btn = gr.Button("Trigger System Info", visible=False, elem_id="system-info-trigger-btn") with gr.Column(): gr.Markdown("### **Pengaturan Aplikasi**") with gr.Accordion("Kualitas Model", open=True): gr.Radio(["FP16 (Cepat, Kualitas Baik)", "FP32 (Lambat, Kualitas Terbaik)"], value="FP16 (Cepat, Kualitas Baik)" if device == "cuda" else "FP32 (Lambat, Kualitas Terbaik)", label="Presisi Model Generator", interactive=False, info="Terkunci. Ditentukan saat aplikasi dimulai.") with gr.TabItem("💡 Panduan Prompting", id=8): with gr.Row(variant='panel'): gr.Markdown("""## Cara Menjadi "Art Director" yang Hebat untuk AI...\n (Konten panduan Anda di sini)""") with gr.TabItem("📖 Blog & Updates", id=9): with gr.Row(variant='panel'): gr.Markdown("""### Perkembangan Terbaru dari RenXploit's AI Suite v3.1 (Perbaikan Share URL): Mengimplementasikan sistem share berbasis URL Query Parameter yang fungsional. Kini URL share untuk chat dan gambar dapat dibuka dan menampilkan konten yang sesuai. v3.0 (Perbaikan Kritis): Memperbaiki bug pada fitur Share URL dan Share Detail Galeri yang disebabkan oleh perbedaan versi Gradio dan logika event yang salah. v2.8: Mengganti komponen `gr.Box` dengan `gr.Group` untuk kompatibilitas. v2.7: Perombakan total UI Chatbot dengan fitur riwayat, new chat, dan share.""") with gr.TabItem("â„šī¸ About & Support", id=10): with gr.Row(variant='panel'): with gr.Column(): gr.Markdown("### Tentang Proyek dan Dukungan") with gr.Accordion("Tentang RenXploit's Creative AI Suite", open=True): gr.Markdown(""" RenXploit's Creative AI Suite adalah proyek pribadi untuk mengeksplorasi AI generatif. Hubungi saya melalui: ngoprek.xyz/contact """) with gr.Accordion("Laporkan Masalah atau Beri Masukan"): report_name = gr.Textbox(label="Nama Anda") report_email = gr.Textbox(label="Email Anda (Opsional)") report_message = gr.Textbox(label="Pesan Anda", lines=5, placeholder="Jelaskan masalah atau ide Anda...") report_btn = gr.Button("Kirim Laporan", variant="primary") report_status = gr.Markdown(visible=False) gr.Markdown("---\n", elem_classes="footer") # --- PENANGANAN EVENT (EVENT HANDLERS DENGAN PERBAIKAN) --- # Kumpulan output untuk URL handler shared_chat_outputs = [chat_history_state, current_chat_id_state, chatbot_display, chat_history_list, tabs] shared_image_outputs = [prompt_input, negative_prompt_input, seed_input, steps_slider, tabs] @demo.load def process_url_and_init(request: gr.Request, current_chat_state): chat_return, image_return = handle_shared_url(request) # Cek apakah URL share chat diproses if chat_return[1] is not None: # Cek chat_id # Nilai-nilai ini akan mengisi komponen chat return chat_return + (gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY, gr.update()) # Cek apakah URL share image diproses elif image_return[0] != gr.State.IDENTITY: # Cek prompt # Jika tidak ada share chat, inisialisasi chat default new_chat_state, new_chat_id, new_history, chat_list_update = add_new_chat({}) return (new_chat_state, new_chat_id, new_history, chat_list_update, image_return[4]) + image_return[:-1] # Jika tidak ada URL share sama sekali, inisialisasi chat elif not current_chat_state: new_chat_state, new_chat_id, new_history, chat_list_update = add_new_chat({}) return (new_chat_state, new_chat_id, new_history, chat_list_update, gr.update()) + (gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY, gr.State.IDENTITY) # Jika state sudah ada dan tidak ada URL, jangan lakukan apa-apa return (gr.State.IDENTITY,) * len(shared_chat_outputs + shared_image_outputs) demo.load(log_visitor, inputs=None, outputs=None) demo.load(fn=update_system_info, inputs=None, outputs=system_info_md) random_seed_btn.click(lambda: -1, outputs=seed_input) generate_btn.click(fn=genie_wrapper, inputs=[prompt_input, negative_prompt_input, steps_slider, seed_input, num_images_slider], outputs=[output_gallery, loader_html, generate_btn, info_box]) new_chat_btn.click(fn=add_new_chat, inputs=[chat_history_state], outputs=[chat_history_state, current_chat_id_state, chatbot_display, chat_history_list]) send_chat_btn.click(fn=handle_user_message, inputs=[user_chat_input, current_chat_id_state, chat_history_state], outputs=[chat_history_state, chatbot_display, chat_history_list, user_chat_input, chat_action_buttons]) user_chat_input.submit(fn=handle_user_message, inputs=[user_chat_input, current_chat_id_state, chat_history_state], outputs=[chat_history_state, chatbot_display, chat_history_list, user_chat_input, chat_action_buttons]) chat_history_list.change(fn=switch_chat, inputs=[chat_history_list, chat_history_state], outputs=[current_chat_id_state, chatbot_display, chat_action_buttons]) share_chat_btn.click(fn=share_chat_history, inputs=[current_chat_id_state, chat_history_state], outputs=[share_chat_output]) enhance_btn.click(fn=enhance_prompt, inputs=[simple_prompt_input], outputs=[enhanced_prompt_output]) send_to_gen_btn.click(fn=lambda prompt: (prompt, gr.Tabs(selected=0)), inputs=[enhanced_prompt_output], outputs=[prompt_input, tabs]) upscale_btn.click(fn=upscale_image, inputs=[image_to_upscale_input, clarity_slider], outputs=[upscaled_image_output, upscale_status_text]) history_tab.select(fn=load_history, inputs=None, outputs=[history_gallery, history_df_state, history_details_md]) refresh_history_btn.click(fn=load_history, inputs=None, outputs=[history_gallery, history_df_state, history_details_md]) history_gallery.select(fn=show_history_details, inputs=[history_df_state], outputs=[history_details_md, history_action_buttons, share_history_output, selected_history_index_state]) history_to_gen_btn.click(fn=send_history_to_generator, inputs=[selected_history_index_state, history_df_state], outputs=[prompt_input, negative_prompt_input, seed_input, steps_slider, tabs]) history_to_editor_btn.click(fn=send_history_to_editor, inputs=[selected_history_index_state, history_df_state], outputs=[editor_input_image, tabs]) share_history_btn.click(fn=share_image_history, inputs=[selected_history_index_state, history_df_state], outputs=[share_history_output]) editor_inputs = [editor_input_image, brightness_slider, contrast_slider, saturation_slider, sharpness_slider, filter_radio] for slider in [brightness_slider, contrast_slider, saturation_slider, sharpness_slider]: slider.release(fn=apply_image_edits, inputs=editor_inputs, outputs=editor_output_image) filter_radio.change(fn=apply_image_edits, inputs=editor_inputs, outputs=editor_output_image) editor_input_image.change(fn=apply_image_edits, inputs=editor_inputs, outputs=editor_output_image) demo.load(fn=update_visitor_monitor, inputs=[time_filter_radio], outputs=[visitor_count_display, visitor_plot]) refresh_btn.click(fn=update_visitor_monitor, inputs=[time_filter_radio], outputs=[visitor_count_display, visitor_plot]) time_filter_radio.change(fn=update_visitor_monitor, inputs=[time_filter_radio], outputs=[visitor_count_display, visitor_plot]) system_info_trigger_btn.click(fn=update_system_info, inputs=None, outputs=system_info_md) report_btn.click(fn=submit_report, inputs=[report_name, report_email, report_message], outputs=[report_status]) # --- Menjalankan Aplikasi --- if __name__ == "__main__": demo.launch(debug=True)