import os
from pathlib import Path
import time
import gradio as gr
from gradio.themes import Soft
import numpy as np
import matplotlib.pyplot as plt
from huggingface_hub import hf_hub_download

from src.infer import load_model, predict

os.environ.setdefault("HF_HOME", str(Path.home() / ".cache" / "huggingface"))

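# Module-level cache for the loaded model so the checkpoint is downloaded
# and loaded only once per process.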
_model = None

def _warmup():
    global _model
    if _model is not None:
        return
    t0 = time.time()
    # Download the checkpoint from the Hugging Face Hub (cached locally after the first call).
    ckpt_path = hf_hub_download(
        repo_id="rhasan/UPLME",
        filename="UPLME_NewsEmp_tuned-lambdas.ckpt",
        repo_type="model"
    )
    # Store the result so subsequent calls return early (assumes load_model returns the loaded model).
    _model = load_model(ckpt_path)
    return f"Model loaded in {time.time() - t0:.1f} seconds."

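# Draw the prediction as a horizontal interval: a faint line for the full
# 0-100 axis, a thicker segment for the 95% CI, and a dot at the mean.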
def ci_plot(mean: float, low: float, upp: float):
    fig, ax = plt.subplots(figsize=(6, 1))
    ax.hlines(1, 0, 100, linewidth=2, alpha=0.15)  # faint baseline spanning the full score range
    ax.hlines(1, low, upp, linewidth=6)  # thicker segment marking the 95% CI
    ax.plot([mean], [1], "o")
    ax.set_xlim(0, 100)
    ax.set_yticks([])
    ax.set_xlabel("Empathy Score (0-100)")
    fig.tight_layout()
    return fig

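# Full inference pipeline for the UI: lazy warm-up, prediction, rescaling to
# the 0-100 range, and a 95% confidence interval from the predictive variance.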
def predict_with_ci(essay: str, article: str) -> tuple[float, float, float, plt.Figure]:
    _warmup()
    mean, var = predict(essay, article)
    # Scores are originally in [1, 7]; rescale them to [0, 100].
    mean = (mean - 1) / 6 * 100

    # The predictive variance is assumed to be on the original [1, 7] scale,
    # so the standard deviation is rescaled by the same linear factor (100 / 6).
    std = np.sqrt(var) * 100 / 6
    ci_low = max(0.0, mean - 1.96 * std)
    ci_upp = min(100.0, mean + 1.96 * std)
    fig = ci_plot(mean, ci_low, ci_upp)
    return mean, ci_low, ci_upp, fig


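# UI layout: essay and article inputs on the left; predicted score, CI bounds,
# and the interval plot on the right.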
with gr.Blocks(title="UPLME", theme=Soft(primary_hue="blue")) as demo:
    gr.Markdown("# Empathy Prediction with Uncertainty Estimation")
    with gr.Row():
        with gr.Column():
            essay_input = gr.Textbox(label="Response (e.g., an essay) to the stimulus", lines=10, placeholder="Enter the essay text here...")
            article_input = gr.Textbox(label="Stimulus (e.g., a news article)", lines=10, placeholder="Enter the article text here...")
            button = gr.Button("Predict")
        with gr.Column():
            output_mean = gr.Number(label="Predicted Empathy Score (0-100)", precision=2)
            ci_low = gr.Number(label="95% CI Lower Bound", precision=2)
            ci_upp = gr.Number(label="95% CI Upper Bound", precision=2)
        
            fig = gr.Plot(label="Prediction +/- 95% CI")

        button.click(fn=predict_with_ci, inputs=[essay_input, article_input], outputs=[output_mean, ci_low, ci_upp, fig])

    gr.Markdown("## About")
    gr.Markdown("""
    This application predicts empathy score and uncertainty estimates using the UPLME model proposed in **UPLME: Uncertainty-Aware Probabilistic Language Modelling for Robust Empathy Regression** by **Md Rakibul Hasan, Md Zakir Hossain, Aneesh Krishna, Shafin Rahman and Tom Gedeon**.
    - Paper: https://arxiv.org/abs/2508.03520
    - Code: https://github.com/hasan-rakibul/UPLME
    """)

if __name__ == "__main__":
    demo.launch()