AMontiB committed
Commit 142f504 · 1 Parent(s): 8860a18

update ALL

__pycache__/app.cpython-310.pyc CHANGED
Binary files a/__pycache__/app.cpython-310.pyc and b/__pycache__/app.cpython-310.pyc differ
 
app.py CHANGED
@@ -34,6 +34,31 @@ if os.environ.get("SPACE_ID"):
 # Available detectors based on launcher.py
 DETECTORS = ['ALL', 'R50_TF', 'R50_nodown', 'CLIP-D', 'P2G', 'NPR']
 
+# Performance weights from Table III (Non-shared) and Table IV (Social Media: FB, TL, X)
+# Weights are averaged between Non-shared and Social Media performance.
+# R50_TF (R50-E2P):
+#   Real (TNR): (0.98 + 0.91) / 2 = 0.95
+#   Fake (TPR): (0.85 + 0.65) / 2 = 0.75
+# R50_nodown (R50-ND):
+#   Real (TNR): (1.00 + 0.97) / 2 = 0.98
+#   Fake (TPR): (0.79 + 0.76) / 2 = 0.77
+# CLIP-D:
+#   Real (TNR): (0.97 + 0.94) / 2 = 0.95
+#   Fake (TPR): (0.93 + 0.91) / 2 = 0.92
+# P2G:
+#   Real (TNR): (1.00 + 1.00) / 2 = 1.00
+#   Fake (TPR): (0.91 + 0.56) / 2 = 0.74
+# NPR:
+#   Real (TNR): (0.99 + 1.00) / 2 = 1.00
+#   Fake (TPR): (0.60 + 0.12) / 2 = 0.36
+DETECTOR_WEIGHTS = {
+    'R50_TF': {'real': 0.95, 'fake': 0.75},
+    'R50_nodown': {'real': 0.98, 'fake': 0.77},
+    'CLIP-D': {'real': 0.95, 'fake': 0.92},
+    'P2G': {'real': 1.00, 'fake': 0.74},
+    'NPR': {'real': 1.00, 'fake': 0.36}
+}
+
 def process_image(image_path):
     """
     Check if image is larger than 1024x1024 and central crop it if necessary.
@@ -136,8 +161,8 @@ def predict(image_path, detector_name):
     if not results:
         return "Error: No results obtained from detectors.", None
 
-    votes_real = 0
-    votes_fake = 0
+    votes_real = 0.0
+    votes_fake = 0.0
     confidences = []
     labels = []
     colors = []
@@ -160,12 +185,14 @@ def predict(image_path, detector_name):
         colors.append(color)
         total_conf += score
 
-        # Voting logic
+        # Weighted Voting logic
+        # Only count vote if confidence > 0.6
         if score > 0.6:
+            weights = DETECTOR_WEIGHTS.get(det, {'real': 1.0, 'fake': 1.0})
             if pred == 'fake':
-                votes_fake += 1
+                votes_fake += weights['fake']
             elif pred == 'real':
-                votes_real += 1
+                votes_real += weights['real']
 
     # Majority Voting
     if votes_real > votes_fake:
@@ -179,9 +206,9 @@ def predict(image_path, detector_name):
 
     # Explanation
     if verdict == "REAL":
-        explanation = f"Considering the results obtained by all models, the analyzed image results, with an average confidence of {avg_conf:.4f}, not produced by a generative AI."
+        explanation = f"Considering the results obtained by all models (weighted by their historical performance), the analyzed image results, with an average confidence of {avg_conf:.4f}, not produced by a generative AI."
     elif verdict == "FAKE":
-        explanation = f"Considering the results obtained by all models, the analyzed image results, with an average confidence of {avg_conf:.4f}, produced by a generative AI."
+        explanation = f"Considering the results obtained by all models (weighted by their historical performance), the analyzed image results, with an average confidence of {avg_conf:.4f}, produced by a generative AI."
     else:
         explanation = f"The result is uncertain. The detectors produced unconsistent results. The average confidence is {avg_conf:.4f}."
 
@@ -268,7 +295,8 @@ with demo:
 
     ### Understanding the Results produced by "ALL"
     * Runs all available detectors (R50_TF, R50_nodown, CLIP-D, P2G, NPR) sequentially on the input image.
-    * Produces a **Majority Vote** verdict (Real/Fake) considering only confident predictions (> 0.6). Also generates a **Confidence Plot** visualizing each model's score and a textual **Explanation** of the consensus.
+    * Produces a **Weighted Majority Vote** verdict (Real/Fake). Each model's vote is weighted by its historical performance (TPR for Fakes, TNR for Reals) on benchmark datasets. Only confident predictions (> 0.6) are counted.
+    * Also generates a **Confidence Plot** visualizing each model's score and a textual **Explanation** of the consensus.
     * In the plot, **Green** bars indicate a **Real** prediction, while **Red** bars indicate a **Fake** prediction.
 
 
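For readers who want to try the new voting rule outside the Gradio app, here is a minimal, self-contained sketch of the weighted majority vote this commit adds to `predict()`. The `weighted_vote` helper and the `sample_results` scores are illustrative only (they are not part of the repository); the weights and the > 0.6 confidence gate come from the diff above.

```python
# Standalone sketch of the weighted majority vote (illustrative, not app code).
DETECTOR_WEIGHTS = {
    'R50_TF':     {'real': 0.95, 'fake': 0.75},
    'R50_nodown': {'real': 0.98, 'fake': 0.77},
    'CLIP-D':     {'real': 0.95, 'fake': 0.92},
    'P2G':        {'real': 1.00, 'fake': 0.74},
    'NPR':        {'real': 1.00, 'fake': 0.36},
}

def weighted_vote(results, threshold=0.6):
    """Return 'REAL', 'FAKE', or 'UNCERTAIN' from {detector: (pred, score)} pairs."""
    votes_real = votes_fake = 0.0
    for det, (pred, score) in results.items():
        if score <= threshold:  # only confident predictions count
            continue
        weights = DETECTOR_WEIGHTS.get(det, {'real': 1.0, 'fake': 1.0})
        if pred == 'fake':
            votes_fake += weights['fake']
        elif pred == 'real':
            votes_real += weights['real']
    if votes_real > votes_fake:
        return 'REAL'
    if votes_fake > votes_real:
        return 'FAKE'
    return 'UNCERTAIN'

# Hypothetical scores: CLIP-D, P2G and NPR agree on "fake", so their summed
# weights (0.92 + 0.74 + 0.36 = 2.02) outvote R50_nodown's "real" (0.98).
sample_results = {
    'R50_TF':     ('real', 0.55),  # below the 0.6 gate, ignored
    'R50_nodown': ('real', 0.70),
    'CLIP-D':     ('fake', 0.95),
    'P2G':        ('fake', 0.88),
    'NPR':        ('fake', 0.65),
}
print(weighted_vote(sample_results))  # -> FAKE
```

Note how NPR's low fake weight (0.36) reflects its weak TPR on social-media images: a lone confident NPR "fake" vote can no longer override a confident "real" from any other detector, which is the point of replacing the unweighted count.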
verify_all_option.py CHANGED
@@ -41,10 +41,11 @@ def test_all_option():
 
     if isinstance(fig, plt.Figure):
         print("Figure created successfully.")
-        # Optional: check plot content if needed
+        if "weighted by their historical performance" in text:
+            print("Text output confirms weighted voting is used.")
     else:
-        print("Figure creation failed or None returned.")
-
+        print("Text output MISSING weighted voting confirmation.")
+
     expected_verdict = "produced by a generative AI" # Majority Fake
     if expected_verdict in text:
         print("Verdict seems correct (Fake majority).")
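For completeness, a rough usage sketch (not the repository's actual test) of exercising the `ALL` path end to end and looking for the same weighted-voting phrase. It assumes `predict()` returns the explanation text first and the figure second, as the early `return "Error: ...", None` in `app.py` suggests, and that a local `test.png` exists; both are assumptions, not facts from this diff.

```python
# Hedged sketch only: the return order of predict() and the test image path
# are assumptions made for illustration.
from app import predict

text, fig = predict("test.png", "ALL")

if "weighted by their historical performance" in text:
    print("Text output confirms weighted voting is used.")
else:
    print("Text output MISSING weighted voting confirmation.")
```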