#!/usr/bin/env python3
"""
Script to test a model and see what information is returned.

Usage:
    export CBORG_API_KEY=...
    python test_model_info.py <model_id>
"""

import os
import sys

from openai import OpenAI


def main():
    if len(sys.argv) < 2:
        print("Usage: python test_model_info.py <model_id>")
        print("Example: python test_model_info.py lbl/cborg-chat:latest")
        sys.exit(1)

    model_id = sys.argv[1]

    api_key = os.environ.get('CBORG_API_KEY')
    if not api_key:
        print("Error: CBORG_API_KEY environment variable not set.")
        sys.exit(1)

    # The CBorg gateway exposes an OpenAI-compatible API, so the standard
    # client works; only the base URL changes.
    client = OpenAI(
        api_key=api_key,
        base_url="https://api.cborg.lbl.gov"
    )

    try:
        print(f"Testing model: {model_id}")
        print("=" * 80)

        # Make a minimal test request
        response = client.chat.completions.create(
            model=model_id,
            messages=[{"role": "user", "content": "What is 2+2?"}],
            max_tokens=10
        )

        # response.model reports the model that actually served the request,
        # which may differ from the requested id if the gateway aliases it.
        print(f"\n{'UNDERLYING MODEL NAME:':<30} {response.model}")
        print(f"{'Requested model:':<30} {model_id}")

        if response.model != model_id:
            print(f"\n⚠️ Note: '{model_id}' maps to '{response.model}'")
        else:
            print("\n✓ Model name matches (no aliasing detected)")

        print(f"\n{'Response ID:':<30} {response.id}")
        print(f"{'Created (timestamp):':<30} {response.created}")
        print(f"{'Object type:':<30} {response.object}")

        # system_fingerprint is not returned by every backend, so guard for it.
        if hasattr(response, 'system_fingerprint') and response.system_fingerprint:
            print(f"{'System fingerprint:':<30} {response.system_fingerprint}")

        print("\nUSAGE STATISTICS:")
        print(f"{'  Prompt tokens:':<30} {response.usage.prompt_tokens}")
        print(f"{'  Completion tokens:':<30} {response.usage.completion_tokens}")
        print(f"{'  Total tokens:':<30} {response.usage.total_tokens}")

        print(f"\n{'Response content:':<30} {response.choices[0].message.content}")

        print("\n" + "=" * 80)
        print("Full response object:")
        print(response)

    except Exception as e:
        print(f"Error testing model: {e}")
        sys.exit(1)


if __name__ == '__main__':
    main()