Hugging Face Space — ATS Resume Optimizer (status: Sleeping)
# app.py - Fixed version with proper adapter loading
#
# Loads a PEFT (LoRA) adapter from the current directory on top of its base
# causal LM. If anything in the adapter path fails, falls back to the plain
# base model so the Space still serves requests.
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, BitsAndBytesConfig
from peft import PeftModel, PeftConfig
import os

# Base checkpoint used by the fallback path when the adapter config itself
# could not be read (otherwise we reuse the base declared by the adapter).
FALLBACK_BASE_MODEL = "mistralai/Mistral-7B-Instruct-v0.2"

print("π ATS Resume Optimizer - Starting...")

# Check adapter files
print("\nπ Files in current directory:")
for f in os.listdir("."):
    print(f"  - {f}")

# Load model with proper config
print("\nπ₯ Loading model configuration...")
peft_config = None  # set below; stays None if the adapter config fails to load
try:
    # Load PEFT config first to understand the adapter structure
    peft_config = PeftConfig.from_pretrained(".")
    print("β Adapter config loaded")

    # Load tokenizer
    print("\nπ₯ Loading tokenizer...")
    tokenizer = AutoTokenizer.from_pretrained(peft_config.base_model_name_or_path)
    tokenizer.pad_token = tokenizer.eos_token
    print("β Tokenizer loaded")

    # Load base model
    print("\nπ₯ Loading base model (this takes 2-3 minutes)...")
    model = AutoModelForCausalLM.from_pretrained(
        peft_config.base_model_name_or_path,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    print("β Base model loaded")

    # Load adapters with proper config
    print("\nπ₯ Loading your fine-tuned adapters...")
    model = PeftModel.from_pretrained(
        model,
        ".",
        config=peft_config,
    )
    model.eval()
    print("β Fine-tuned model loaded successfully!")
    MODEL_LOADED = True
except Exception as e:
    # Broad catch is deliberate: any failure (config, tokenizer, weights,
    # adapter) degrades to base-model-only mode instead of crashing the Space.
    print(f"β Error loading adapters: {e}")
    print("\nβ οΈ Falling back to base model only")
    # Bug fix: the old fallback hard-coded the base model id, which could
    # diverge from the base the adapter was trained on. Reuse the adapter's
    # declared base when we got far enough to read the config.
    base_id = peft_config.base_model_name_or_path if peft_config is not None else FALLBACK_BASE_MODEL
    tokenizer = AutoTokenizer.from_pretrained(base_id)
    tokenizer.pad_token = tokenizer.eos_token
    model = AutoModelForCausalLM.from_pretrained(
        base_id,
        torch_dtype=torch.float16,
        device_map="auto",
        low_cpu_mem_usage=True,
    )
    model.eval()  # inference mode, matching the adapter path above
    MODEL_LOADED = False
def analyze_resume(resume_text, job_description):
    """Generate an ATS compatibility analysis for a resume / job-description pair.

    Args:
        resume_text: Raw resume text pasted by the user.
        job_description: Raw job-description text pasted by the user.

    Returns:
        A human-readable report string. Validation failures and generation
        errors are reported inside the returned string rather than raised,
        because the result is rendered directly into the Gradio textbox.
    """
    # Validate inputs first so the user always gets actionable feedback,
    # regardless of whether the fine-tuned adapters loaded.
    if not resume_text or len(resume_text.strip()) < 50:
        return "β οΈ Please enter a resume (at least 50 characters)"
    if not job_description or len(job_description.strip()) < 30:
        return "β οΈ Please enter a job description (at least 30 characters)"

    # Bug fix: the previous version returned early here with a message ending
    # in "Analyzing with base Mistral-7B..." but never actually analyzed,
    # contradicting the UI note that promises analysis in fallback mode.
    # Now we prepend a notice and still run the base model.
    banner = ""
    if not MODEL_LOADED:
        banner = (
            "β οΈ **Using Base Model Only**\n"
            "The fine-tuned adapters couldn't be loaded. The model will still work "
            "but responses may be less specific to ATS optimization.\n\n---\n\n"
        )

    # Truncate to fit context
    resume_text = resume_text[:1500]
    job_description = job_description[:800]

    # NOTE(review): the tokenizer normally inserts the BOS token itself, so the
    # literal "<s>" here may be doubled — kept for parity with the fine-tuning
    # prompt format; confirm against the training data.
    prompt = f"""<s>[INST] Analyze this resume for ATS compatibility with the job description. Provide an ATS score, identify missing keywords, and suggest improvements.

RESUME:
{resume_text}

JOB DESCRIPTION:
{job_description} [/INST]
"""

    try:
        inputs = tokenizer(prompt, return_tensors="pt", truncation=True, max_length=2048)

        # Move to device
        if torch.cuda.is_available():
            inputs = {k: v.cuda() for k, v in inputs.items()}

        with torch.no_grad():
            outputs = model.generate(
                **inputs,
                max_new_tokens=800,
                temperature=0.7,
                top_p=0.9,
                do_sample=True,
                pad_token_id=tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id,
            )

        response = tokenizer.decode(outputs[0], skip_special_tokens=True)

        # Keep only the assistant's part of the decoded chat transcript.
        if "[/INST]" in response:
            response = response.split("[/INST]")[1].strip()

        return banner + response
    except Exception as e:
        return f"β Error: {str(e)}\n\nPlease try with shorter text."
# ---------------------------------------------------------------------------
# Demo inputs: pre-filled into the UI so visitors can try the tool instantly
# without pasting their own documents.
# ---------------------------------------------------------------------------
SAMPLE_RESUME = """Sarah Johnson
Email: [email protected] | Phone: (555) 234-5678
PROFESSIONAL SUMMARY
Software Engineer with 3+ years of experience in full-stack development.
TECHNICAL SKILLS
Languages: Python, JavaScript, TypeScript
Frontend: React, HTML5, CSS3
Backend: Node.js, Express
Databases: PostgreSQL, MongoDB
Tools: Git, Docker, AWS
EXPERIENCE
Software Engineer | TechCorp | 2021 - Present
β’ Built web applications serving 100K+ users
β’ Improved performance by 40%
β’ Implemented CI/CD pipelines
β’ Collaborated in Agile teams
Junior Developer | StartupXYZ | 2020 - 2021
β’ Developed REST APIs
β’ Created responsive UIs
β’ Fixed bugs and added features
EDUCATION
BS Computer Science | State University | 2020
"""

# Matching sample posting, deliberately overlapping SAMPLE_RESUME's skills so
# the demo analysis produces a meaningful score.
SAMPLE_JOB = """Position: Senior Full Stack Developer
Required Skills:
β’ React, TypeScript, JavaScript
β’ Node.js, Express
β’ MongoDB or PostgreSQL
β’ REST API design
β’ Git, Docker, AWS
β’ Agile methodologies
Experience: 3-5 years
Responsibilities:
β’ Design and develop web applications
β’ Write clean, maintainable code
β’ Code reviews and mentoring
β’ Architecture decisions
"""
# ---------------------------------------------------------------------------
# Gradio UI: two side-by-side inputs (resume / job description), one analyze
# button, and a single results textbox wired to analyze_resume().
# ---------------------------------------------------------------------------
with gr.Blocks(title="ATS Resume Optimizer", theme=gr.themes.Soft()) as demo:
    # Header / feature summary.
    gr.Markdown("""
# π― ATS Resume Optimizer
### AI-Powered Resume Analysis
Get instant feedback on your resume:
- β **ATS Compatibility Score**
- π **Missing Keywords**
- π‘ **Optimization Suggestions**
""")

    # Surface fallback mode to the user when the adapters failed to load.
    if not MODEL_LOADED:
        gr.Markdown("""
> β οΈ **Note:** Currently running with base model. Fine-tuned adapters couldn't be loaded.
> The tool will still provide useful analysis but may be less specific.
""")

    gr.Markdown("---")

    # Input area: resume on the left, job description on the right.
    with gr.Row():
        with gr.Column():
            gr.Markdown("### π Your Resume")
            resume_input = gr.Textbox(
                label="Paste Resume",
                placeholder="Copy and paste your resume...",
                lines=12,
                value=SAMPLE_RESUME,
            )
        with gr.Column():
            gr.Markdown("### πΌ Job Description")
            job_input = gr.Textbox(
                label="Paste Job Description",
                placeholder="Copy and paste job description...",
                lines=12,
                value=SAMPLE_JOB,
            )

    analyze_btn = gr.Button("π Analyze Resume", variant="primary", size="lg")

    # Output area.
    gr.Markdown("### π Analysis Results")
    output = gr.Textbox(
        label="ATS Analysis",
        lines=15,
        show_copy_button=True,
    )

    # Usage instructions and footer.
    gr.Markdown("""
---
### π‘ How to Use
1. **Paste your resume** in the left box (or try the sample)
2. **Paste job description** in the right box
3. Click **"Analyze Resume"**
4. Wait 1-2 minutes for analysis
### π¬ About This Tool
Built with Mistral-7B language model for intelligent resume analysis.
Identifies missing keywords and provides actionable suggestions.
**First analysis takes longer** as the model loads into memory.
---
π» **Tech Stack:** PyTorch β’ Transformers β’ PEFT β’ Gradio
π **Links:** [GitHub](#) | [LinkedIn](#) | [Portfolio](#)
""")

    # Wire the button click to the analysis function.
    analyze_btn.click(
        fn=analyze_resume,
        inputs=[resume_input, job_input],
        outputs=output,
    )

print("\nπ Launching Gradio interface...")
demo.launch()