|
|
|
import gradio as gr |
|
import torch |
|
from transformers import AutoTokenizer, AutoModelForCausalLM |
|
import json |
|
import os |
|
|
|
|
|
# Hugging Face model id for the 2B-parameter, Python-focused CodeGen model.
# NOTE(review): first run downloads several GB of weights — network required.
model_name = "Salesforce/codegen-2B-mono"

tokenizer = AutoTokenizer.from_pretrained(model_name)

model = AutoModelForCausalLM.from_pretrained(model_name)




# Run inference on CPU (no GPU assumed in the deployment environment).
device = torch.device("cpu")

model.to(device)
|
|
|
|
|
# Path of the on-disk JSON cache of previously generated responses.
CACHE_FILE = "cache.json"

# In-memory cache mapping str((prompt, language)) -> generated text.
cache = {}

# Load any previously persisted cache. A missing, corrupt, or malformed
# file is treated as an empty cache instead of crashing at import time.
if os.path.exists(CACHE_FILE):
    try:
        with open(CACHE_FILE, "r", encoding="utf-8") as f:
            cache = json.load(f)
    except (json.JSONDecodeError, OSError):
        cache = {}
    # Guard against a valid-JSON file that is not an object (e.g. a list).
    if not isinstance(cache, dict):
        cache = {}
|
|
|
def code_assistant(prompt, language):
    """Generate a code-related response for *prompt*, optionally tailored to *language*.

    Args:
        prompt: The user's coding question or code snippet.
        language: Programming language selected in the UI; falsy to skip tagging.

    Returns:
        The generated text (which may echo the prompt, since ``max_length``
        counts prompt tokens), or a user-facing error string for bad input.
    """
    # Input validation: reject empty and overly long prompts up front.
    if not prompt.strip():
        return "⚠️ Error: The input prompt cannot be empty. Please provide a coding question or code snippet."
    if len(prompt) > 1024:
        return "⚠️ Error: The input prompt is too long. Please limit it to 1024 characters."

    # Build the string key once (JSON requires string keys) and serve
    # from cache when this exact (prompt, language) pair was seen before.
    cache_key = str((prompt, language))
    if cache_key in cache:
        return cache[cache_key]

    # Prefix a language tag so the model can tailor its completion.
    if language:
        prompt = f"[{language}] {prompt}"

    inputs = tokenizer(prompt, return_tensors="pt").to(device)

    # Inference only: disable gradient tracking to avoid building an
    # autograd graph.  Pass the attention mask and an explicit pad token
    # id (CodeGen has no dedicated pad token) to silence generate()'s
    # padding ambiguity and ensure correct masking.
    with torch.no_grad():
        outputs = model.generate(
            inputs.input_ids,
            attention_mask=inputs.attention_mask,
            max_length=256,
            temperature=0.1,
            top_p=0.8,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )

    generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)

    # Simple FIFO eviction: keep at most 10 entries.  Dicts preserve
    # insertion order, so next(iter(cache)) is the oldest key.
    if len(cache) >= 10:
        cache.pop(next(iter(cache)))
    cache[cache_key] = generated_text

    # Persist the cache so responses survive restarts.
    with open(CACHE_FILE, "w", encoding="utf-8") as f:
        json.dump(cache, f)

    return generated_text
|
|
|
|
|
# Custom CSS injected into the Gradio page: centered text, a blue title,
# a fade-in animation on the output box, and hover styling for buttons.
css = """
/* Center-align all text in the input and output boxes */
input, textarea, .output_text {
    text-align: center;
}

/* Style the main title */
h1 {
    color: #1e90ff;
    font-family: 'Arial', sans-serif;
    text-align: center;
    font-weight: bold;
}

/* Style the description */
.description {
    color: #555;
    font-family: 'Arial', sans-serif;
    text-align: center;
    margin-bottom: 20px;
}

/* Output box animation */
.output_text {
    color: #1e90ff;
    animation: fadeIn 2s ease-in-out;
}

/* Add fade-in animation */
@keyframes fadeIn {
    0% { opacity: 0; }
    100% { opacity: 1; }
}

/* Hover effect for the submit button */
button {
    background-color: #1e90ff;
    color: white;
    font-weight: bold;
    border: none;
    padding: 10px 20px;
    border-radius: 5px;
    transition: background-color 0.3s ease;
}

button:hover {
    background-color: #104e8b;
    cursor: pointer;
}
"""
|
|
|
|
|
# HTML fragment rendered as the app's title (styled by the `h1` CSS rule).
title_html = """
<h1>💻 CodeBand: AI Code Assistant</h1>
"""

# HTML fragment rendered below the title (styled by the `.description` rule).
description_html = """
<p class="description">An AI-powered assistant for coding queries, debugging, and code generation.
Choose a programming language for more tailored responses. Limited to 1024 characters.</p>
"""
|
|
|
|
|
# UI inputs: a multi-line prompt box and a programming-language selector.
prompt_box = gr.Textbox(
    lines=5,
    placeholder="Ask a coding question or paste your code here...",
)
language_dropdown = gr.Dropdown(
    choices=["Python", "JavaScript", "Java", "C++", "HTML", "CSS", "SQL", "Other"],
    label="Programming Language",
)

# Wire the assistant function into a Gradio interface with the custom
# title, description, and CSS defined above.
iface = gr.Interface(
    fn=code_assistant,
    inputs=[prompt_box, language_dropdown],
    outputs="text",
    title=title_html,
    description=description_html,
    css=css,
)

# Start the web server (blocks until the app is stopped).
iface.launch()
|
|