Spaces: Runtime error
pip install transformers
pip install gradio
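On a Hugging Face Space these packages are usually declared in a requirements.txt rather than installed by hand, and a missing dependency is a common cause of a "Runtime error" status. A minimal sketch of that file (torch is an assumption here, since transformers needs a backend to load the model):

# requirements.txt (sketch)
transformers
torch
gradio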
from transformers import AutoTokenizer, AutoModelForCausalLM
import gradio as gr
import logging

# Set up logging for easier debugging
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
def load_model_and_tokenizer():
    try:
        # Load the tokenizer and model
        tokenizer = AutoTokenizer.from_pretrained("Salesforce/codegen-350M-multi")
        model = AutoModelForCausalLM.from_pretrained("Salesforce/codegen-350M-multi")
        return tokenizer, model
    except Exception as e:
        logger.error(f"Failed to load model or tokenizer: {e}")
        raise

# Initialize tokenizer and model
tokenizer, model = load_model_and_tokenizer()
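
# Not in the original script: putting the model in evaluation mode is the
# idiomatic setting for inference-only use and is safe to add here.
model.eval()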
def generate_code(prompt):
    try:
        # Tokenize the input text
        input_ids = tokenizer(prompt, return_tensors="pt").input_ids

        # Generate code based on the input text
        generated_ids = model.generate(
            input_ids,
            max_length=200,                       # total length including the prompt; adjust as needed
            num_return_sequences=1,               # number of generated sequences to return
            pad_token_id=tokenizer.eos_token_id,  # use EOS as the padding token
        )

        # Decode the generated tokens back to text
        generated_code = tokenizer.decode(generated_ids[0], skip_special_tokens=True)
        return generated_code
    except Exception as e:
        logger.error(f"Error during code generation: {e}")
        return "Error generating code. Please check the logs."
# Define the Gradio interface
iface = gr.Interface(
    fn=generate_code,
    inputs=gr.Textbox(lines=2, placeholder="Enter your code prompt here..."),
    outputs="text",
    title="Code Generator",
    description="Generate code snippets using the Salesforce CodeGen model.",
)
# Launch the Gradio app
if __name__ == "__main__":
    try:
        iface.launch()
    except Exception as e:
        logger.error(f"Error launching the Gradio app: {e}")