import json
import pandas as pd
import requests
from multiprocessing import Pool
from functools import partial
import streamlit as st

GITHUB_CODE = "https://huggingface.co./datasets/lvwerra/github-code"
INCODER_IMG = (
    "https://huggingface.co./datasets/loubnabnl/repo-images/raw/main/incoder.png"
)


@st.cache()
def load_examples():
    """Load the code generation examples shipped with the app."""
    with open("utils/examples.json", "r") as f:
        examples = json.load(f)
    return examples


def generate_code(model_name, gen_prompt, max_new_tokens, temperature, seed):
    """Query the model's Hugging Face Space API and return the generated text."""
    url = (
        f"https://hf.space/embed/loubnabnl/{model_name.lower()}-subspace/+/api/predict/"
    )
    r = requests.post(
        url=url, json={"data": [gen_prompt, max_new_tokens, temperature, seed]}
    )
    generated_text = r.json()["data"][0]
    return generated_text


st.set_page_config(page_icon=":laptop:", layout="wide")

# Sidebar: choose which code generation models to compare
st.sidebar.header("Models")
models = ["CodeParrot", "InCoder"]
selected_models = st.sidebar.multiselect(
    "Select code generation models to compare", models, default=["CodeParrot"]
)

# Sidebar: choose which section to display
st.sidebar.header("Tasks")
tasks = [
    " ",  # default: introduction page
    "Pretraining datasets",
    "Model architecture",
    "Model evaluation",
    "Code generation",
]
selected_task = st.sidebar.selectbox("Select a task", tasks)

# Landing page: display the introduction text
if selected_task == " ":
    st.title("Code Generation Models")
    with open("utils/intro.txt", "r") as f:
        intro = f.read()
    st.markdown(intro)