# import dependencies
import gradio as gr
from openai import OpenAI
import os
import re

# read the OpenAI API key from the environment rather than hardcoding it in the source
api_key = os.getenv("OPENAI_API_KEY")

# make an instance of the OpenAI client
client = OpenAI(api_key=api_key)

# fine-tuned model instance
finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"
# function to humanize the text
def humanize_text(AI_text):
    """Humanizes the provided AI text using the fine-tuned model."""
    response = client.chat.completions.create(
        model=finetuned_model,
        temperature=0.90,
        messages=[
            {"role": "system", "content": """
            You are a text humanizer.
            You humanize AI generated text.
            The text must appear like humanly written.
            THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
            THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
            {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
            {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {AI_text}"}
        ]
    )
    humanized_text = response.choices[0].message.content.strip()
    return humanized_text
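# Example direct call (hypothetical, for quick debugging outside the Gradio UI;
# the sample string below is not from the original app):
#   print(humanize_text("Artificial intelligence is transforming many industries."))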
# Gradio interface definition
interface = gr.Interface(
    fn=humanize_text,
    inputs="textbox",
    outputs="textbox",
    title="AI Text Humanizer: NoaiGPT.com Demo",
    description="Enter AI-generated text and get a human-written version.",
)

# Launch the Gradio app
interface.launch(debug=True)
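# Usage note (an assumption, not part of the original file): the client above reads the key
# from the OPENAI_API_KEY environment variable. On Hugging Face Spaces that typically means
# adding a repository secret with that name; locally, for example:
#   export OPENAI_API_KEY="sk-..."
#   python app.py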
# # import gradio as gr
# # from openai import OpenAI
# # import os
# # import re
# # from transformers import pipeline

# # # read the openai key from the environment
# # api_key = os.getenv("OPENAI_API_KEY")

# # # make an instance of the openai client
# # client = OpenAI(api_key=api_key)

# # # finetuned model instance
# # finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

# # # Load the AI detection model
# # pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")

# # # Define the function to get predictions
# # def get_prediction(text):
# #     return pipe(text)[0]

# # # function to humanize the text
# # def humanize_text(AI_text):
# #     """Humanizes the provided AI text using the fine-tuned model."""
# #     humanized_text = AI_text
# #     attempts = 0
# #     max_attempts = 5
# #     while attempts < max_attempts:
# #         response = client.chat.completions.create(
# #             model=finetuned_model,
# #             temperature=0.85,
# #             messages=[
# #                 {"role": "system", "content": """
# #                 You are a text humanizer.
# #                 You humanize AI generated text.
# #                 The text must appear like humanly written.
# #                 THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
# #                 THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
# #                 {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
# #                 {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
# #             ]
# #         )
# #         humanized_text = response.choices[0].message.content.strip()

# #         # Check if the humanized text is still detected as AI
# #         prediction = get_prediction(humanized_text)
# #         if prediction['label'] != 'AI':
# #             break
# #         attempts += 1

# #     return humanized_text

# # # Gradio interface definition
# # interface = gr.Interface(
# #     fn=humanize_text,
# #     inputs="textbox",
# #     outputs="textbox",
# #     title="AI Text Humanizer: NoaiGPT.com Demo",
# #     description="Enter AI-generated text and get a human-written version.",
# # )

# # # Launch the Gradio app
# # interface.launch(debug=True)
# import gradio as gr
# from openai import OpenAI
# import os
# import re
# from transformers import pipeline

# # read the openai key from the environment
# api_key = os.getenv("OPENAI_API_KEY")

# # make an instance of the openai client
# client = OpenAI(api_key=api_key)

# # finetuned model instance
# finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

# # Load the AI detection model
# pipe = pipeline("text-classification", model="tommyliphys/ai-detector-distilbert")

# # Define the function to get predictions
# def get_prediction(text):
#     return pipe(text)[0]

# # Function to clean the text
# def clean_text(text):
#     # Remove double asterisks
#     text = re.sub(r'\*\*', '', text)
#     # Remove double hash symbols
#     text = re.sub(r'##', '', text)
#     return text

# # function to humanize the text
# def humanize_text(AI_text):
#     """Humanizes the provided AI text using the fine-tuned model."""
#     humanized_text = AI_text
#     attempts = 0
#     max_attempts = 10
#     while attempts < max_attempts:
#         response = client.chat.completions.create(
#             model=finetuned_model,
#             temperature=0.90,
#             messages=[
#                 {"role": "system", "content": """
#                 You are a text humanizer.
#                 You humanize AI generated text.
#                 The text must appear like humanly written.
#                 THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
#                 THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
#                 {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
#                 {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
#             ]
#         )
#         humanized_text = response.choices[0].message.content.strip()

#         # Check if the humanized text is still detected as AI
#         prediction = get_prediction(humanized_text)
#         if prediction['label'] == 'human' and prediction['score'] > 0.9:
#             break
#         attempts += 1

#     # Clean the humanized text
#     cleaned_text = clean_text(humanized_text)
#     return cleaned_text

# # Gradio interface definition
# interface = gr.Interface(
#     fn=humanize_text,
#     inputs="textbox",
#     outputs="textbox",
#     title="AI Text Humanizer: NoaiGPT.com Demo",
#     description="Enter AI-generated text and get a human-written version.",
# )

# # Launch the Gradio app
# interface.launch(debug=True)
# import gradio as gr
# from openai import OpenAI
# import os
# import re

# # read the openai key from the environment
# api_key = os.getenv("OPENAI_API_KEY")

# # make an instance of the openai client
# client = OpenAI(api_key=api_key)

# # finetuned model instance
# finetuned_model = "ft:gpt-3.5-turbo-0125:personal::9qGC8cwZ"

# # Function to clean the text
# def clean_text(text):
#     # Remove double asterisks
#     text = re.sub(r'\*\*', '', text)
#     # Remove double hash symbols
#     text = re.sub(r'##', '', text)
#     return text

# # function to humanize the text
# def humanize_text(AI_text):
#     """Humanizes the provided AI text using the fine-tuned model."""
#     humanized_text = AI_text
#     attempts = 0
#     max_attempts = 10
#     while attempts < max_attempts:
#         response = client.chat.completions.create(
#             model=finetuned_model,
#             temperature=0.90,
#             messages=[
#                 {"role": "system", "content": """
#                 You are a text humanizer.
#                 You humanize AI generated text.
#                 The text must appear like humanly written.
#                 THE INPUT AND THE OUTPUT TEXT SHOULD HAVE THE SAME FORMAT.
#                 THE HEADINGS AND THE BULLETS IN THE INPUT SHOULD REMAIN IN PLACE"""},
#                 {"role": "user", "content": "THE LANGUAGE OF THE INPUT AND THE OUTPUT MUST BE SAME. THE SENTENCES SHOULD NOT BE SHORT LENGTH - THEY SHOULD BE SAME AS IN THE INPUT. ALSO THE PARAGRAPHS SHOULD NOT BE SHORT EITHER - PARAGRAPHS MUST HAVE THE SAME LENGTH"},
#                 {"role": "user", "content": f"Humanize the text. Keep the output format i.e. the bullets and the headings as it is and dont use the list of words that are not permissible. \nTEXT: {humanized_text}"}
#             ]
#         )
#         humanized_text = response.choices[0].message.content.strip()
#         attempts += 1

#     # Clean the humanized text
#     cleaned_text = clean_text(humanized_text)
#     return cleaned_text

# # Gradio interface definition
# interface = gr.Interface(
#     fn=humanize_text,
#     inputs="textbox",
#     outputs="textbox",
#     title="AI Text Humanizer: NoaiGPT.com Demo",
#     description="Enter AI-generated text and get a human-written version.",
# )

# # Launch the Gradio app
# interface.launch(debug=True)