import re
from urllib.parse import urlparse

import numpy as np
import pandas as pd
import requests
import streamlit as st

from ask_questions import answer_question


def extract_url_from_string(string):
    """Return the first http(s) URL found in `string`, or None if there is no match."""
    match = re.search(r"(?P<url>https?://[^\s]+)", string)
    return match.group("url") if match else None
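# Example (hypothetical input):
# extract_url_from_string("see https://example.com/page for details")
# returns "https://example.com/page"; a string with no URL returns None.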

def process_data(data):
    """Collect the failing Lighthouse audits from a PageSpeed Insights response."""
    audits = list(data["lighthouseResult"]["audits"].values())

    # Embeddings for the (currently disabled) answer_question() call below;
    # they are stored as stringified lists in the CSV.
    df = pd.read_csv('processed/embeddings.csv', index_col=0)
    df['embeddings'] = df['embeddings'].apply(eval).apply(np.array)

    issues = []
    for audit in audits:
        # Keep only audits that actually failed: skip not-applicable and
        # informative audits, perfect or missing scores, and audits without details.
        if (audit["scoreDisplayMode"] not in ("notApplicable", "informative")
                and audit["score"] is not None
                and audit["score"] != 1
                and "details" in audit):
            title = audit["title"]
            desc = audit["description"]
            items = audit["details"].get("items", [])
            item = items[0] if items else None  # some audits report no items
            type_of_issue = audit["details"]["type"]
            issues.append({"title": title, "description": desc, "item": item, "type": type_of_issue})
            print(title)
            print(type_of_issue)
            question = f"Title: {title}\nDescription: {desc}\nItem: {item}"
            #print(answer_question(df, question=question, debug=False))
            print("***********************************")
    return issues
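# A minimal sketch of the payload shape process_data() expects, handy for
# exercising it without an API call (the audit content below is made up;
# processed/embeddings.csv must still exist on disk):
#
# sample = {"lighthouseResult": {"audits": {"uses-text-compression": {
#     "title": "Enable text compression",
#     "description": "Text-based resources should be served with compression.",
#     "score": 0.5, "scoreDisplayMode": "numeric",
#     "details": {"type": "opportunity",
#                 "items": [{"url": "https://www.example.com/app.js"}]}}}}}
# process_data(sample)  # -> one issue dict for the audit above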
            

def generate_response(website_url, url="https://www.googleapis.com/pagespeedonline/v5/runPagespeed", api_key=None):
    print("Website: " + website_url)
    print()

    # Resolve the API key at call time; evaluating st.secrets in a default
    # argument would happen once at import, before secrets may be available.
    if api_key is None:
        api_key = st.secrets["page_speed_api_key"]

    # Use the hostname as the per-site cache key.
    name = urlparse(website_url).netloc

    # Return the cached response if this site was already fetched this session.
    if name in st.session_state:
        return st.session_state[name]

    params = {
        "url": website_url,
        "key": api_key,
        "category": ["performance", "accessibility", "best_practices", "seo"]
    }

    try:
        response = requests.get(url, params=params)
        response.raise_for_status()  # Raise on HTTP errors

        data = response.json()
        st.session_state[name] = data  # Cache only successful responses
        return data

    except requests.exceptions.RequestException as e:
        print("Error:", e)
        return None
#for i in list_of_urls:
#    data = generate_response(i)
#    process_data(data)
#https://chat.openai.com/share/71d7a128-b56d-4368-9eee-beda874e4200
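
# Minimal end-to-end sketch, assuming this file is executed with
# `streamlit run` (st.secrets and st.session_state need the Streamlit
# runtime), that processed/embeddings.csv is present, and that the URL
# below is a placeholder:
if __name__ == "__main__":
    report = generate_response("https://www.example.com")
    if report:
        for issue in process_data(report):
            st.write(issue["title"])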