File size: 4,945 Bytes
d1ca2ad
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
import os
os.environ["TOKENIZERS_PARALLELISM"] = "false"

import streamlit as st
from st_aggrid import AgGrid
import pandas as pd 
from transformers import pipeline, T5ForConditionalGeneration, T5Tokenizer

st.set_page_config(layout="wide")

# Page-wide CSS: hide the default Streamlit header and tighten the layout.
style = '''
    <style>
        body {background-color: #F5F5F5; color: #000000;}
        header {visibility: hidden;}
        div.block-container {padding-top:4rem;}
        section[data-testid="stSidebar"] div:first-child {
        padding-top: 0;
    }
     .font {                                          
    text-align:center;
    font-family:sans-serif;font-size: 1.25rem;}
    </style>
'''
st.markdown(style, unsafe_allow_html=True)

st.markdown('<p style="font-family:sans-serif;font-size: 1.9rem;"> HertogAI Q&A table V1 using TAPAS and Text Generated</p>', unsafe_allow_html=True)
st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'>Pre-trained TAPAS model runs on max 64 rows and 32 columns data. Make sure the file data doesn't exceed these dimensions.</p>", unsafe_allow_html=True)

model_name = "t5-small"  # You can use a larger model or GPT as needed


# Streamlit re-runs this whole script on every widget interaction, so the
# heavy models must be loaded once per process and cached, not re-created
# on each rerun.
@st.cache_resource
def _load_models():
    """Load and return (tapas_pipeline, t5_tokenizer, t5_model) exactly once."""
    tapas = pipeline(task="table-question-answering",
                     model="google/tapas-large-finetuned-wtq",
                     device="cpu")
    t5_tokenizer = T5Tokenizer.from_pretrained(model_name)
    t5_model = T5ForConditionalGeneration.from_pretrained(model_name)
    return tapas, t5_tokenizer, t5_model


# Keep the original module-level names so the rest of the script is unchanged.
tqa, tokenizer, model = _load_models()

# Function to generate natural language from TAPAS output
def generate_nlp_from_tapas(tapas_output, df):
    """
    Turn a raw TAPAS answer dict into a readable sentence via the T5 model.

    ``tapas_output`` is expected to carry an ``answer`` string and a list of
    (row, col) ``coordinates`` into ``df``. Any failure is reported as an
    error string rather than raised, so the UI never crashes here.
    """
    try:
        # Pull the answer text and the cells TAPAS pointed at.
        answer = tapas_output['answer']
        coordinates = tapas_output['coordinates']
        answer_data = []
        for row, col in coordinates:
            answer_data.append(df.iloc[row, col])

        # Build the summarization prompt for the T5 model.
        prompt = f"Answer: {answer}. Data Location: Rows {coordinates}, Values: {answer_data}. Please summarize this information in a natural language sentence."

        # Encode, generate with beam search, and decode the best candidate.
        token_ids = tokenizer.encode(prompt, return_tensors="pt", truncation=True, max_length=512)
        generated = model.generate(token_ids, max_length=100, num_beams=5, early_stopping=True)
        return tokenizer.decode(generated[0], skip_special_tokens=True)
    except Exception as e:
        return f"Error generating response: {str(e)}"


file_name = st.sidebar.file_uploader("Upload file:", type=['csv', 'xlsx'])

if file_name is None:
    st.markdown('<p class="font">Please upload an excel or csv file </p>', unsafe_allow_html=True)
else:
    # Bind df up-front: if reading fails it stays None, and the Q&A section
    # below can guard on it (previously a read failure left `df` unbound and
    # the Answer button crashed with a NameError).
    df = None
    try:
        # Check file type and handle reading accordingly
        if file_name.name.endswith('.csv'):
            df = pd.read_csv(file_name, sep=';', encoding='ISO-8859-1')  # Adjust encoding if needed
        elif file_name.name.endswith('.xlsx'):
            df = pd.read_excel(file_name, engine='openpyxl')  # Use openpyxl to read .xlsx files
        else:
            st.error("Unsupported file type")

        # Continue with further processing if df is loaded
        if df is not None:
            # Best-effort conversion of text columns to numbers. Columns that
            # fail to parse are left untouched. (pd.to_numeric(errors='ignore')
            # is deprecated and removed in recent pandas, so convert per column.)
            for col in df.select_dtypes(include=['object']).columns:
                try:
                    df[col] = pd.to_numeric(df[col])
                except (ValueError, TypeError):
                    pass  # keep the original string column

            st.write("Original Data:")
            st.write(df)

            # Keep a numeric copy; TAPAS requires an all-string table.
            df_numeric = df.copy()
            df = df.astype(str)

            grid_response = AgGrid(
                df.head(5),
                columns_auto_size_mode='FIT_CONTENTS',
                editable=True,
                height=300,
                width='100%',
            )

    except Exception as e:
        st.error(f"Error reading file: {str(e)}")
        df = None

    question = st.text_input('Type your question')

    with st.spinner():
        if st.button('Answer'):
            if df is None:
                # Fail with a clear message instead of a NameError on `df`.
                st.warning("No table loaded - please upload a readable CSV or XLSX file first.")
            else:
                try:
                    # Get the raw answer from TAPAS
                    raw_answer = tqa(table=df, query=question, truncation=True)

                    st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Raw Result: </p>", unsafe_allow_html=True)
                    st.success(raw_answer)

                    # Use Hugging Face's T5 model to generate NLP text from TAPAS output
                    final_answer = generate_nlp_from_tapas(raw_answer, df)

                    # Display the generated answer in a simple format
                    st.markdown("<p style='font-family:sans-serif;font-size: 0.9rem;'> Generated Answer: </p>", unsafe_allow_html=True)
                    st.success(final_answer)

                except Exception as e:
                    st.warning(f"Error: {str(e)} - Please retype your question and ensure it is correctly formatted.")