Commit 6be2d6b (verified) by hayuh · 1 Parent(s): a5d0a91

Upload 17 files
.gitattributes CHANGED
@@ -33,3 +33,9 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
  *.zip filter=lfs diff=lfs merge=lfs -text
  *.zst filter=lfs diff=lfs merge=lfs -text
  *tfevents* filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/2024_EDS_2.pdf filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/2024_EDS_3.pdf filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/2024_EDS_4.pdf filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/2024_EDS_5.pdf filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/Unknown_EDS_1.pdf filter=lfs diff=lfs merge=lfs -text
+ Ehlers-Danlos-1/Unknown_EDS_5.pdf filter=lfs diff=lfs merge=lfs -text
Ehlers-Danlos-1/2024_EDS_1.pdf ADDED
The diff for this file is too large to render.
 
Ehlers-Danlos-1/2024_EDS_2.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:46fc736ff4174473e0a846b7ca8430c140d89cd2c9f663e105bc48b33f8d9c99
+ size 2616000
Ehlers-Danlos-1/2024_EDS_3.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:7fef5c8c375297158ad7ad63166405ca7ce4ac511371a8454fe9df972755b0fe
+ size 10344738
Ehlers-Danlos-1/2024_EDS_4.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:25db35c77fd6aeba6b15278671a462b30ffbb6f97eb5f221e0459f6d11c0f8ed
+ size 1071576
Ehlers-Danlos-1/2024_EDS_5.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:57ef98bcb445da6abda66de35204634bd81d8c6dcdf53bfc3be54447ec9ad0ad
+ size 2772421
Ehlers-Danlos-1/2024_EDS_6.pdf ADDED
Binary file (146 kB).
 
Ehlers-Danlos-1/2024_EDS_7.pdf ADDED
The diff for this file is too large to render.
 
Ehlers-Danlos-1/Unknown_EDS_1.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:dbeaf13d3298a00bc1c7acfba3177a0c639f677e0f0941452709fe60542052d4
+ size 21553835
Ehlers-Danlos-1/Unknown_EDS_2.pdf ADDED
Binary file (428 kB).
 
Ehlers-Danlos-1/Unknown_EDS_3.pdf ADDED
Binary file (817 kB).
 
Ehlers-Danlos-1/Unknown_EDS_4.pdf ADDED
Binary file (392 kB).
 
Ehlers-Danlos-1/Unknown_EDS_5.pdf ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:5c5a77524b6bb4dca40798af5ff3e3c622216a13ac21a60d9befce255977b47a
+ size 1847313
app.py ADDED
@@ -0,0 +1,104 @@
+ import os
+ import glob
+ from pathlib import Path
+ import gradio as gr
+ import nest_asyncio
+ import dill as pickle
+ import streamlit as st
+
+ # Ensure async compatibility in Jupyter
+ nest_asyncio.apply()
+
+ # Import OpenAI key with helper function
+ from helper import get_openai_api_key
+ OPENAI_API_KEY = get_openai_api_key()
+
+ # Define the path to the directory containing the PDF files
+ folder_path = 'Ehlers-Danlos-1'
+
+ # Get the list of all PDF files in the directory
+ pdf_files = glob.glob(os.path.join(folder_path, '*.pdf'))
+ print(pdf_files)
+
+ # Extract just the filenames (optional)
+ pdf_filenames = [os.path.basename(pdf) for pdf in pdf_files]
+ print(pdf_filenames)
+
+ # Import utilities
+ from utils import get_doc_tools
+
+ # Truncate function names if necessary
+ def truncate_function_name(name, max_length=64):
+     return name if len(name) <= max_length else name[:max_length]
+
+ # Path to save/load serialized tools
+ tools_cache_path = 'tools_cache.pkl'
+
+ # Initialize paper_to_tools_dict
+ paper_to_tools_dict = {}
+
+ # Check if the cache file exists and is not empty
+ if os.path.exists(tools_cache_path) and os.path.getsize(tools_cache_path) > 0:
+     try:
+         with open(tools_cache_path, 'rb') as f:
+             paper_to_tools_dict = pickle.load(f)
+     except EOFError:
+         print("Cache file is corrupted. Recreating tools.")
+         paper_to_tools_dict = {}
+ else:
+     print("Cache file does not exist or is empty. Recreating tools.")
+
+ # Create tools for each PDF if not loaded from cache
+ if not paper_to_tools_dict:
+     for pdf in pdf_files:
+         print(f"Getting tools for paper: {pdf}")
+         vector_tool, summary_tool = get_doc_tools(pdf, Path(pdf).stem)
+         paper_to_tools_dict[pdf] = [vector_tool, summary_tool]
+
+     # Save tools to cache
+     with open(tools_cache_path, 'wb') as f:
+         pickle.dump(paper_to_tools_dict, f)
+
+
+ # Combine all tools into a single list
+ all_tools = [t for pdf in pdf_files for t in paper_to_tools_dict[pdf]]
+
+ # Define an object index and retriever over these tools
+ from llama_index.core import VectorStoreIndex
+ from llama_index.core.objects import ObjectIndex
+
+ obj_index = ObjectIndex.from_objects(
+     all_tools,
+     index_cls=VectorStoreIndex,
+ )
+
+ obj_retriever = obj_index.as_retriever(similarity_top_k=3)
+
+ # Initialize the OpenAI LLM
+ from llama_index.llms.openai import OpenAI
+ llm = OpenAI(model="gpt-3.5-turbo")
+
+ # Set up the agent
+ from llama_index.core.agent import FunctionCallingAgentWorker
+ from llama_index.core.agent import AgentRunner
+
+ agent_worker = FunctionCallingAgentWorker.from_tools(
+     tool_retriever=obj_retriever,
+     llm=llm,
+     verbose=True
+ )
+ agent = AgentRunner(agent_worker)
+
+ # Define the function to query the agent
+ def ask_agent(question):
+     response = agent.query(question)
+     return str(response)
+
+ # Streamlit interface
+ st.title("EDS Research Agent")
+
+ question = st.text_input("Ask a question:")
+ if question:
+     answer = ask_agent(question)
+     st.write(answer)
+
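Not part of the committed app.py: a minimal sketch of exercising the agent outside the Streamlit UI, assuming the setup above has already run and OPENAI_API_KEY is available. The sample question is illustrative only.

# Sketch only (hypothetical): query the agent directly from a Python session.
if __name__ == "__main__":
    sample_question = "Which of the uploaded papers discuss EDS diagnostic criteria?"
    print(ask_agent(sample_question))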
helper.py ADDED
@@ -0,0 +1,13 @@
+ # Add your utilities or helper functions to this file.
+
+ import os
+ from dotenv import load_dotenv, find_dotenv
+
+ # These expect to find a .env file in the directory above the lesson; its format is (without the leading #): API_KEYNAME=AStringThatIsTheLongAPIKeyFromSomeService
+ def load_env():
+     _ = load_dotenv(find_dotenv())
+
+ def get_openai_api_key():
+     load_env()
+     openai_api_key = os.getenv("OPENAI_API_KEY")
+     return openai_api_key
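For reference, a hedged example of the .env file that helper.py expects to find one directory up; the key value below is a placeholder, not a real credential.

# .env (example layout only)
OPENAI_API_KEY=sk-your-key-here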
ragas_eval.py ADDED
@@ -0,0 +1,36 @@
+ import os
+ import sys
+ from helper import get_openai_api_key
+
+ venv_path = os.path.join(os.path.dirname(__file__), 'venv', 'Lib', 'python3.12', 'site-packages')
+ sys.path.append(venv_path)
+
+ os.environ["OPENAI_API_KEY"] = get_openai_api_key()
+
+ from langchain_community.document_loaders import DirectoryLoader
+ loader = DirectoryLoader("Ehlers-Danlos-1")
+ documents = loader.load()
+
+ for document in documents:
+     document.metadata['filename'] = document.metadata['source']
+
+ from ragas.testset.generator import TestsetGenerator
+ from ragas.testset.evolutions import simple, reasoning, multi_context
+ from langchain_openai import ChatOpenAI, OpenAIEmbeddings
+
+ # generator with openai models
+ generator_llm = ChatOpenAI(model="gpt-3.5-turbo")
+ critic_llm = ChatOpenAI(model="gpt-4")
+ embeddings = OpenAIEmbeddings()
+
+ generator = TestsetGenerator.from_langchain(
+     generator_llm,
+     critic_llm,
+     embeddings
+ )
+
+ # generate testset
+ testset = generator.generate_with_langchain_docs(documents, test_size=10, distributions={simple: 0.5, reasoning: 0.25, multi_context: 0.25})
+ print("!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
+ print(testset)
+ testset.to_pandas()
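A side note, not part of the commit: the final testset.to_pandas() call returns a DataFrame that is discarded as written. A small sketch of keeping it, with an arbitrary output filename:

# Sketch only: persist the generated testset for later inspection.
df = testset.to_pandas()
df.to_csv("eds_ragas_testset.csv", index=False)  # filename is illustrative
print(df.head())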
test.py ADDED
@@ -0,0 +1,26 @@
+ import sys
+ import os
+
+ # Add the virtual environment's site-packages to sys.path
+ # Replace 'pythonX.Y' with your Python version, e.g., 'python3.8'
+ venv_path = os.path.join(os.path.dirname(__file__), 'venv', 'lib', 'site-packages')
+ sys.path.append(venv_path)
+
+ # Ensure the directory structure is recognized as a package
+ # You can verify by listing the contents of the directory
+ print("sys.path:", sys.path)
+ print("Contents of venv_path:", os.listdir(venv_path))
+
+ # Now import the TestsetGenerator
+ try:
+     from ragas.testset.generator import TestsetGenerator
+     print("Successfully imported TestsetGenerator.")
+ except ImportError as e:
+     print("ImportError:", e)
+
+ # Use the imported function or class
+ try:
+     generator = TestsetGenerator()
+     print("Successfully created a TestsetGenerator instance.")
+ except Exception as e:
+     print("Error creating TestsetGenerator instance:", e)
utils.py ADDED
@@ -0,0 +1,82 @@
+ from llama_index.core import SimpleDirectoryReader
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_index.core import Settings
+ from llama_index.llms.openai import OpenAI
+ from llama_index.embeddings.openai import OpenAIEmbedding
+ from llama_index.core import SummaryIndex, VectorStoreIndex
+ from llama_index.core.tools import QueryEngineTool
+ from llama_index.core.query_engine.router_query_engine import RouterQueryEngine
+ from llama_index.core.selectors import LLMSingleSelector
+
+ from llama_index.core import SimpleDirectoryReader, VectorStoreIndex, SummaryIndex
+ from llama_index.core.node_parser import SentenceSplitter
+ from llama_index.core.tools import FunctionTool, QueryEngineTool
+ from llama_index.core.vector_stores import MetadataFilters, FilterCondition
+ from typing import List, Optional
+
+
+
+ def get_doc_tools(
+     file_path: str,
+     name: str,
+ ):
+     """Get vector query and summary query tools from a document."""
+
+     # load documents
+     documents = SimpleDirectoryReader(input_files=[file_path]).load_data()
+     splitter = SentenceSplitter(chunk_size=1024)
+     nodes = splitter.get_nodes_from_documents(documents)
+     vector_index = VectorStoreIndex(nodes)
+
+     def vector_query(
+         query: str,
+         page_numbers: Optional[List[str]] = None
+     ) -> str:
+         """Use to answer questions over a given paper.
+
+         Useful if you have specific questions over the paper.
+         Always leave page_numbers as None UNLESS there is a specific page you want to search for.
+
+         Args:
+             query (str): the string query to be embedded.
+             page_numbers (Optional[List[str]]): Filter by set of pages. Leave as NONE
+                 if we want to perform a vector search
+                 over all pages. Otherwise, filter by the set of specified pages.
+
+         """
+
+         page_numbers = page_numbers or []
+         metadata_dicts = [
+             {"key": "page_label", "value": p} for p in page_numbers
+         ]
+
+         query_engine = vector_index.as_query_engine(
+             similarity_top_k=2,
+             filters=MetadataFilters.from_dicts(
+                 metadata_dicts,
+                 condition=FilterCondition.OR
+             )
+         )
+         response = query_engine.query(query)
+         return response
+
+
+     vector_query_tool = FunctionTool.from_defaults(
+         name=f"vector_tool_{name}",
+         fn=vector_query
+     )
+
+     summary_index = SummaryIndex(nodes)
+     summary_query_engine = summary_index.as_query_engine(
+         response_mode="tree_summarize",
+         use_async=True,
+     )
+     summary_tool = QueryEngineTool.from_defaults(
+         name=f"summary_tool_{name}",
+         query_engine=summary_query_engine,
+         description=(
+             f"Useful for summarization questions related to {name}"
+         ),
+     )
+
+     return vector_query_tool, summary_tool
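A hedged usage sketch, not part of the commit, mirroring the per-paper loop in app.py; the PDF path is illustrative and the exact tool-calling API may vary across llama_index versions.

# Sketch only: build and exercise the two tools for a single paper.
from pathlib import Path
from utils import get_doc_tools

pdf_path = "Ehlers-Danlos-1/2024_EDS_2.pdf"  # any PDF from the uploaded folder
vector_tool, summary_tool = get_doc_tools(pdf_path, Path(pdf_path).stem)

# Each tool wraps a query engine; call() runs a single query through it.
print(summary_tool.call("Give a one-paragraph summary of this paper."))
print(vector_tool.call(query="What diagnostic criteria are discussed?"))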