Spaces:
Runtime error
Runtime error
decoupling and differentiating the 2 different chains
Browse files- structured_apparatus_chain.py +113 -0
- structured_experiment_chain.py +121 -0
structured_apparatus_chain.py
ADDED
@@ -0,0 +1,113 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
#TODO: make an agent that uses a HUMAN as a tool to get:
- Purpose of the science experiment
- What fields of study they already know of

#IDEA: Platform generates more in-depth experiments by generating a data set and generating / collecting scientific data

### Chatbot
the chatbot helps the BOUNTY_BOARD_CHAIN generate science experiments

### EXPERIMENT and Provide feedback on experiments

### Integration

- I need to integrate this code into the app. This includes creating an id for each post, and potentially a comment section for each "Experiment"
- In addition I need to generate a (mostly Pinecone) retriever to generate scientific experiments from the "community vector search"
- potentially have premium users store their private data, but I may not implement this during the hackathon
"""
19 |
+
|
20 |
+
# https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured
|
21 |
+
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
|
22 |
+
from langchain.prompts import PromptTemplate
|
23 |
+
from langchain_openai import ChatOpenAI
|
24 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
25 |
+
from langchain.memory import ConversationBufferMemory
|
26 |
+
from langchain_core.runnables import RunnablePassthrough
|
27 |
+
from langchain.retrievers import ArxivRetriever, pubmed
|
28 |
+
from langchain_core.output_parsers import StrOutputParser
|
29 |
+
from langchain.retrievers import ArxivRetriever
|
30 |
+
from langchain.retrievers import PubMedRetriever
|
31 |
+
from langchain.retrievers import WikipediaRetriever
|
32 |
+
from operator import itemgetter
|
# ---------------------------------------------------------------------------
# Structured-output schema: the model must return the materials needed to
# build the requested apparatus and the fields of study it serves.
# ---------------------------------------------------------------------------
response_schemas = [
    ResponseSchema(
        name="Material",
        description=(
            "The base components needed to create this item from scratch DIY. "
            "This item must be exact and not an estimation; also make sure "
            "each output has the object's name in context"
        ),
        type="list",
    ),
    ResponseSchema(
        name="Fields_of_study",
        description="List the fields of study this can be used for",
        type="list",
    ),
]

output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

# Conversation memory kept for chat-style use elsewhere; note it is NOT
# wired into the prompt below (PromptTemplate has no memory support).
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

format_instructions = output_parser.get_format_instructions()

# BUG FIX: the original passed `memory=memory` to PromptTemplate — not a
# valid PromptTemplate field, which raises a pydantic ValidationError at
# import time — and listed only "question" in input_variables although the
# template also references {context}, a second validation failure. Both
# made the module fail on load (the Space shows "Runtime error").
prompt = maker_prompt = PromptTemplate(
    template=(
        "You must generate a well detailed list of items for creating a "
        "given item from scratch. Also describe the purpose for a "
        "text-to-3d model to use for extra context"
        "\n{format_instructions}\n{question}\n{context}"
    ),
    input_variables=["question", "context"],
    partial_variables={"format_instructions": format_instructions},
)
def join_strings(*args: str) -> str:
    """Concatenate an arbitrary number of strings.

    Args:
        *args: The string fragments to combine, in order.

    Returns:
        str: All fragments joined with no separator.
    """
    combined = ""
    for fragment in args:
        combined += fragment
    return combined
def format_docs(docs):
    """Render retrieved documents as one blank-line-separated context string.

    Each document contributes its page content immediately followed by its
    'Entry ID' and 'Title' metadata (no separators), matching the original
    concatenation order.

    BUG FIX: metadata is read with .get() defaults — only Arxiv documents
    carry 'Entry ID'/'Title'; PubMed and Wikipedia results previously raised
    KeyError here.

    Args:
        docs: Iterable of objects with .page_content and a .metadata dict.

    Returns:
        str: One block per document, separated by blank lines.
    """
    return "\n\n".join(
        d.page_content
        + d.metadata.get("Entry ID", "")
        + d.metadata.get("Title", "")
        for d in docs
    )
# Retrievers: each pulls context documents from a different public corpus.
arxiv_retriever = ArxivRetriever(load_max_docs=2)

# model = ChatOpenAI(temperature=0)
model = ChatOpenAI(temperature=0, model="gpt-4")

# BUG FIX: the original re-instantiated ArxivRetriever here, silently
# rebinding arxiv_retriever to a second instance. Keep `retriver` (sic,
# name preserved for backward compatibility) as a plain alias instead.
retriver = arxiv_retriever

pub_med_retriever = PubMedRetriever()

wikipedia_retriever = WikipediaRetriever()
def _build_chain(retriever):
    """Assemble the shared LCEL pipeline for one retriever.

    Retrieved documents feed {context}, the raw query passes through as
    {question}, and the model reply is parsed into the structured schema.
    """
    return (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | output_parser
    )


arxiv_chain = _build_chain(arxiv_retriever)

pub_med_chain = _build_chain(pub_med_retriever)

wikipedia_chain = _build_chain(wikipedia_retriever)
if __name__ == "__main__":
    # Demo run: exercise two of the retrieval chains with the same query.
    query = "MicroScope"
    pub_med_data = pub_med_chain.invoke(query)
    wiki_data = wikipedia_chain.invoke(query)
    # Surface the structured results (replaces the previous dead `x = 0`).
    print(pub_med_data)
    print(wiki_data)
structured_experiment_chain.py
ADDED
@@ -0,0 +1,121 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
"""
#TODO: make an agent that uses a HUMAN as a tool to get:
- Purpose of the science experiment
- What fields of study they already know of

#IDEA: Platform generates more in-depth experiments by generating a data set and generating / collecting scientific data

### Chatbot
the chatbot helps the BOUNTY_BOARD_CHAIN generate science experiments

### EXPERIMENT and Provide feedback on experiments

### Integration

- I need to integrate this code into the app. This includes creating an id for each post, and potentially a comment section for each "Experiment"
- In addition I need to generate a (mostly Pinecone) retriever to generate scientific experiments from the "community vector search"
- potentially have premium users store their private data, but I may not implement this during the hackathon
"""
19 |
+
|
20 |
+
# https://python.langchain.com/docs/modules/model_io/output_parsers/types/structured
|
21 |
+
from langchain.output_parsers import ResponseSchema, StructuredOutputParser
|
22 |
+
from langchain.prompts import PromptTemplate
|
23 |
+
from langchain_openai import ChatOpenAI
|
24 |
+
from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
|
25 |
+
from langchain.memory import ConversationBufferMemory
|
26 |
+
from langchain_core.runnables import RunnablePassthrough
|
27 |
+
from langchain.retrievers import ArxivRetriever, pubmed
|
28 |
+
from langchain_core.output_parsers import StrOutputParser
|
29 |
+
from langchain.retrievers import ArxivRetriever
|
30 |
+
from langchain.retrievers import PubMedRetriever
|
31 |
+
from langchain.retrievers import WikipediaRetriever
|
32 |
+
from operator import itemgetter
|
# ---------------------------------------------------------------------------
# Structured-output schema for a full experiment write-up. Schema `name`
# keys are kept byte-identical (including misspellings such as "Protocal")
# because downstream consumers read the parsed dict by these keys; only the
# model-facing descriptions had their typos corrected.
# ---------------------------------------------------------------------------
response_schemas = [
    ResponseSchema(
        name="Experiment_Name",
        description="the name given to the experiment",
    ),
    ResponseSchema(
        name="Material",
        description="list of materials needed to perform the experiments",
        type="list",
    ),
    ResponseSchema(
        name="Sources",
        description="list of sources where the information was retrieved from",
        type="list",
    ),
    ResponseSchema(
        name="Protocal",
        description="detailed instructions on how to make the item or perform the experiment",
        type="list",
    ),
    ResponseSchema(
        name="Fields_of_study",
        description="the fields of study that this experiment uses",
        type="list",
    ),
    ResponseSchema(
        name="Purpose_of_Experiments",
        description="assume what the user is trying to achieve",
    ),
    ResponseSchema(
        name="Safety_Precuation",
        description="What does the user need to know to avoid any potential harm",
    ),
    ResponseSchema(
        name="Level_of_Difficulty",
        description="How difficult is it to perform this experiment, for example beginner, novice, intermediate, hard",
    ),
]

output_parser = StructuredOutputParser.from_response_schemas(response_schemas)

# Conversation memory kept for chat-style use elsewhere; note it is NOT
# wired into the prompt below (PromptTemplate has no memory support).
memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)

format_instructions = output_parser.get_format_instructions()

# BUG FIX: the original passed `memory=memory` to PromptTemplate — not a
# valid PromptTemplate field, which raises a pydantic ValidationError at
# import time — and listed only "question" in input_variables although the
# template also references {context}, a second validation failure. Both
# made the module fail on load (the Space shows "Runtime error").
# NOTE(review): this template still describes building an item from scratch,
# copied from the apparatus chain — confirm it is the intended wording for
# generating experiments.
prompt = maker_prompt = PromptTemplate(
    template=(
        "You must generate a well detailed list of items for creating a "
        "given item from scratch. Also describe the purpose for a "
        "text-to-3d model to use for extra context"
        "\n{format_instructions}\n{question}\n{context}"
    ),
    input_variables=["question", "context"],
    partial_variables={"format_instructions": format_instructions},
)
61 |
+
|
62 |
+
|
63 |
+
def join_strings(*args: str) -> str:
|
64 |
+
"""
|
65 |
+
Join an arbitrary number of strings into one string.
|
66 |
+
|
67 |
+
Args:
|
68 |
+
*args: Variable number of strings to join.
|
69 |
+
|
70 |
+
Returns:
|
71 |
+
str: Joined string.
|
72 |
+
"""
|
73 |
+
return ''.join(args)
|
def format_docs(docs):
    """Render retrieved documents as one blank-line-separated context string.

    Each document contributes its page content immediately followed by its
    'Entry ID' and 'Title' metadata (no separators), matching the original
    concatenation order.

    BUG FIX: metadata is read with .get() defaults — only Arxiv documents
    carry 'Entry ID'/'Title'; PubMed and Wikipedia results previously raised
    KeyError here.

    Args:
        docs: Iterable of objects with .page_content and a .metadata dict.

    Returns:
        str: One block per document, separated by blank lines.
    """
    return "\n\n".join(
        d.page_content
        + d.metadata.get("Entry ID", "")
        + d.metadata.get("Title", "")
        for d in docs
    )
# Retrievers: each pulls context documents from a different public corpus.
arxiv_retriever = ArxivRetriever(load_max_docs=2)

# model = ChatOpenAI(temperature=0)
model = ChatOpenAI(temperature=0, model="gpt-4")

# BUG FIX: the original re-instantiated ArxivRetriever here, silently
# rebinding arxiv_retriever to a second instance. Keep `retriver` (sic,
# name preserved for backward compatibility) as a plain alias instead.
retriver = arxiv_retriever

pub_med_retriever = PubMedRetriever()

wikipedia_retriever = WikipediaRetriever()
def _build_chain(retriever):
    """Assemble the shared LCEL pipeline for one retriever.

    Retrieved documents feed {context}, the raw query passes through as
    {question}, and the model reply is parsed into the structured schema.
    """
    return (
        {"context": retriever, "question": RunnablePassthrough()}
        | prompt
        | model
        | output_parser
    )


arxiv_chain = _build_chain(arxiv_retriever)

pub_med_chain = _build_chain(pub_med_retriever)

wikipedia_chain = _build_chain(wikipedia_retriever)
if __name__ == "__main__":
    # Demo run: exercise two of the retrieval chains with the same query.
    # BUG FIX: corrected the misspelled query ("alectronoic",
    # "subtstrate"), which sabotaged keyword-based retrieval, and replaced
    # the dead `x = 0` with printing the structured results.
    query = "how to create electronics on a cellulose substrate"
    pub_med_data = pub_med_chain.invoke(query)
    wiki_data = wikipedia_chain.invoke(query)
    print(pub_med_data)
    print(wiki_data)