BlooperDodge committed
Commit a8c2ce4 · verified · 1 Parent(s): de8dd35

Upload 8 files
Medcleave.iml ADDED
@@ -0,0 +1,9 @@
+ <?xml version="1.0" encoding="UTF-8"?>
+ <module type="PYTHON_MODULE" version="4">
+   <component name="NewModuleRootManager" inherit-compiler-output="true">
+     <exclude-output />
+     <content url="file://$MODULE_DIR$" />
+     <orderEntry type="jdk" jdkName="Python 3.12 (Medcleave)" jdkType="Python SDK" />
+     <orderEntry type="sourceFolder" forTests="false" />
+   </component>
+ </module>
app.py ADDED
@@ -0,0 +1,176 @@
+ import faiss
+ import numpy as np
+ import torch
+ from transformers import AutoModel, AutoTokenizer, pipeline
+ import requests
+ from bs4 import BeautifulSoup
+ import os
+ import gradio as gr
+
+ # Step 1: Define a simple PromptTemplate class (LangChain-style string formatting)
+ class PromptTemplate:
+     def __init__(self, template):
+         self.template = template
+
+     def format(self, **kwargs):
+         formatted_text = self.template
+         for key, value in kwargs.items():
+             formatted_text = formatted_text.replace("{" + key + "}", str(value))
+         return formatted_text
+
+ # Step 2: Load embedding model and tokenizer
+ embedding_model_name = "ls-da3m0ns/bge_large_medical"
+ embedding_tokenizer = AutoTokenizer.from_pretrained(embedding_model_name)
+ embedding_model = AutoModel.from_pretrained(embedding_model_name)
+ embedding_model.eval()  # Set model to evaluation mode
+
+ # Move the embedding model to GPU
+ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+ embedding_model.to(device)
+
+ # Step 3: Load Faiss index
+ index_file = "faiss_index.index"
+ if os.path.exists(index_file):
+     index = faiss.read_index(index_file)
+     assert isinstance(index, faiss.IndexFlat), "Expected Faiss IndexFlat type"
+     assert index.d == 1024, f"Expected index dimension 1024, but got {index.d}"
+ else:
+     raise ValueError(f"Faiss index file '{index_file}' not found.")
+
+ # Step 4: Prepare URLs
+ urls_file = "crawled_urls.txt"
+ if os.path.exists(urls_file):
+     with open(urls_file, "r") as f:
+         urls = [line.strip() for line in f]
+ else:
+     raise ValueError(f"URLs file '{urls_file}' not found.")
+
+ # Step 5: Check if sample embeddings file exists, if not create it
+ sample_embeddings_file = "sample_embeddings.npy"
+ if not os.path.exists(sample_embeddings_file):
+     print("Sample embeddings file not found, creating new sample embeddings...")
+     # Generate embeddings for a few representative sample texts
+     sample_texts = [
+         "medical diagnosis",
+         "healthcare treatment",
+         "patient care",
+         "clinical research",
+         "disease prevention"
+     ]
+
+     sample_embeddings = []
+     for text in sample_texts:
+         inputs = embedding_tokenizer(text, return_tensors="pt").to(device)
+         with torch.no_grad():
+             outputs = embedding_model(**inputs)
+         embedding = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
+         sample_embeddings.append(embedding)
+
+     sample_embeddings = np.vstack(sample_embeddings)
+     np.save(sample_embeddings_file, sample_embeddings)
+ else:
+     sample_embeddings = np.load(sample_embeddings_file)
+
+ # Step 6: Define function for similarity search
+ def search_similar(query_text, top_k=3):
+     inputs = embedding_tokenizer(query_text, return_tensors="pt").to(device)
+     with torch.no_grad():
+         outputs = embedding_model(**inputs)
+     query_embedding = outputs.last_hidden_state.mean(dim=1).cpu().numpy()
+
+     query_embedding = query_embedding / np.linalg.norm(query_embedding)
+     query_embedding = query_embedding.reshape(1, -1).astype(np.float32)
+     _, idx = index.search(query_embedding, top_k)
+
+     results = []
+     for i in range(top_k):
+         key = int(idx[0][i])
+         results.append(urls[key])  # Return URLs only for simplicity
+
+     return results
+
+ # Step 7: Function to extract content from URLs
+ def extract_content(url):
+     try:
+         response = requests.get(url)
+         response.raise_for_status()
+         soup = BeautifulSoup(response.content, 'html.parser')
+
+         # Collect the page's paragraph text as the relevant content
+         paragraphs = soup.find_all('p')
+         relevant_content = ""
+         for para in paragraphs:
+             relevant_content += para.get_text().strip() + "\n"
+
+         return relevant_content.strip()  # Return relevant content as a single string
+     except requests.RequestException as e:
+         print(f"Error fetching content from {url}: {e}")
+         return ""
+
+ # Step 8: Use the Hugging Face text-generation pipeline for generating answers
+ generation_model_name = "microsoft/Phi-3-mini-4k-instruct"
+ text_generator = pipeline("text-generation", model=generation_model_name, device=0 if torch.cuda.is_available() else -1)
+
+ # Step 9: Function to generate answer based on query and content
+ def generate_answer(query, contents):
+     answers = []
+     prompt_template = PromptTemplate("""
+
+ ### Medical Assistant Context ###
+ As a helpful medical assistant, I'm here to assist you with your query.
+
+ ### Reference Content ###
+ {content}
+
+ ### Medical Query ###
+ Query: {query}
+
+ ### Response ###
+ Response: """)
+
+     for content in contents:
+         if content:
+             prompt = prompt_template.format(query=query, content=content)
+             # Ensure prompt is wrapped in a list for text generation
+             generated_texts = text_generator([prompt], max_new_tokens=200, num_return_sequences=1, truncation=True)
+             # Debugging: print the generated_texts object
+             # print(f"DEBUG: generated_texts: {generated_texts}")
+             # Ensure generated_texts is a non-empty list
+             if generated_texts and isinstance(generated_texts, list) and len(generated_texts) > 0:
+                 # Extract only the text that follows the "Response:" marker
+                 response = generated_texts[0][0]["generated_text"]
+                 response_start = response.find("Response:") + len("Response:")
+                 answers.append(response[response_start:].strip())
+             else:
+                 answers.append("No AI-generated text found.")
+         else:
+             answers.append("No content available to generate an answer.")
+     return answers
+
+ # Gradio interface
+ def process_query(query):
+     top_results = search_similar(query, top_k=3)
+     if top_results:
+         content = extract_content(top_results[0])
+         answer = generate_answer(query, [content])[0]
+
+         response = f"Rank 1: URL - {top_results[0]}\n"
+         response += f"Generated Answer:\n{answer}\n"
+
+         similar_urls = "\n".join(top_results[1:])  # The second and third URLs as similar URLs
+         return response, similar_urls
+     else:
+         return "No results found.", "No similar URLs found."
+
+ demo = gr.Interface(
+     fn=process_query,
+     inputs=gr.Textbox(label="Enter your query"),
+     outputs=[
+         gr.Textbox(label="Generated Answer"),
+         gr.Textbox(label="Similar URLs")
+     ]
+ )
+
+ if __name__ == "__main__":
+     demo.launch(share=True)
crawled_contents.pkl ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:e7d4abd213fc62a689e50e351eb69762b2c6a38e074832321fc4f5e498f59a4f
+ size 2373777
crawled_urls.txt ADDED
@@ -0,0 +1,98 @@
+ https://go.drugbank.com/drugs/DB00001
+ https://go.drugbank.com/drugs/DB00002
+ https://go.drugbank.com/drugs/DB00003
+ https://go.drugbank.com/drugs/DB00004
+ https://go.drugbank.com/drugs/DB00005
+ https://go.drugbank.com/drugs/DB00006
+ https://go.drugbank.com/drugs/DB00007
+ https://go.drugbank.com/drugs/DB00008
+ https://go.drugbank.com/drugs/DB00009
+ https://go.drugbank.com/drugs/DB00010
+ https://go.drugbank.com/drugs/DB00011
+ https://go.drugbank.com/drugs/DB00012
+ https://go.drugbank.com/drugs/DB00013
+ https://go.drugbank.com/drugs/DB00014
+ https://go.drugbank.com/drugs/DB00015
+ https://go.drugbank.com/drugs/DB00016
+ https://go.drugbank.com/drugs/DB00017
+ https://go.drugbank.com/drugs/DB00018
+ https://go.drugbank.com/drugs/DB00019
+ https://go.drugbank.com/drugs/DB00020
+ https://go.drugbank.com/drugs/DB00021
+ https://go.drugbank.com/drugs/DB00022
+ https://go.drugbank.com/drugs/DB00023
+ https://go.drugbank.com/drugs/DB00024
+ https://go.drugbank.com/drugs/DB00025
+ https://go.drugbank.com/drugs/DB00026
+ https://go.drugbank.com/drugs/DB00027
+ https://go.drugbank.com/drugs/DB00028
+ https://go.drugbank.com/drugs/DB00029
+ https://go.drugbank.com/drugs/DB00030
+ https://go.drugbank.com/drugs/DB00031
+ https://go.drugbank.com/drugs/DB00032
+ https://go.drugbank.com/drugs/DB00033
+ https://go.drugbank.com/drugs/DB00034
+ https://go.drugbank.com/drugs/DB00035
+ https://go.drugbank.com/drugs/DB00036
+ https://go.drugbank.com/drugs/DB00037
+ https://go.drugbank.com/drugs/DB00038
+ https://go.drugbank.com/drugs/DB00039
+ https://go.drugbank.com/drugs/DB00040
+ https://go.drugbank.com/drugs/DB00041
+ https://go.drugbank.com/drugs/DB00042
+ https://go.drugbank.com/drugs/DB00043
+ https://go.drugbank.com/drugs/DB00044
+ https://go.drugbank.com/drugs/DB00045
+ https://go.drugbank.com/drugs/DB00046
+ https://go.drugbank.com/drugs/DB00047
+ https://go.drugbank.com/drugs/DB00048
+ https://go.drugbank.com/drugs/DB00049
+ https://go.drugbank.com/drugs/DB00050
+ https://go.drugbank.com/drugs/DB00051
+ https://go.drugbank.com/drugs/DB00052
+ https://go.drugbank.com/drugs/DB00053
+ https://go.drugbank.com/drugs/DB00054
+ https://go.drugbank.com/drugs/DB00055
+ https://go.drugbank.com/drugs/DB00056
+ https://go.drugbank.com/drugs/DB00057
+ https://go.drugbank.com/drugs/DB00058
+ https://go.drugbank.com/drugs/DB00059
+ https://go.drugbank.com/drugs/DB00060
+ https://go.drugbank.com/drugs/DB00061
+ https://go.drugbank.com/drugs/DB00062
+ https://go.drugbank.com/drugs/DB00063
+ https://go.drugbank.com/drugs/DB00064
+ https://go.drugbank.com/drugs/DB00065
+ https://go.drugbank.com/drugs/DB00066
+ https://go.drugbank.com/drugs/DB00067
+ https://go.drugbank.com/drugs/DB00068
+ https://go.drugbank.com/drugs/DB00069
+ https://go.drugbank.com/drugs/DB00070
+ https://go.drugbank.com/drugs/DB00071
+ https://go.drugbank.com/drugs/DB00072
+ https://go.drugbank.com/drugs/DB00073
+ https://go.drugbank.com/drugs/DB00074
+ https://go.drugbank.com/drugs/DB00075
+ https://go.drugbank.com/drugs/DB00076
+ https://go.drugbank.com/drugs/DB00078
+ https://go.drugbank.com/drugs/DB00080
+ https://go.drugbank.com/drugs/DB00081
+ https://go.drugbank.com/drugs/DB00082
+ https://go.drugbank.com/drugs/DB00083
+ https://go.drugbank.com/drugs/DB00084
+ https://go.drugbank.com/drugs/DB00085
+ https://go.drugbank.com/drugs/DB00086
+ https://go.drugbank.com/drugs/DB00087
+ https://go.drugbank.com/drugs/DB00088
+ https://go.drugbank.com/drugs/DB00089
+ https://go.drugbank.com/drugs/DB00090
+ https://go.drugbank.com/drugs/DB00091
+ https://go.drugbank.com/drugs/DB00092
+ https://go.drugbank.com/drugs/DB00093
+ https://go.drugbank.com/drugs/DB00094
+ https://go.drugbank.com/drugs/DB00095
+ https://go.drugbank.com/drugs/DB00096
+ https://go.drugbank.com/drugs/DB00097
+ https://go.drugbank.com/drugs/DB00098
+ https://go.drugbank.com/drugs/DB00099
+ https://go.drugbank.com/drugs/DB00100
crawler.py ADDED
@@ -0,0 +1,190 @@
+ import requests
+ from bs4 import BeautifulSoup
+ from urllib.parse import urljoin, urlparse
+ import os
+ from transformers import BertModel, BertTokenizer
+ import torch
+ import numpy as np
+ import faiss
+ from concurrent.futures import ThreadPoolExecutor
+ from retrying import retry
+ import time
+ from ratelimit import limits, sleep_and_retry
+ import threading
+
+ # Global counters for URLs and FAISS index initialization
+ total_urls_crawled = 0
+ index_file = 'faiss_index.bin'  # FAISS index file path
+
+ # Set of visited URLs to prevent duplicates
+ visited_urls = set()
+
+ # Directory to save crawled URLs
+ urls_dir = 'crawled_urls'
+ os.makedirs(urls_dir, exist_ok=True)
+ urls_file = os.path.join(urls_dir, 'crawled_urls.txt')
+
+ # Initialize FAISS index
+ def initialize_faiss_index(dimension):
+     if os.path.exists(index_file):
+         os.remove(index_file)
+         print("Deleted previous FAISS index file.")
+     index = faiss.IndexFlatL2(dimension)
+     return index
+
+ # Initialize or load FAISS index
+ dimension = 768  # Dimension of BERT embeddings
+ index = initialize_faiss_index(dimension)
+
+ # Initialize tokenizer and model
+ tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
+ model = BertModel.from_pretrained('bert-base-uncased')
+
+ # Lock for thread-safe update of total_urls_crawled
+ lock = threading.Lock()
+
+ # Function to update and print live count of crawled URLs
+ def update_live_count():
+     global total_urls_crawled
+     while True:
+         with lock:
+             print(f"\rURLs crawled: {total_urls_crawled}", end='')
+         time.sleep(1)  # Update every second
+
+ # Start live count update thread
+ live_count_thread = threading.Thread(target=update_live_count, daemon=True)
+ live_count_thread.start()
+
+ # Function to save crawled URLs to a file
+ def save_crawled_urls(url):
+     with open(urls_file, 'a') as f:
+         f.write(f"{url}\n")
+         f.flush()  # Flush buffer to ensure immediate write
+         os.fsync(f.fileno())  # Ensure write is flushed to disk
+
+ # Function to get all links from a webpage with retry mechanism and rate limiting
+ @retry(stop_max_attempt_number=3, wait_fixed=2000)
+ @sleep_and_retry
+ @limits(calls=10, period=1)  # Adjust calls and period based on website's rate limits
+ def get_links(url, domain):
+     global total_urls_crawled
+     links = []
+     try:
+         headers = {
+             'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+         }
+         response = requests.get(url, headers=headers, timeout=50)
+         response.raise_for_status()
+         soup = BeautifulSoup(response.content, 'html.parser')
+         for link in soup.find_all('a', href=True):
+             href = link['href']
+             normalized_url = normalize_url(href, domain)
+             if normalized_url and normalized_url not in visited_urls:
+                 links.append(normalized_url)
+                 visited_urls.add(normalized_url)
+                 with lock:
+                     total_urls_crawled += 1
+                 save_crawled_urls(normalized_url)  # Save crawled URL to file
+
+         # Convert the page text to BERT embeddings and add to FAISS index
+         try:
+             text = soup.get_text()
+             if text:
+                 embeddings = convert_text_to_bert_embeddings(text, tokenizer, model)
+                 index.add(np.array([embeddings]))
+         except Exception as e:
+             print(f"Error adding embeddings to FAISS index: {e}")
+
+     except requests.HTTPError as e:
+         if e.response.status_code == 404:
+             print(f"HTTP 404 Error: {e}")
+         else:
+             print(f"HTTP error occurred: {e}")
+     except requests.RequestException as e:
+         print(f"Error accessing {url}: {e}")
+     return links
+
+ # Function to normalize and validate URLs
+ def normalize_url(url, domain):
+     parsed_url = urlparse(url)
+     if not parsed_url.scheme:
+         url = urljoin(domain, url)
+     if url.startswith(domain):
+         return url
+     return None
+
+ # Function to recursively crawl pages and collect links with retry mechanism and rate limiting
+ crawled_pages = set()  # Pages already fetched; separate from visited_urls, which records every URL discovered
+
+ @retry(stop_max_attempt_number=3, wait_fixed=2000)
+ @sleep_and_retry
+ @limits(calls=10, period=1)  # Adjust calls and period based on website's rate limits
+ def crawl_site(base_url, domain, depth=0, max_depth=10):  # Increased max_depth to 10
+     if depth > max_depth or base_url in crawled_pages:
+         return []
+     crawled_pages.add(base_url)
+
+     links = get_links(base_url, domain)
+     print(f"Crawled {len(links)} links from {base_url} at depth {depth}.")  # Debugging info
+
+     # Recurse into the links discovered by get_links. Filtering on crawled_pages (rather than
+     # visited_urls, which get_links already populated) lets the crawl go deeper than one level,
+     # and reusing the links returned above avoids fetching the same page a second time.
+     links_to_crawl = [link for link in links if link not in crawled_pages]
+
+     try:
+         with ThreadPoolExecutor(max_workers=500) as executor:
+             results = executor.map(lambda url: crawl_site(url, domain, depth + 1, max_depth), links_to_crawl)
+             for result in results:
+                 links.extend(result)
+     except Exception as e:
+         print(f"Error while crawling links from {base_url}: {e}")
+
+     return links
+
+ # Function to convert text to BERT embeddings
+ def convert_text_to_bert_embeddings(text, tokenizer, model):
+     inputs = tokenizer(text, return_tensors='pt', max_length=512, truncation=True, padding=True)
+
+     with torch.no_grad():
+         outputs = model(**inputs)
+     embeddings = outputs.last_hidden_state.mean(dim=1).squeeze().numpy()  # Average pool last layer's output
+
+     return embeddings
+
+ # Main process
+ def main():
+     global total_urls_crawled
+     domain = 'https://go.drugbank.com/'  # Replace with your new domain
+     start_url = 'https://go.drugbank.com/drugs/DB00001'  # Replace with your starting URL
+
+     try:
+         # Save the FAISS index at the beginning of the execution
+         faiss.write_index(index, index_file)
+         print("Initial FAISS index saved.")
+
+         urls = crawl_site(start_url, domain)
+         print(f"\n\nFound {total_urls_crawled} URLs.")
+
+         # Save the FAISS index at the end of execution
+         faiss.write_index(index, index_file)
+         print("Final FAISS index saved.")
+
+     except Exception as e:
+         print(f"Exception encountered: {e}")
+
+ if __name__ == "__main__":
+     main()
faiss_index.index ADDED
Binary file (401 kB).
 
requirements.txt ADDED
@@ -0,0 +1,7 @@
+ faiss-cpu
+ numpy
+ torch
+ transformers
+ requests
+ beautifulsoup4
+ gradio
sample_embeddings.npy ADDED
@@ -0,0 +1,3 @@
+ version https://git-lfs.github.com/spec/v1
+ oid sha256:29c28c327a9952d067087d04c9550baf1b41db8028e4aee5a2d46c4f6ac91983
+ size 20608