Create extra_tools.py
lab/extra_tools.py +37 -0
lab/extra_tools.py
ADDED
@@ -0,0 +1,37 @@
import wikipedia
import requests
from bs4 import BeautifulSoup
from crewai.tools import tool


@tool("Wikipedia Search Tool")
def search_wikipedia(query: str) -> str:
    """Run a Wikipedia search and return page summaries."""
    page_titles = wikipedia.search(query)
    summaries = []

    for page_title in page_titles[:3]:  # First 3 results
        try:
            wiki_page = wikipedia.page(title=page_title, auto_suggest=False)
            summaries.append(f"Page: {page_title}\nSummary: {wiki_page.summary}")
        except wikipedia.PageError:  # Page not found
            pass
        except wikipedia.DisambiguationError:  # Title is ambiguous
            pass

    if not summaries:
        return "No good Wikipedia Search Result was found"

    return "\n\n".join(summaries)


@tool("Webpage Scraping Tool")
def scrap_webpage(target_url: str) -> str:
    """Scrape the text content of a webpage."""
    response = requests.get(target_url)
    html_content = response.text

    soup = BeautifulSoup(html_content, "html.parser")
    stripped_content = soup.get_text()

    return stripped_content