Tools: Bpy Doc, GPU Checker, Find Related
- .gitignore +3 -0
- Dockerfile +20 -0
- README.md +1 -1
- __init__.py +0 -0
- config.py +12 -0
- main.py +40 -0
- requirements-fastapi.txt +6 -0
- routers/__init__.py +0 -0
- routers/bpy_doc_v41.pkl +3 -0
- routers/tool_bpy_doc.py +63 -0
- routers/tool_calls.py +76 -0
- routers/tool_find_related.py +416 -0
- routers/tool_gpu_checker.py +196 -0
- routers/utils_gitea.py +89 -0
- static/favicon.ico +0 -0
- static/privace.txt +35 -0
.gitignore
ADDED
@@ -0,0 +1,3 @@
.vs
__pycache__/
routers/cache
Dockerfile
ADDED
@@ -0,0 +1,20 @@
FROM python:3.11-slim

WORKDIR /code

COPY requirements-fastapi.txt ./

RUN pip install --no-cache-dir --upgrade -r /code/requirements-fastapi.txt

RUN useradd -m -u 1000 user

USER user

ENV HOME=/home/user \
    PATH=/home/user/.local/bin:$PATH

WORKDIR $HOME/app

COPY --chown=user . $HOME/app/

CMD ["uvicorn", "main:app", "--host", "0.0.0.0", "--port", "7860"]
README.md
CHANGED
@@ -1,6 +1,6 @@
 ---
 title: Tools
-emoji:
+emoji: 🌍
 colorFrom: blue
 colorTo: purple
 sdk: docker
__init__.py
ADDED
File without changes
config.py
ADDED
@@ -0,0 +1,12 @@
from pydantic_settings import BaseSettings
import os


class Settings(BaseSettings):
    huggingface_key: str = os.environ.get("huggingface_key")
    cache_dir: str = "cache"
    embedding_api: str = "sbert"
    embedding_model: str = "mano-wii/BAAI_bge-base-en-v1.5-tunned-for-blender-issues"


settings = Settings()
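A quick sketch of how these settings are meant to be fed (not part of the commit): `huggingface_key` has no usable in-code default, so it has to be present in the environment before `config` is imported. The token value below is a placeholder.

```python
# Minimal sketch, assuming the process is started from the repository root.
# "hf_xxx" is a placeholder, not a real token.
import os

os.environ["huggingface_key"] = "hf_xxx"

from config import settings  # Settings() is instantiated on import

print(settings.embedding_api)    # "sbert" unless overridden
print(settings.embedding_model)  # the tuned BGE model declared above
```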
main.py
ADDED
@@ -0,0 +1,40 @@
# main.py

from fastapi import FastAPI
from fastapi.middleware.cors import CORSMiddleware
from fastapi.staticfiles import StaticFiles
from huggingface_hub import login
from config import settings
from routers import tool_bpy_doc, tool_gpu_checker, tool_calls, tool_find_related

login(settings.huggingface_key)

app = FastAPI(openapi_url="/api/v1/openapi.json",
              docs_url="/api/v1/docs")

app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],
    allow_methods=["*"],
    allow_headers=["*"],
    allow_credentials=True,
)

app.include_router(
    tool_bpy_doc.router, prefix="/api/v1", tags=["Tools"])

app.include_router(
    tool_gpu_checker.router, prefix="/api/v1", tags=["Tools"])

app.include_router(
    tool_find_related.router, prefix="/api/v1", tags=["Tools"])

app.include_router(
    tool_calls.router, prefix="/api/v1", tags=["Function Calls"])


@app.get("/")
async def root():
    return {"message": "Tool Endpoints"}

app.mount("/api/v1/static", StaticFiles(directory="static"), name="static")
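With the routers mounted under `/api/v1`, the app can be served with `uvicorn main:app` and exercised over HTTP. A hedged client sketch using the `requests` package (assumed installed); the host, port and query value are illustrative, not part of the commit:

```python
# Minimal client sketch, assuming a local instance on port 7860 (see the Dockerfile CMD).
import requests

BASE = "http://localhost:7860/api/v1"

# Documentation lookup served by routers/tool_bpy_doc.py.
r = requests.get(f"{BASE}/bpy_doc", params={"api": "bpy.context.object"})
print(r.json()["message"])

# Interactive OpenAPI docs are exposed at f"{BASE}/docs" as configured above.
```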
requirements-fastapi.txt
ADDED
@@ -0,0 +1,6 @@
fastapi
uvicorn[standard]
python-multipart
pydantic-settings
huggingface_hub
sentence_transformers
routers/__init__.py
ADDED
File without changes
routers/bpy_doc_v41.pkl
ADDED
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:fd35f7e45cbd214cf92f14f53dccfe209deebc4d1f444061f8089dc4440d483c
size 1876094
routers/tool_bpy_doc.py
ADDED
@@ -0,0 +1,63 @@
# bpydoc.py

import pickle
from fastapi import APIRouter


router = APIRouter()

with open("routers/bpy_doc_v41.pkl", 'rb') as file:
    bpy_doc_map = pickle.load(file)


def bpy_doc_get_documentation(api):
    parts = api.split('.')
    api = ""
    data = bpy_doc_map
    ctx = []
    for part in parts:
        try:
            data = data[part]
            api += part
            ctx.append((api, data["__info"]['descr']))
            api += '.'
        except Exception as ex:
            descr = f"{type(ex).__name__}: {ex}. Perhaps this object was implemented in a later version."
            ctx.append((api, descr))
            break

    documentation = ""
    for obj, descr in ctx:
        documentation += f"{obj}:\n"
        documentation += f"{descr}\n\n"

    if len(data) > 1:
        documentation += f"Members of {api}:\n"
        info = data["__info"]
        if info['bases']:
            documentation += f"Inherits from {info['bases']}\n"

        for key, val in data.items():
            if key != "__info":
                descr = ""
                if isinstance(val, list):
                    val = val[0]
                    descr = "`bpy_prop_collection` of {}".format(
                        val['__info']["descr"].replace('\n', ' '))
                else:
                    descr = val['__info']["descr"].replace('\n', ' ')

                documentation += f"- {key}: {descr}\n"

    return documentation


@router.get("/bpy_doc")
def bpy_doc(api: str = ""):
    message = bpy_doc_get_documentation(api)
    return {"message": message}


if __name__ == "__main__":
    test = bpy_doc("bpy.context")
    print(test)
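`bpy_doc_get_documentation` walks the pickled map one attribute at a time, so an unknown trailing attribute yields a KeyError description inside the returned text instead of raising. A direct-call sketch (the queried paths are examples; it needs the Git LFS file `routers/bpy_doc_v41.pkl` checked out and the repository root as working directory):

```python
# Minimal sketch of calling the lookup without the web server.
from routers.tool_bpy_doc import bpy_doc_get_documentation

print(bpy_doc_get_documentation("bpy.context"))           # description plus member listing
print(bpy_doc_get_documentation("bpy.context.not_real"))  # ends with a KeyError note
```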
routers/tool_calls.py
ADDED
@@ -0,0 +1,76 @@
import json
from .tool_gpu_checker import gpu_checker_get_message
from .tool_bpy_doc import bpy_doc_get_documentation
from fastapi import APIRouter, Body
from typing import List, Dict
from pydantic import BaseModel


class ToolCallFunction(BaseModel):
    name: str
    arguments: str


class ToolCallInput(BaseModel):
    id: str
    type: str
    function: ToolCallFunction


router = APIRouter()


def process_tool_call(tool_call: ToolCallInput) -> Dict:
    function_name = tool_call.function.name
    function_args = json.loads(tool_call.function.arguments)
    output = {"tool_call_id": tool_call.id, "output": ""}
    if function_name == "get_bpy_api_info":
        output["output"] = bpy_doc_get_documentation(
            function_args.get("api", ""))
    elif function_name == "check_gpu":
        output["output"] = gpu_checker_get_message(
            function_args.get("gpu", ""))
    return output


@router.post("/function_call", response_model=List[Dict])
def function_call(tool_calls: List[ToolCallInput] = Body(..., description="List of tool calls in the request body")):
    """
    Endpoint to process tool calls.
    Args:
        tool_calls (List[ToolCallInput]): List of tool calls.
    Returns:
        List[Dict]: List of tool outputs with tool_call_id and output.
    """
    tool_outputs = [process_tool_call(tool_input) for tool_input in tool_calls]
    return tool_outputs


if __name__ == "__main__":
    tool_calls_data = [
        {
            "id": "call_abc123",
            "type": "function",
            "function": {
                "name": "get_bpy_api_info",
                "arguments": "{\"api\":\"bpy.context.scene.world\"}"
            }
        },
        {
            "id": "call_abc456",
            "type": "function",
            "function": {
                "name": "check_gpu",
                "arguments": "{\"gpu\":\"Mesa Intel(R) Iris(R) Plus Graphics 640 (Kaby Lake GT3e) (KBL GT3) Intel 4.6 (Core Profile) Mesa 22.2.5\"}"
            }
        }
    ]

    tool_calls = [
        ToolCallInput(id=tc['id'], type=tc['type'],
                      function=ToolCallFunction(**tc['function']))
        for tc in tool_calls_data
    ]

    test = function_call(tool_calls)
    print(test)
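The `/function_call` endpoint mirrors the OpenAI tool-call shape: each entry carries an id, a type and a function with `arguments` encoded as a JSON string. A hedged HTTP sketch (host, port and the id are placeholders, not values from the commit):

```python
# Minimal client sketch for the endpoint above, assuming a local instance on port 7860.
import requests

payload = [{
    "id": "call_demo_1",          # arbitrary client-side id, echoed back as tool_call_id
    "type": "function",
    "function": {
        "name": "check_gpu",      # dispatched to gpu_checker_get_message
        "arguments": "{\"gpu\": \"GeForce GTX 1060\"}",
    },
}]

r = requests.post("http://localhost:7860/api/v1/function_call", json=payload)
print(r.json())  # e.g. [{"tool_call_id": "call_demo_1", "output": "The NVIDIA card ..."}]
```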
routers/tool_find_related.py
ADDED
@@ -0,0 +1,416 @@
# find_related.py

import re
import torch
import threading
from datetime import datetime, timedelta
from sentence_transformers import SentenceTransformer, util
from fastapi import APIRouter

try:
    from .utils_gitea import gitea_fetch_issues, gitea_json_issue_get
    from config import settings
except:
    import os
    import sys
    from utils_gitea import gitea_fetch_issues, gitea_json_issue_get
    sys.path.append(os.path.abspath(
        os.path.join(os.path.dirname(__file__), '..')))
    from config import settings


def _create_issue_string(title, body):
    cleaned_body = body.replace('\r', '')
    cleaned_body = cleaned_body.replace('**System Information**\n', '')
    cleaned_body = cleaned_body.replace('**Blender Version**\n', '')
    cleaned_body = cleaned_body.replace(
        'Worked: (newest version of Blender that worked as expected)\n', '')
    cleaned_body = cleaned_body.replace('**Short description of error**\n', '')
    cleaned_body = cleaned_body.replace('**Addon Information**\n', '')
    cleaned_body = cleaned_body.replace(
        '**Exact steps for others to reproduce the error**\n', '')
    cleaned_body = cleaned_body.replace(
        '[Please describe the exact steps needed to reproduce the issue]\n', '')
    cleaned_body = cleaned_body.replace(
        '[Please fill out a short description of the error here]\n', '')
    cleaned_body = cleaned_body.replace(
        '[Based on the default startup or an attached .blend file (as simple as possible)]\n', '')
    cleaned_body = re.sub(
        r', branch: .+?, commit date: \d{4}-\d{2}-\d{2} \d{2}:\d{2}, hash: `.+?`', '', cleaned_body)
    cleaned_body = re.sub(
        r'\/?attachments\/[a-zA-Z0-9\-]+', 'attachment', cleaned_body)
    cleaned_body = re.sub(
        r'https?:\/\/[^\s/]+(?:\/[^\s/]+)*\/([^\s/]+)', lambda match: match.group(1), cleaned_body)

    return title + '\n' + cleaned_body


def _find_latest_date(issues, default_str=None):
    # Handle the case where 'issues' is empty
    if not issues:
        return default_str

    return max((issue['updated_at'] for issue in issues), default=default_str)


class EmbeddingContext:
    # These don't change
    TOKEN_LEN_MAX_FOR_EMBEDDING = 512
    TOKEN_LEN_MAX_BALCKLIST = 2 * TOKEN_LEN_MAX_FOR_EMBEDDING
    issue_attr_filter = {'number', 'title', 'body', 'state', 'updated_at'}

    # Set when creating the object
    lock = None
    model = None
    model_name = ''
    config_type = ''

    # Updates constantly
    data = {}
    black_list = {'blender': {'blender': {109399, 113157, 114706},
                              'blender-addons': set()}}

    def __init__(self):
        self.lock = threading.Lock()

        config_type = settings.embedding_api
        model_name = settings.embedding_model

        if config_type == 'sbert':
            self.model = SentenceTransformer(model_name, use_auth_token=False)
            self.model.max_seq_length = self.TOKEN_LEN_MAX_FOR_EMBEDDING
            print("Max Sequence Length:", self.model.max_seq_length)

            self.encode = self.encode_sbert
            if torch.cuda.is_available():
                self.model = self.model.to('cuda')

        elif config_type == 'openai':
            # openai.api_base = settings.openai.api_base
            self.encode = self.encode_openai

        self.model_name = model_name
        self.config_type = config_type

    def encode(self, texts_to_embed):
        pass

    def encode_sbert(self, texts_to_embed):
        return self.model.encode(texts_to_embed, show_progress_bar=True, convert_to_tensor=True, normalize_embeddings=True)

    def encode_openai(self, texts_to_embed):
        import math
        import openai
        import time

        tokens_count = 0
        for text in texts_to_embed:
            tokens_count += len(self.get_tokens(text))

        chunks_num = math.ceil(tokens_count / 500000)
        chunk_size = math.ceil(len(texts_to_embed) / chunks_num)

        embeddings = []
        for i in range(chunks_num):
            start = i * chunk_size
            end = start + chunk_size
            chunk = texts_to_embed[start:end]

            embeddings_tmp = openai.Embedding.create(
                input=chunk, model=self.model_name)['data']
            if embeddings_tmp is None:
                break

            embeddings.extend(embeddings_tmp)

            if i < chunks_num - 1:
                time.sleep(60)  # Wait 1 minute before the next call

        return torch.stack([torch.tensor(embedding['embedding'], dtype=torch.float32) for embedding in embeddings])

    def get_tokens(self, text):
        if self.model:
            return self.model.tokenizer.tokenize(text)

        tokens = []
        for token in re.split(r'(\W|\b)', text):
            if token.strip():
                tokens.append(token)

        return tokens

    def create_strings_to_embbed(self, issues, black_list):
        texts_to_embed = [_create_issue_string(
            issue['title'], issue['body']) for issue in issues]

        # Create issue blacklist (for keepping track)
        token_count = 0
        for i, text in enumerate(texts_to_embed):
            tokens = self.get_tokens(text)
            tokens_len = len(tokens)
            token_count += tokens_len

            if tokens_len > self.TOKEN_LEN_MAX_BALCKLIST:
                # Only use the first TOKEN_LEN_MAX tokens
                black_list.add(int(issues[i]['number']))
                texts_to_embed[i] = ' '.join(
                    tokens[:self.TOKEN_LEN_MAX_BALCKLIST])

        return texts_to_embed

    def embeddings_generate(self, owner, repo):
        if not owner in self.black_list:
            self.black_list[owner] = {repo: {}}
        elif not repo in self.black_list[owner]:
            self.black_list[owner][repo] = {}

        black_list = self.black_list[owner][repo]

        issues = gitea_fetch_issues(owner, repo, state='open', since=None,
                                    issue_attr_filter=self.issue_attr_filter, exclude=black_list)

        issues = sorted(issues, key=lambda issue: int(issue['number']))

        print("Embedding Issues...")
        texts_to_embed = self.create_strings_to_embbed(issues, black_list)
        embeddings = self.encode(texts_to_embed)

        data = {
            # Get the most recent date
            'updated_at': _find_latest_date(issues),
            'numbers': [int(issue['number']) for issue in issues],
            'titles': [issue['title'] for issue in issues],
            'embeddings': embeddings,
        }

        if not owner in self.data:
            self.data[owner] = {repo: {}}
        elif not repo in self.data[owner]:
            self.data[owner][repo] = {}

        self.data[owner][repo] = data

    def embeddings_updated_get(self, owner, repo):
        with self.lock:
            try:
                data = self.data[owner][repo]
            except:
                self.embeddings_generate(owner, repo)
                data = self.data[owner][repo]

            black_list = self.black_list[owner][repo]
            date_old = data['updated_at']

            issues = gitea_fetch_issues(
                owner, repo, since=date_old, issue_attr_filter=self.issue_attr_filter, exclude=black_list)

            # WORKAROUND:
            # Consider that if the time hasn't changed, it's the same issue.
            issues = [
                issue for issue in issues if issue['updated_at'] != date_old]

            if len(issues) == 0:
                return data

            # Get the most recent date
            date_new = _find_latest_date(issues, date_old)

            # autopep8: off
            numbers_old = data['numbers']
            titles_old = data['titles']
            embeddings_old = data['embeddings']

            last_index = len(numbers_old) - 1

            issues = sorted(issues, key=lambda issue: int(issue['number']))
            issues_clos = [issue for issue in issues if issue['state'] == 'closed']
            issues_open = [issue for issue in issues if issue['state'] == 'open']

            numbers_clos = [int(issue['number']) for issue in issues_clos]
            numbers_open = [int(issue['number']) for issue in issues_open]

            old_closed = []
            for number_clos in numbers_clos:
                for i_old in range(last_index, -1, -1):
                    number_old = numbers_old[i_old]
                    if number_old < number_clos:
                        break
                    if number_old == number_clos:
                        old_closed.append(i_old)
                        break

            mask_open = torch.ones(len(numbers_open), dtype=torch.bool)
            need_sort = False
            change_map = []
            for i_open, number_open in enumerate(numbers_open):
                for i_old in range(last_index, -1, -1):
                    number_old = numbers_old[i_old]
                    if number_old < number_open:
                        need_sort = need_sort or (i_old != last_index)
                        break
                    if number_old == number_open:
                        change_map.append((i_old, i_open))
                        mask_open[i_open] = False
                        break

            texts_to_embed = self.create_strings_to_embbed(issues_open, black_list)
            embeddings = self.encode(texts_to_embed)

            for i_old, i_open in change_map:
                titles_old[i_old] = issues_open[i_open]['title']
                embeddings_old[i_old] = embeddings[i_open]

            if old_closed:
                total = (len(numbers_old) - len(old_closed)) + (len(numbers_open) - len(change_map))
                numbers_new = [None] * total
                titles_new = [None] * total
                embeddings_new = torch.empty((total, *embeddings.shape[1:]), dtype=embeddings.dtype, device=embeddings.device)

                i_new = 0
                i_old = 0
                for i_closed in old_closed + [len(numbers_old)]:
                    while i_old < i_closed:
                        numbers_new[i_new] = numbers_old[i_old]
                        titles_new[i_new] = titles_old[i_old]
                        embeddings_new[i_new] = embeddings_old[i_old]
                        i_new += 1
                        i_old += 1
                    i_old += 1

                for i_open in range(len(numbers_open)):
                    if not mask_open[i_open]:
                        continue
                    titles_new[i_new] = issues_open[i_open]['title']
                    numbers_new[i_new] = numbers_open[i_open]
                    embeddings_new[i_new] = embeddings[i_open]
                    i_new += 1

                assert i_new == total
            else:
                titles_new = titles_old + [issue['title'] for i, issue in enumerate(issues_open) if mask_open[i]]
                numbers_new = numbers_old + [number for i, number in enumerate(numbers_open) if mask_open[i]]
                embeddings_new = torch.cat([embeddings_old, embeddings[mask_open]])

            if need_sort:
                sorted_indices = sorted(range(len(numbers_new)), key=lambda k: numbers_new[k])
                titles_new = [titles_new[i] for i in sorted_indices]
                numbers_new = [numbers_new[i] for i in sorted_indices]
                embeddings_new = embeddings_new[sorted_indices]

            data['updated_at'] = date_new
            data['titles'] = titles_new
            data['numbers'] = numbers_new
            data['embeddings'] = embeddings_new

            # autopep8: on
            return data


router = APIRouter()
EMBEDDING_CTX = EmbeddingContext()
# EMBEDDING_CTX.embeddings_generate('blender', 'blender')
# EMBEDDING_CTX.embeddings_generate('blender', 'blender-addons')


def _sort_similarity(data, query_emb, limit):
    duplicates = []
    ret = util.semantic_search(
        query_emb, data['embeddings'], top_k=limit, score_function=util.dot_score)
    for score in ret[0]:
        corpus_id = score['corpus_id']
        text = f"#{data['numbers'][corpus_id]}: {data['titles'][corpus_id]}"
        duplicates.append(text)

    return duplicates


cached_search = {'text': '', 'repo': '', 'issues': []}


def text_search(owner, repo, text_to_embed, limit=None):
    global cached_search
    global EMBEDDING_CTX
    if not text_to_embed:
        return []

    if text_to_embed == cached_search['text'] and repo == cached_search['repo']:
        return cached_search['issues'][:limit]

    data = EMBEDDING_CTX.embeddings_updated_get(owner, repo)

    new_embedding = EMBEDDING_CTX.encode([text_to_embed])
    result = _sort_similarity(data, new_embedding, 500)

    cached_search = {'text': text_to_embed, 'repo': repo, 'issues': result}
    return result[:limit]


def find_relatedness(gitea_issue, limit=20):
    owner = gitea_issue['repository']['owner']
    repo = gitea_issue['repository']['name']
    title = gitea_issue['title']
    body = gitea_issue['body']
    number = int(gitea_issue['number'])

    data = EMBEDDING_CTX.embeddings_updated_get(owner, repo)
    new_embedding = None

    # Check if the embedding already exist.
    for i in range(len(data['numbers']) - 1, -1, -1):
        number_cached = data['numbers'][i]
        if number_cached < number:
            break
        if number_cached == number:
            new_embedding = data['embeddings'][i]
            break

    if new_embedding is None:
        text_to_embed = _create_issue_string(title, body)
        new_embedding = EMBEDDING_CTX.encode([text_to_embed])

    duplicates = _sort_similarity(data, new_embedding, limit=limit)
    if not duplicates:
        return ''

    number_cached = int(re.search(r'#(\d+):', duplicates[0]).group(1))
    if number_cached == number:
        return '\n'.join(duplicates[1:])

    return '\n'.join(duplicates)


@router.get("/find_related")
def find_related(owner: str = 'blender', repo: str = 'blender', number: int = 1, limit: int = 50):
    issue = gitea_json_issue_get(owner, repo, number)
    related = find_relatedness(issue, limit=limit)
    return related


if __name__ == "__main__":
    import os
    import pickle
    repo = 'blender-addons'
    cache_dir = f"routers/cache/{repo}"
    file_path = os.path.join(cache_dir, "data.pkl")

    if not os.path.exists(cache_dir):
        os.makedirs(cache_dir, exist_ok=True)
        with open(file_path, "wb") as file:
            EMBEDDING_CTX.embeddings_generate('blender', repo)
            pickle.dump(
                EMBEDDING_CTX.data['blender'][repo], file, protocol=pickle.HIGHEST_PROTOCOL)
    else:
        with open(file_path, 'rb') as file:
            EMBEDDING_CTX.data['blender'] = {repo: pickle.load(file)}

    # 'blender/blender/111434' must print #96153, #83604 and #79762
    issue = gitea_json_issue_get('blender', repo, 105027)
    print(issue['title'])

    related = find_relatedness(issue, limit=50)

    if related == '':
        print("No potential duplicates found.")
    else:
        print("These are the 20 most related issues:")
        print(related)
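`find_relatedness` embeds the report text and runs a dot-product semantic search against the cached embeddings of open issues. A hedged sketch of calling the `/find_related` endpoint (host and port are assumptions; the first request for a repository triggers fetching and embedding every open issue, which can take a while):

```python
# Minimal client sketch, assuming a local instance on port 7860.
import requests

r = requests.get(
    "http://localhost:7860/api/v1/find_related",
    params={"owner": "blender", "repo": "blender", "number": 111434, "limit": 20},
)
# The endpoint returns a newline-separated list of "#number: title" entries,
# or an empty string when nothing related is found.
print(r.json())
```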
routers/tool_gpu_checker.py
ADDED
@@ -0,0 +1,196 @@
# gpuchecker.py

import re
from fastapi import APIRouter


router = APIRouter()


def _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info):
    for model_pattern, descr in supported_models.items():
        if match := re.search(model_pattern, graphics_card_info, re.I):
            return True, match.group(), descr.format(*match.groups())

    for model_pattern, descr in unsupported_models.items():
        if match := re.search(model_pattern, graphics_card_info, re.I):
            return False, match.group(), descr.format(*match.groups())

    return False, None, None


def _check_amd(graphics_card_info):
    supported_models = {
        r"Radeon\s*6\d{2}([A-Z])?\b": "this model belongs to the RDNA 2 architecture",
        r"(Radeon\s*)?R9\s*[A-Z0-9]+": "R9 models belongs to the GCN 1st gen or newer architecture",
        r"(Radeon\s*)?(Pro\s*)?\bW7\d{3}(X)?\b(\s*Duo)?": "Radeon Pro W7000 models belongs to the RDNA 3 architecture",
        r"(Radeon\s*)?(Pro\s*)?\bW([5-6])\d{3}(X)?\b(\s*Duo)?": "Radeon Pro W{2}000 models belongs to the RDNA 2 architecture",
        r"(AMD\s*)?6800 XT": "this model belongs to the RDNA 2 architecture",
        r"Radeon\s*(\(TM\)\s*)?RX Vega(\s*\d{2}\b)": "Radeon RX Vega models belongs to the GCN 5th gen architecture",
        r"Radeon Pro Vega ((\d{2}(X)?|II)\b)?(\s*Duo)?": "Radeon Pro Vega models belongs to the GCN 5th gen architecture",
        r"Radeon\s*(\(TM\)\s*)?Pro [4-5]\d{2}(X)?": "Radeon Pro 400/500 series belongs to the GCN 4th gen architecture",
        r"Radeon VII": "RX models belongs to the GCN 5 architecture",
        r"Radeon Graphics \(renoir": "this model belongs to the GCN 5th gen architecture (Vega)",
        r"Radeon\s*(\(TM\)\s*)?Vega 8 (Graphics )?\(raven[^)]+\)": "this model belongs to the GCN 5th gen architecture (Vega)",
        r"Radeon\s*(\(TM\)\s*)?(Pro\s*)?WX\s*(5\d{3}\b)": "this model belongs to the GCN 4th gen architecture",
        r"FirePro": "FirePro models belongs to the GCN 1st gen or newer architecture",
        r"HD\s*7\d{3}": "HD 7XXX models belongs to the GCN 1st gen or newer architecture",
        r"(Radeon\s*)?RX\s*([5-7]\d{3})(M|X)?(\s*(XT|Series|S|XTX))?\b": "RX models belongs to the GCN 1st gen or newer architecture",
        r"(Radeon\s*)?(RX\s*)?6(3|4)0\b": "it has Polaris 23 chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?62(0|5)\b": "it has Polaris 24 chip that belongs to GCN 3st gen architecture",
        r"(Radeon\s*)?610\b": "it has Banks chip that belongs to GCN 1st gen architecture",
        r"(Radeon\s*)?RX\s*580(X?)\b": "it has Polaris 20 XT chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?RX\s*570\b": "it has Ellesmere Pro chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?RX\s*560X\b": "it has Polaris 31 XL chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?RX\s*560\b": "it has Baffin XT chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?5(40X|50X)\b": "it has Polaris 23 XT chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?RX\s*5(40|50)\b": "it has Lexa Pro chip that belongs to GCN 4th gen architecture",
        r"(Radeon\s*)?(\(TM\)\s*)?RX\s*4[6-8]0(\b|D)": "it has Ellesmere chip that belongs to GCN 4st gen architecture",
        r"(Radeon\s*)?5(30X|35)\b": "it has Polaris 24 XT chip that belongs to GCN 3rd gen architecture",
        r"(Radeon\s*)?530\b": "it has Weston chip that belongs to GCN 3rd gen architecture",
        r"(Radeon\s*)?520\b": "it has Banks chip that belongs to GCN 1st gen architecture",
        r"(Radeon\s*)?(\(TM\)\s*)?R4": "Radeon R4 models belongs to the GCN 1st gen or newer architecture",
        r"(Radeon\s*)?(\(TM\)\s*)?R5 (M)?335": "Radeon R5 M335 belongs to the GCN 1st gen architecture",
        r"(Radeon\s*)?(\(TM\)\s*)?R7 (M)?2\d{2}(E|X)?\b": "Radeon R7 200 models belongs to GCN 1st or 2nd gen architecture",
        r"(Radeon\s*)?(\(TM\)\s*)?R5 (M)?24\d(E|X)?\b": "Radeon R5 240 models belongs to GCN 1st gen architecture",
        # r"Radeon\s*(\(TM\)\s*)?(Pro\s*)?Vega (Pro\s*)?": "this model belongs to the GCN 4th gen architecture",
        # Add more model-to-architecture mappings as needed
    }

    unsupported_models = {
        r"HD ([5-6])\d{3}": "HD {0}XXX models have TeraScale architecture that is older than GCN 1st gen",
        r"Radeon R5 (M)?2(2|3)\d(X)?\b": "Radeon R5 220/230 models belongs to Terascale 2 architecture that is older than GCN 1st gen",
        r"(AMD\s*ATI\s*)?Radeon\s*680M": "AMD ATI Radeon 680M has TeraScale architecture that is older than GCN 1st gen",
        # Add more model-to-architecture mappings as needed
    }

    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)


def _check_nvidia(graphics_card_info):
    supported_models = {
        r"(GeForce )?(RTX\s*)?(?<!\d)([2-4])0[5-9]\d(\s*(RTX|Ti))?\b": "RTX {2}0 series are newer than GTX 400",
        r"(GeForce )?(GTX\s*)?(?<!\d)16[5-9]\d(\s*(GTX|Ti))?\b": "GTX 16 series are newer than GTX 400",
        r"(GeForce )?(GTX\s*)?(?<!\d)10[5-9]\d(\s*(GTX|Ti))?\b": "GTX 10 series are newer than GTX 400",
        r"(GTX )?TITAN": "GTX TITAN models are newer than GTX 400",
        r"(RTX )?\bA(\d+)": "RTX A models are newer than GTX 400",
        r"Quadro FX \d+": "Quadro FX series uses a Quadro-based architecture",
        r"Quadro RTX \d+": "Quadro RTX series uses a Quadro-based architecture",
        r"Quadro (K|M|P|GP|GV)?\d+(M)?": "it uses a Quadro-based architecture",
        r"NVS 8\d{2}(s)?\b": "it uses a Maxwell based architecture",
        r"(Quadro )?NVS 110M\b": "it uses a Maxwell based architecture",
        r"(GeForce )?GT 730\b": "GeForce from 700 series are newer than GTX 400. It also has 2 or 4 GB",
        r"(GeForce )?GTX ([4-9])\d{2}(\s*(GTX|Ti))?\b": "GPUs from GTX {1}00 series are newer than GTX 400",
        r"(GeForce )?\bMX\d{3}\b": "MX models are newer than GTX 400",
        r"Tesla (.+)": "it has a Tesla architecture",
        # Add more model-to-architecture mappings as needed
    }

    unsupported_models = {
        r"(GeForce )(GTX )?3\d{2}": "GTX 3XX models are older than GeForce 400",
        r"(Quadro )?NVS 50\b": "although quadro, it only supports opengl 1.3 and is older than 10 years",
        r"(Quadro )?NVS \d{3}(s)?\b": "it is older than 10 years",
        r"(Quadro )?NVS 1[1-2]0M\b": "it is Curie-based and older than 10 years",
        r"(Quadro )?NVS 1\d{2}M\b": "although it is Tesla-based it is older than 10 years",
        r"(Quadro )?NVS 4200M\b": "although it has Fermi architecture (newer than Tesla) it is older than 10 years",
        # Add unsupported model-to-architecture mappings if needed
    }

    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)


def _check_intel(graphics_card_info):
    supported_models = {
        r"(Mesa\s*)?(Iris\s*)?Xe Graphics": "Tiger Lake is newer than Broadwell architecture",
        r"Iris Plus Graphics G7": "Ice Lake is newer than Broadwell architecture",
        r"UHD\s*(Graphics )?6[3-5]\d\b": "Coffee Lake or Comet Lake are newer than Broadwell architecture",
        r"UHD\s*(Graphics )?62\d\b": "Kaby Lake is newer than Broadwell architecture",
        r"HD\s*(Graphics )?(P)?6[1-3]\d\b": "Kaby Lake is newer than Broadwell architecture",
        r"UHD\s*(Graphics )?60\d": "Gemini Lake is newer than Broadwell architecture",
        r"UHD Graphics": "Kaby Lake, Coffee Lake or Comet Lake are newer than Broadwell architecture",
        r"Iris": "Coffee Lake is newer than Broadwell architecture",
        r"HD (Graphics )?5\d{2}\b": "Skylake is newer than Broadwell architecture",
        r"Iris (Graphics )?6\d{3}\b": "it has the Broadwell architecture",
        r"Intel(\(R\))? (Arc(\(TM\))?\s*)?(A)?7\d{2}\b": "the A770 model is based on the Intel Arc architecture that is newer than Broadwell",
        r"Intel\s*(Arc\s*)?(A)?7\d{2}\b": "the A770 model is based on the Intel Arc architecture that is newer than Broadwell",
        # Add more model-to-architecture mappings as needed
    }

    unsupported_models = {
        r"HD (Graphics )?4\d{3}\b": "it has the Haswell architecture that is older than Broadwell architecture",
        r"HD Graphics 3\d{3}\b": "Sandy Bridge is older than Broadwell architecture"
        # Add unsupported model-to-architecture mappings if needed
    }

    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)


def _check_apple(graphics_card_info):
    supported_models = {
        r"(Apple\s*)?(`)?\bM1(`)?(\s*Max)?": "it is one of the new ARM-based system designed by Apple Inc",
        r"(Apple\s*)?(`)?\bM2(`)?(\s*Max)?": "it is one of the new ARM-based system designed by Apple Inc",
        # Add more model-to-architecture mappings as needed
    }

    unsupported_models = {
        # Add unsupported model-to-architecture mappings if needed
    }

    return _check_graphics_card_info(supported_models, unsupported_models, graphics_card_info)


def _check_apple_os_version(os_version, is_apple_silicon):
    major, minor = map(int, os_version.split(".")[:2])
    if is_apple_silicon:
        if major >= 11:
            return True
    else:
        if major >= 10 and minor >= 15:
            return True
    return False


def gpu_checker_get_message(text):
    is_supported = False
    vendor = None
    model = None
    descr = None

    if "nvidia" in text.lower() or "rtx" in text.lower() or "gtx" in text.lower() or "geforce" in text.lower():
        vendor = 'NVIDIA'
        is_supported, model, descr = _check_nvidia(text)

    elif "amd " in text.lower() or "ati " in text.lower() or "radeon" in text.lower():
        vendor = 'AMD'
        is_supported, model, descr = _check_amd(text)

    elif "intel" in text.lower():
        vendor = 'Intel'
        is_supported, model, descr = _check_intel(text)

    elif "apple" in text.lower() or re.search(r'\bM1\b', text):
        vendor = 'Apple'
        is_supported, model, descr = _check_apple(text)

    if not vendor:
        return "Could not find graphics card information"
    elif not model:
        return f"Could not determine the card model from {vendor}"

    message = f"The {vendor} card {model} is {'supported' if is_supported else 'not supported'} as {descr}"
    if not is_supported:
        message += """

This GPU is below the minimum requirements for Blender, so Blender no longer provide support for it. https://www.blender.org/download/requirements/
Installing the latest graphics driver sometimes helps to make such GPUs work, see here for more information. https://docs.blender.org/manual/en/dev/troubleshooting/gpu/index.html
If that doesn't help, you can use Blender 2.79: https://www.blender.org/download/previous-versions/
"""

    return message


@router.get("/gpu_checker")
def gpu_checker(gpu_info: str = ""):
    message = gpu_checker_get_message(gpu_info)

    return {"message": message}
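`gpu_checker_get_message` is a pure function, so it can be exercised without the web server. A small sketch with made-up GPU strings:

```python
# Minimal sketch of calling the checker directly; the strings are illustrative examples.
from routers.tool_gpu_checker import gpu_checker_get_message

print(gpu_checker_get_message("NVIDIA GeForce GTX 1060/PCIe/SSE2"))  # matched as supported
print(gpu_checker_get_message("Intel HD Graphics 4000"))             # matched as unsupported
```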
routers/utils_gitea.py
ADDED
@@ -0,0 +1,89 @@
# utils_gitea.py

import json
import urllib.error
import urllib.parse
import urllib.request
from concurrent.futures import ThreadPoolExecutor, as_completed

BASE_API_URL = "https://projects.blender.org/api/v1"


def url_json_get(url, data=None):
    try:
        if data:
            data = json.dumps(data).encode('utf-8')
            request = urllib.request.Request(url, data=data, method='POST')
            request.add_header('Content-Type', 'application/json')
        else:
            request = urllib.request.Request(url)

        response = urllib.request.urlopen(request)
        response_data = json.loads(response.read())
        return response_data

    except urllib.error.URLError as ex:
        print("Error making HTTP request:", ex)
        return None


def url_json_get_all_pages(url, item_filter=None, limit=50, exclude=set(), verbose=False):
    assert limit <= 50, "50 is the maximum limit of items per page"

    url_for_page = f"{url}&limit={limit}&page="

    with urllib.request.urlopen(url_for_page + '1') as response:
        headers_first = response.info()
        json_data_first = json.loads(response.read())

    total_count = int(headers_first.get('X-Total-Count'))
    total_pages = (total_count + limit - 1) // limit

    def fetch_page(page):
        if page == 1:
            json_data = json_data_first
        else:
            json_data = url_json_get(url_for_page + str(page))

        if verbose:
            print(f"Fetched page {page}")

        data = []
        for item in json_data:
            if exclude and int(item["number"]) in exclude:
                continue
            data.append({k: item[k] for k in item_filter}
                        if item_filter else item)

        return data

    with ThreadPoolExecutor() as executor:
        futures = [executor.submit(fetch_page, page)
                   for page in range(1, total_pages + 1)]
        all_results = [future.result() for future in as_completed(futures)]

    return [item for sublist in all_results for item in sublist]


def gitea_json_issue_get(owner, repo, number):
    """
    Get issue/pull JSON data.
    """
    url = f"{BASE_API_URL}/repos/{owner}/{repo}/issues/{number}"
    return url_json_get(url)


def gitea_fetch_issues(owner, repo, state='all', labels='', issue_attr_filter=None, since=None, exclude=set()):
    query_params = {
        'labels': labels,
        'state': state,
        'type': 'issues'}

    if since:
        query_params['since'] = since

    BASE_API_URL = "https://projects.blender.org/api/v1"
    base_url = f"{BASE_API_URL}/repos/{owner}/{repo}/issues"
    encoded_query_params = urllib.parse.urlencode(query_params)
    issues_url = f"{base_url}?{encoded_query_params}"
    return url_json_get_all_pages(issues_url, item_filter=issue_attr_filter, exclude=exclude, verbose=True)
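`gitea_fetch_issues` builds the query string and pages through the results with a thread pool. A hedged sketch of fetching recently updated open issues (this makes live requests against projects.blender.org; the `since` timestamp is an arbitrary example value):

```python
# Minimal sketch of the fetch helper; run from the repository root.
from routers.utils_gitea import gitea_fetch_issues

issues = gitea_fetch_issues(
    "blender", "blender",
    state="open",
    issue_attr_filter={"number", "title", "updated_at"},
    since="2023-11-01T00:00:00Z",
)
print(len(issues), "issues fetched")
print(issues[0] if issues else "none")
```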
static/favicon.ico
ADDED
static/privace.txt
ADDED
@@ -0,0 +1,35 @@
**Privacy Policy for Blender Assistant**

Effective Date: November 24, 2023

This Privacy Policy outlines the information we do not collect and provides an overview of the usage of the Blender Assistant and endpoints accessible at `https://mano-wii-function-calling.hf.space/api/v1`.

### Information We Do Not Collect

Blender Assistant does not collect the following information:

- **Conversations**: The chat API used connects directly to OpenAI's servers, so only OpenAI has access to the conversations.
- **Personally Identifiable Information**: The API does not gather personally identifiable information such as IP or email addresses.
- **Cookies**: The API does not utilize cookies.

### API Description

The API provides a chat endpoint and tools that assist Blender users.

## Tools:
"/gpu_checker": Determine if a GPU model is supported by Blender.
"/bpy_doc": Returns the documentation for a bpy python object or an error description if not supported.
"/get_issue": Get the title, body, user and assets of the current report on the page.
"/get_messages": Get a list of all messages in the report.

### Your Consent

By using the Blender Assistant API, you consent to our privacy policy as described herein.

### Changes to Our Privacy Policy

If we decide to modify our privacy policy, we will update it and provide the revised version.

This document is CC-BY-SA. It was last updated on November 19, 2023.

For any questions or concerns regarding this privacy policy or the Blender 3D Information API, please contact us at [email protected].