Spaces:
Running
Running
Germano Cavalcante
committed on
Commit
•
c7f8eb7
1
Parent(s):
a78f82b
Revert "WORKAROUND to try to get around the error 'Max retries exceeded with url:'"
Browse files
This reverts commit a78f82bfb528fc3e9db823d897fde74069a3e36a.
routers/tool_find_related.py
CHANGED
@@ -1,17 +1,17 @@
|
|
1 |
# find_related.py
|
2 |
|
3 |
-
import os
|
4 |
import re
|
5 |
-
import threading
|
6 |
import torch
|
|
|
7 |
from datetime import datetime, timedelta
|
8 |
-
from fastapi import APIRouter
|
9 |
from sentence_transformers import SentenceTransformer, util
|
|
|
10 |
|
11 |
try:
|
12 |
from .utils_gitea import gitea_fetch_issues, gitea_json_issue_get
|
13 |
from config import settings
|
14 |
except:
|
|
|
15 |
import sys
|
16 |
from utils_gitea import gitea_fetch_issues, gitea_json_issue_get
|
17 |
sys.path.append(os.path.abspath(
|
@@ -77,8 +77,6 @@ class EmbeddingContext:
|
|
77 |
model_name = settings.embedding_model
|
78 |
|
79 |
if config_type == 'sbert':
|
80 |
-
# WORKAROUND to try to get around the error 'Max retries exceeded with url:'
|
81 |
-
os.environ['CURL_CA_BUNDLE'] = ''
|
82 |
self.model = SentenceTransformer(model_name, use_auth_token=False)
|
83 |
self.model.max_seq_length = self.TOKEN_LEN_MAX_FOR_EMBEDDING
|
84 |
print("Max Sequence Length:", self.model.max_seq_length)
|
|
|
1 |
# find_related.py
|
2 |
|
|
|
3 |
import re
|
|
|
4 |
import torch
|
5 |
+
import threading
|
6 |
from datetime import datetime, timedelta
|
|
|
7 |
from sentence_transformers import SentenceTransformer, util
|
8 |
+
from fastapi import APIRouter
|
9 |
|
10 |
try:
|
11 |
from .utils_gitea import gitea_fetch_issues, gitea_json_issue_get
|
12 |
from config import settings
|
13 |
except:
|
14 |
+
import os
|
15 |
import sys
|
16 |
from utils_gitea import gitea_fetch_issues, gitea_json_issue_get
|
17 |
sys.path.append(os.path.abspath(
|
|
|
77 |
model_name = settings.embedding_model
|
78 |
|
79 |
if config_type == 'sbert':
|
|
|
|
|
80 |
self.model = SentenceTransformer(model_name, use_auth_token=False)
|
81 |
self.model.max_seq_length = self.TOKEN_LEN_MAX_FOR_EMBEDDING
|
82 |
print("Max Sequence Length:", self.model.max_seq_length)
|