code (string, 161 to 67.2k chars) | apis (sequence, 1 to 24 items) | extract_api (string, 164 to 53.3k chars)
---|---|---|
from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
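    # Point the client at a non-existent endpoint so these tests can never reach
    # the real GenerativeAI backend.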
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
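    # Inspect the GenerateAnswerRequest that the synthesizer forwarded to the
    # mocked GenerativeServiceClient.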
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
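    # Expected ordering: grounding attributions first, then the nodes passed to
    # synthesize(), then additional_source_nodes.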
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.response_synthesizers.google.generativeai.set_google_config"
] | [((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (729, 768), False, 'from unittest.mock import MagicMock, patch\n'), ((982, 1041), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1000, 1041), False, 'import pytest\n'), ((1043, 1120), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (1048, 1120), False, 'from unittest.mock import MagicMock, patch\n'), ((3580, 3639), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (3598, 3639), False, 'import pytest\n'), ((3641, 3718), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (3646, 3718), False, 'from unittest.mock import MagicMock, patch\n'), ((6499, 6558), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (6517, 6558), False, 'import pytest\n'), ((6560, 6637), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (6565, 6637), False, 'from unittest.mock import MagicMock, patch\n'), ((7434, 7493), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7452, 7493), False, 'import pytest\n'), ((7495, 7572), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (7500, 7572), False, 'from unittest.mock import MagicMock, patch\n'), ((8355, 8414), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8373, 8414), False, 'import pytest\n'), ((8416, 8493), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (8421, 8493), False, 'from unittest.mock import MagicMock, patch\n'), ((9288, 9347), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (9306, 9347), False, 'import pytest\n'), ((9349, 9426), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (9354, 9426), False, 'from unittest.mock import MagicMock, patch\n'), ((540, 641), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 
'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (557, 641), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((838, 890), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (855, 890), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((904, 923), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (921, 923), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5137, 5174), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (5172, 5174), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7037, 7074), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (7072, 7074), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7965, 8002), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8000, 8002), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((8894, 8931), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8929, 8931), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((9819, 9856), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (9854, 9856), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((2786, 2804), 'pytest.approx', 'pytest.approx', (['(0.7)'], {}), '(0.7)\n', (2799, 2804), False, 'import pytest\n'), ((6163, 6181), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (6176, 6181), False, 'import pytest\n'), ((6348, 6366), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (6361, 6366), False, 'import pytest\n'), ((6477, 6495), 'pytest.approx', 'pytest.approx', (['(0.9)'], {}), '(0.9)\n', (6490, 6495), False, 'import pytest\n'), ((7084, 7108), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7097, 7108), False, 'import pytest\n'), ((8012, 8036), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8025, 8036), False, 'import pytest\n'), ((8941, 8965), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8954, 8965), False, 'import pytest\n'), ((9866, 9890), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9879, 9890), False, 'import pytest\n'), ((2253, 2413), 'google.ai.generativelanguage.SafetySetting', 'genai.SafetySetting', ([], {'category': 'genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 
'genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=genai.HarmCategory.\n HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai.SafetySetting.\n HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (2272, 2413), True, 'import google.ai.generativelanguage as genai\n'), ((6860, 6883), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (6873, 6883), True, 'import google.ai.generativelanguage as genai\n'), ((7792, 7815), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (7805, 7815), True, 'import google.ai.generativelanguage as genai\n'), ((8717, 8740), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (8730, 8740), True, 'import google.ai.generativelanguage as genai\n'), ((9647, 9670), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (9660, 9670), True, 'import google.ai.generativelanguage as genai\n'), ((5324, 5348), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (5332, 5348), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((5485, 5517), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""Additional node"""'}), "(text='Additional node')\n", (5493, 5517), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((7273, 7297), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (7281, 7297), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((8201, 8225), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (8209, 8225), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((9130, 9154), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (9138, 9154), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((10055, 10079), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (10063, 10079), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((1342, 1363), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (1352, 1363), True, 'import google.ai.generativelanguage as genai\n'), ((3938, 3959), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (3948, 3959), True, 'import google.ai.generativelanguage as genai\n'), ((1687, 1801), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/789"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/789', part_index=0)\n", (1731, 1801), True, 'import google.ai.generativelanguage as genai\n'), ((4282, 4396), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (4326, 4396), True, 'import google.ai.generativelanguage as genai\n'), ((4744, 4858), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': 
'"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (4788, 4858), True, 'import google.ai.generativelanguage as genai\n'), ((1522, 1563), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42."""'}), "(text='Meaning of life is 42.')\n", (1532, 1563), True, 'import google.ai.generativelanguage as genai\n'), ((4118, 4158), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (4128, 4158), True, 'import google.ai.generativelanguage as genai\n'), ((4610, 4641), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (4620, 4641), True, 'import google.ai.generativelanguage as genai\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
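# Illustrative call (the mode string selects a branch in create_global_handler
# below, and any extra keyword arguments are forwarded to that handler):
#   set_global_handler("simple")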
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((1068, 1103), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1088, 1103), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1161, 1204), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1189, 1204), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1262, 1307), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1292, 1307), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1361, 1402), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1387, 1402), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1458, 1491), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1476, 1491), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1544, 1584), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1569, 1584), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1635, 1666), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1651, 1666), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.argilla_callback import argilla_callback_handler
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.deepeval_callback import deepeval_callback_handler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "deepeval":
handler = deepeval_callback_handler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
elif eval_mode == "argilla":
handler = argilla_callback_handler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
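# Illustrative: set_global_handler("argilla", **kwargs) forwards all keyword
# arguments unchanged to argilla_callback_handler.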
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.deepeval_callback.deepeval_callback_handler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.argilla_callback.argilla_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((1144, 1179), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1164, 1179), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1237, 1280), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1265, 1280), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1338, 1383), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1368, 1383), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1437, 1478), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1463, 1478), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1534, 1567), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1552, 1567), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1620, 1660), 'llama_index.callbacks.deepeval_callback.deepeval_callback_handler', 'deepeval_callback_handler', ([], {}), '(**eval_params)\n', (1645, 1660), False, 'from llama_index.callbacks.deepeval_callback import deepeval_callback_handler\n'), ((1711, 1742), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1727, 1742), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n'), ((1794, 1833), 'llama_index.callbacks.argilla_callback.argilla_callback_handler', 'argilla_callback_handler', ([], {}), '(**eval_params)\n', (1818, 1833), False, 'from llama_index.callbacks.argilla_callback import argilla_callback_handler\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
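    # Shift by the max before exponentiating for numerical stability; the best
    # score maps to exp(0) == 1.0 and the rest fall into (0, 1].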
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
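        # Build one bulk "index" action per node; using the node id as the ES _id
        # makes re-adding the same node overwrite its previous document.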
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
        try:
            success, failed = await async_bulk(
                self.client,
                requests,
                chunk_size=self.batch_size,
                stats_only=True,
                refresh=True,
            )
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): vector store query, including the query embedding.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): vector store query, including the query embedding.
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
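            # Approximate kNN search; num_candidates is the per-shard candidate
            # pool, so 10x the requested top_k trades some latency for recall.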
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
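            # Merge the kNN and full-text result lists with Elasticsearch
            # reciprocal rank fusion (RRF).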
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships")
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
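            # Hybrid hits carry an RRF _rank (lower is better), so convert the
            # ranks into similarity-like scores where higher means more relevant.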
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3771), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3763, 3771), True, 'import numpy as np\n'), ((5436, 5456), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5454, 5456), False, 'import nest_asyncio\n'), ((16834, 16874), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16838, 16874), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12320, 12395), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12330, 12395), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9785, 9809), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9807, 9809), False, 'import asyncio\n'), ((11712, 11757), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11733, 11757), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12467, 12531), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12477, 12531), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13405, 13429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13427, 13429), False, 'import asyncio\n'), ((15612, 15636), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15634, 15636), False, 'import asyncio\n'), ((18734, 18765), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18755, 18765), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3807, 3829), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3813, 3829), True, 'import numpy as np\n'), ((11946, 11958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11956, 11958), False, 'import uuid\n'), ((19408, 19551), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19416, 19551), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.schema import BaseNode, MetadataMode, TextNode
from llama_index.vector_stores.types import (
MetadataFilters,
VectorStore,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(VectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
self.index_name = index_name
self.text_field = text_field
self.vector_field = vector_field
self.batch_size = batch_size
self.distance_strategy = distance_strategy
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index
return f"llama_index-py-vs/{llama_index.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query (embedding, mode, top k, filters)
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
                relationships = source.get("relationships") or {}  # default to {} so TextNode is not given None
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.schema.TextNode",
"llama_index.vector_stores.utils.metadata_dict_to_node",
"llama_index.vector_stores.utils.node_to_metadata_dict"
] | [((534, 553), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (543, 553), False, 'from logging import getLogger\n'), ((2379, 2432), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2411, 2432), False, 'import elasticsearch\n'), ((3755, 3771), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3763, 3771), True, 'import numpy as np\n'), ((5436, 5456), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5454, 5456), False, 'import nest_asyncio\n'), ((16834, 16874), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (16838, 16874), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12320, 12395), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12330, 12395), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((9785, 9809), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (9807, 9809), False, 'import asyncio\n'), ((11712, 11757), 'llama_index.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (11733, 11757), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12467, 12531), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (12477, 12531), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13405, 13429), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (13427, 13429), False, 'import asyncio\n'), ((15612, 15636), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (15634, 15636), False, 'import asyncio\n'), ((18734, 18765), 'llama_index.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (18755, 18765), False, 'from llama_index.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3807, 3829), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3813, 3829), True, 'import numpy as np\n'), ((11946, 11958), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (11956, 11958), False, 'import uuid\n'), ((19408, 19551), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (19416, 19551), False, 'from llama_index.schema import BaseNode, MetadataMode, TextNode\n')] |
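The aquery implementation above assembles the "knn", "query", and "rrf" sections of the Elasticsearch request body and optionally hands that body to a custom_query callable before sending it. A minimal usage sketch, assuming the surrounding class is llama_index's ElasticsearchStore (the import path may differ by version), an Elasticsearch reachable on localhost, an index that already holds documents, and illustrative embedding dimensions:
from llama_index.vector_stores.elasticsearch import ElasticsearchStore  # assumed path for the class above
from llama_index.vector_stores.types import VectorStoreQuery, VectorStoreQueryMode

store = ElasticsearchStore(index_name="demo_nodes", es_url="http://localhost:9200")

def widen_candidates(es_query, query):
    # custom_query receives the assembled body ("knn"/"query"/"rank") and may
    # modify it before it is sent; here it just raises the knn candidate pool.
    if "knn" in es_query:
        es_query["knn"]["num_candidates"] = max(es_query["knn"]["num_candidates"], 200)
    return es_query

result = store.query(
    VectorStoreQuery(
        query_str="what does the index contain?",  # used by the text clause in hybrid mode
        query_embedding=[0.1] * 1536,             # pre-computed embedding, illustrative dims
        similarity_top_k=5,
        mode=VectorStoreQueryMode.HYBRID,         # fills both the "knn" and "query" sections
    ),
    custom_query=widen_candidates,
)
print(result.ids, result.similarities)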
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
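create_global_handler is a plain dispatch over the supported eval_mode strings, and set_global_handler simply stores the result on the llama_index module. A minimal sketch, assuming the usual top-level re-export of set_global_handler; any extra keyword arguments are forwarded to the selected handler's constructor:
import llama_index

# "simple" selects SimpleLLMHandler; "wandb", "openinference", "arize_phoenix",
# "honeyhive" and "promptlayer" select the other handlers imported above.
llama_index.set_global_handler("simple")

# Equivalent to setting the module attribute directly:
# llama_index.global_handler = SimpleLLMHandler()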
"""Global eval handlers."""
from typing import Any
from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler
from llama_index.callbacks.base_handler import BaseCallbackHandler
from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler
from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler
from llama_index.callbacks.promptlayer_handler import PromptLayerHandler
from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler
from llama_index.callbacks.wandb_callback import WandbCallbackHandler
def set_global_handler(eval_mode: str, **eval_params: Any) -> None:
"""Set global eval handlers."""
import llama_index
llama_index.global_handler = create_global_handler(eval_mode, **eval_params)
def create_global_handler(eval_mode: str, **eval_params: Any) -> BaseCallbackHandler:
"""Get global eval handler."""
if eval_mode == "wandb":
handler: BaseCallbackHandler = WandbCallbackHandler(**eval_params)
elif eval_mode == "openinference":
handler = OpenInferenceCallbackHandler(**eval_params)
elif eval_mode == "arize_phoenix":
handler = arize_phoenix_callback_handler(**eval_params)
elif eval_mode == "honeyhive":
handler = honeyhive_callback_handler(**eval_params)
elif eval_mode == "promptlayer":
handler = PromptLayerHandler(**eval_params)
elif eval_mode == "simple":
handler = SimpleLLMHandler(**eval_params)
else:
raise ValueError(f"Eval mode {eval_mode} not supported.")
return handler
| [
"llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler",
"llama_index.callbacks.simple_llm_handler.SimpleLLMHandler",
"llama_index.callbacks.wandb_callback.WandbCallbackHandler",
"llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler",
"llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler",
"llama_index.callbacks.promptlayer_handler.PromptLayerHandler"
] | [((990, 1025), 'llama_index.callbacks.wandb_callback.WandbCallbackHandler', 'WandbCallbackHandler', ([], {}), '(**eval_params)\n', (1010, 1025), False, 'from llama_index.callbacks.wandb_callback import WandbCallbackHandler\n'), ((1083, 1126), 'llama_index.callbacks.open_inference_callback.OpenInferenceCallbackHandler', 'OpenInferenceCallbackHandler', ([], {}), '(**eval_params)\n', (1111, 1126), False, 'from llama_index.callbacks.open_inference_callback import OpenInferenceCallbackHandler\n'), ((1184, 1229), 'llama_index.callbacks.arize_phoenix_callback.arize_phoenix_callback_handler', 'arize_phoenix_callback_handler', ([], {}), '(**eval_params)\n', (1214, 1229), False, 'from llama_index.callbacks.arize_phoenix_callback import arize_phoenix_callback_handler\n'), ((1283, 1324), 'llama_index.callbacks.honeyhive_callback.honeyhive_callback_handler', 'honeyhive_callback_handler', ([], {}), '(**eval_params)\n', (1309, 1324), False, 'from llama_index.callbacks.honeyhive_callback import honeyhive_callback_handler\n'), ((1380, 1413), 'llama_index.callbacks.promptlayer_handler.PromptLayerHandler', 'PromptLayerHandler', ([], {}), '(**eval_params)\n', (1398, 1413), False, 'from llama_index.callbacks.promptlayer_handler import PromptLayerHandler\n'), ((1464, 1495), 'llama_index.callbacks.simple_llm_handler.SimpleLLMHandler', 'SimpleLLMHandler', ([], {}), '(**eval_params)\n', (1480, 1495), False, 'from llama_index.callbacks.simple_llm_handler import SimpleLLMHandler\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end-to-end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
            responder = GoogleTextSynthesizer.from_defaults(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
            the grounded response. These passages will always have no score;
            this is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.legacy.core.response.schema.Response"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
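A minimal sketch of driving the synthesizer above with caller-supplied passages. The import path below is an assumption about where this module lives in your install; it also assumes google-generativeai is installed and credentials are configured, and the question and passage text are illustrative:
from llama_index.legacy.response_synthesizers.google.generativeai import (
    GoogleTextSynthesizer,  # assumed module path for the class defined above
)

# Factory defaults: temperature=0.7, answer_style=ABSTRACTIVE (1), no safety settings.
synth = GoogleTextSynthesizer.from_defaults()

result = synth.get_response(
    query_str="When did the shop open?",
    text_chunks=["The shop opened in 1982 and moved downtown in 1999."],
)
print(result.answer)                  # grounded answer
print(result.attributed_passages)     # passages actually used by the model
print(result.answerable_probability)  # model's estimate that the answer is grounded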
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.legacy.bridge.pydantic import BaseModel # type: ignore
from llama_index.legacy.callbacks.schema import CBEventType, EventPayload
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.indices.query.schema import QueryBundle
from llama_index.legacy.prompts.mixin import PromptDictType
from llama_index.legacy.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.legacy.types import RESPONSE_TEXT_TYPE
from llama_index.legacy.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
"""
try:
import google.ai.generativelanguage as genai
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
Response's `metadata` may also have have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.indices.query.schema.QueryBundle",
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.legacy.core.response.schema.Response"
] | [((1114, 1141), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1131, 1141), False, 'import logging\n'), ((2809, 2842), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2840, 2842), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4901, 4950), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4905, 4950), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6844, 6870), 'llama_index.legacy.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6852, 6870), False, 'from llama_index.legacy.core.response.schema import Response\n'), ((6927, 6955), 'llama_index.legacy.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6938, 6955), False, 'from llama_index.legacy.indices.query.schema import QueryBundle\n'), ((8366, 8388), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8374, 8388), False, 'from llama_index.legacy.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
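# _to_llama_similarities rescales raw scores with exp(score - max(score)), so the
# best hit maps to 1.0 and lower scores decay toward 0; e.g. [2.0, 1.0, 0.0]
# becomes roughly [1.0, 0.37, 0.14].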
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
        if await self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query (embedding, mode, top k, filters)
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
            query (VectorStoreQuery): the vector store query (embedding, mode, top k, filters)
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
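Putting the store above to work end to end means building TextNode objects that already carry embeddings, bulk-adding them, and then querying. A minimal sketch, assuming the elasticsearch module path below (the TextNode and VectorStoreQuery imports match the module's own), an Elasticsearch reachable on localhost, and toy 3-dimensional embeddings:
from llama_index.legacy.schema import TextNode
from llama_index.legacy.vector_stores.elasticsearch import ElasticsearchStore  # assumed module path
from llama_index.legacy.vector_stores.types import VectorStoreQuery

store = ElasticsearchStore(index_name="demo_nodes", es_url="http://localhost:9200")

nodes = [
    TextNode(text="Elasticsearch stores dense vectors.", embedding=[0.1, 0.2, 0.3]),
    TextNode(text="Cosine is the default distance strategy.", embedding=[0.3, 0.2, 0.1]),
]
store.add(nodes)  # creates the index on first use, inferring dims from the first node

result = store.query(VectorStoreQuery(query_embedding=[0.1, 0.2, 0.3], similarity_top_k=1))
print(result.nodes[0].text, result.similarities)

# Remove everything whose metadata.ref_doc_id matches (illustrative id):
store.delete(ref_doc_id="demo-doc-1")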
"""Elasticsearch vector store."""
import asyncio
import uuid
from logging import getLogger
from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast
import nest_asyncio
import numpy as np
from llama_index.legacy.bridge.pydantic import PrivateAttr
from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode
from llama_index.legacy.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.legacy.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
logger = getLogger(__name__)
DISTANCE_STRATEGIES = Literal[
"COSINE",
"DOT_PRODUCT",
"EUCLIDEAN_DISTANCE",
]
def _get_elasticsearch_client(
*,
es_url: Optional[str] = None,
cloud_id: Optional[str] = None,
api_key: Optional[str] = None,
username: Optional[str] = None,
password: Optional[str] = None,
) -> Any:
"""Get AsyncElasticsearch client.
Args:
es_url: Elasticsearch URL.
cloud_id: Elasticsearch cloud ID.
api_key: Elasticsearch API key.
username: Elasticsearch username.
password: Elasticsearch password.
Returns:
AsyncElasticsearch client.
Raises:
ConnectionError: If Elasticsearch client cannot connect to Elasticsearch.
"""
try:
import elasticsearch
except ImportError:
raise ImportError(
"Could not import elasticsearch python package. "
"Please install it with `pip install elasticsearch`."
)
if es_url and cloud_id:
raise ValueError(
"Both es_url and cloud_id are defined. Please provide only one."
)
connection_params: Dict[str, Any] = {}
if es_url:
connection_params["hosts"] = [es_url]
elif cloud_id:
connection_params["cloud_id"] = cloud_id
else:
raise ValueError("Please provide either elasticsearch_url or cloud_id.")
if api_key:
connection_params["api_key"] = api_key
elif username and password:
connection_params["basic_auth"] = (username, password)
sync_es_client = elasticsearch.Elasticsearch(
**connection_params, headers={"user-agent": ElasticsearchStore.get_user_agent()}
)
async_es_client = elasticsearch.AsyncElasticsearch(**connection_params)
try:
sync_es_client.info() # so don't have to 'await' to just get info
except Exception as e:
logger.error(f"Error connecting to Elasticsearch: {e}")
raise
return async_es_client
def _to_elasticsearch_filter(standard_filters: MetadataFilters) -> Dict[str, Any]:
"""Convert standard filters to Elasticsearch filter.
Args:
standard_filters: Standard Llama-index filters.
Returns:
Elasticsearch filter.
"""
if len(standard_filters.legacy_filters()) == 1:
filter = standard_filters.legacy_filters()[0]
return {
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
else:
operands = []
for filter in standard_filters.legacy_filters():
operands.append(
{
"term": {
f"metadata.{filter.key}.keyword": {
"value": filter.value,
}
}
}
)
return {"bool": {"must": operands}}
def _to_llama_similarities(scores: List[float]) -> List[float]:
if scores is None or len(scores) == 0:
return []
scores_to_norm: np.ndarray = np.array(scores)
return np.exp(scores_to_norm - np.max(scores_to_norm)).tolist()
class ElasticsearchStore(BasePydanticVectorStore):
"""Elasticsearch vector store.
Args:
index_name: Name of the Elasticsearch index.
es_client: Optional. Pre-existing AsyncElasticsearch client.
es_url: Optional. Elasticsearch URL.
es_cloud_id: Optional. Elasticsearch cloud ID.
es_api_key: Optional. Elasticsearch API key.
es_user: Optional. Elasticsearch username.
es_password: Optional. Elasticsearch password.
text_field: Optional. Name of the Elasticsearch field that stores the text.
vector_field: Optional. Name of the Elasticsearch field that stores the
embedding.
batch_size: Optional. Batch size for bulk indexing. Defaults to 200.
distance_strategy: Optional. Distance strategy to use for similarity search.
Defaults to "COSINE".
Raises:
ConnectionError: If AsyncElasticsearch client cannot connect to Elasticsearch.
ValueError: If neither es_client nor es_url nor es_cloud_id is provided.
"""
stores_text: bool = True
index_name: str
es_client: Optional[Any]
es_url: Optional[str]
es_cloud_id: Optional[str]
es_api_key: Optional[str]
es_user: Optional[str]
es_password: Optional[str]
text_field: str = "content"
vector_field: str = "embedding"
batch_size: int = 200
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE"
_client = PrivateAttr()
def __init__(
self,
index_name: str,
es_client: Optional[Any] = None,
es_url: Optional[str] = None,
es_cloud_id: Optional[str] = None,
es_api_key: Optional[str] = None,
es_user: Optional[str] = None,
es_password: Optional[str] = None,
text_field: str = "content",
vector_field: str = "embedding",
batch_size: int = 200,
distance_strategy: Optional[DISTANCE_STRATEGIES] = "COSINE",
) -> None:
nest_asyncio.apply()
if es_client is not None:
self._client = es_client.options(
headers={"user-agent": self.get_user_agent()}
)
elif es_url is not None or es_cloud_id is not None:
self._client = _get_elasticsearch_client(
es_url=es_url,
username=es_user,
password=es_password,
cloud_id=es_cloud_id,
api_key=es_api_key,
)
else:
raise ValueError(
"""Either provide a pre-existing AsyncElasticsearch or valid \
credentials for creating a new connection."""
)
super().__init__(
index_name=index_name,
es_client=es_client,
es_url=es_url,
es_cloud_id=es_cloud_id,
es_api_key=es_api_key,
es_user=es_user,
es_password=es_password,
text_field=text_field,
vector_field=vector_field,
batch_size=batch_size,
distance_strategy=distance_strategy,
)
@property
def client(self) -> Any:
"""Get async elasticsearch client."""
return self._client
@staticmethod
def get_user_agent() -> str:
"""Get user agent for elasticsearch client."""
import llama_index.legacy
return f"llama_index-py-vs/{llama_index.legacy.__version__}"
async def _create_index_if_not_exists(
self, index_name: str, dims_length: Optional[int] = None
) -> None:
"""Create the AsyncElasticsearch index if it doesn't already exist.
Args:
index_name: Name of the AsyncElasticsearch index to create.
dims_length: Length of the embedding vectors.
"""
if self.client.indices.exists(index=index_name):
logger.debug(f"Index {index_name} already exists. Skipping creation.")
else:
if dims_length is None:
raise ValueError(
"Cannot create index without specifying dims_length "
"when the index doesn't already exist. We infer "
"dims_length from the first embedding. Check that "
"you have provided an embedding function."
)
if self.distance_strategy == "COSINE":
similarityAlgo = "cosine"
elif self.distance_strategy == "EUCLIDEAN_DISTANCE":
similarityAlgo = "l2_norm"
elif self.distance_strategy == "DOT_PRODUCT":
similarityAlgo = "dot_product"
else:
raise ValueError(f"Similarity {self.distance_strategy} not supported.")
index_settings = {
"mappings": {
"properties": {
self.vector_field: {
"type": "dense_vector",
"dims": dims_length,
"index": True,
"similarity": similarityAlgo,
},
self.text_field: {"type": "text"},
"metadata": {
"properties": {
"document_id": {"type": "keyword"},
"doc_id": {"type": "keyword"},
"ref_doc_id": {"type": "keyword"},
}
},
}
}
}
logger.debug(
f"Creating index {index_name} with mappings {index_settings['mappings']}"
)
await self.client.indices.create(index=index_name, **index_settings)
def add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the Elasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch['async'] python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
return asyncio.get_event_loop().run_until_complete(
self.async_add(nodes, create_index_if_not_exists=create_index_if_not_exists)
)
async def async_add(
self,
nodes: List[BaseNode],
*,
create_index_if_not_exists: bool = True,
**add_kwargs: Any,
) -> List[str]:
"""Asynchronous method to add nodes to Elasticsearch index.
Args:
nodes: List of nodes with embeddings.
create_index_if_not_exists: Optional. Whether to create
the AsyncElasticsearch index if it
doesn't already exist.
Defaults to True.
Returns:
List of node IDs that were added to the index.
Raises:
ImportError: If elasticsearch python package is not installed.
BulkIndexError: If AsyncElasticsearch async_bulk indexing fails.
"""
try:
from elasticsearch.helpers import BulkIndexError, async_bulk
except ImportError:
raise ImportError(
"Could not import elasticsearch[async] python package. "
"Please install it with `pip install 'elasticsearch[async]'`."
)
if len(nodes) == 0:
return []
if create_index_if_not_exists:
dims_length = len(nodes[0].get_embedding())
await self._create_index_if_not_exists(
index_name=self.index_name, dims_length=dims_length
)
embeddings: List[List[float]] = []
texts: List[str] = []
metadatas: List[dict] = []
ids: List[str] = []
for node in nodes:
ids.append(node.node_id)
embeddings.append(node.get_embedding())
texts.append(node.get_content(metadata_mode=MetadataMode.NONE))
metadatas.append(node_to_metadata_dict(node, remove_text=True))
requests = []
return_ids = []
for i, text in enumerate(texts):
metadata = metadatas[i] if metadatas else {}
_id = ids[i] if ids else str(uuid.uuid4())
request = {
"_op_type": "index",
"_index": self.index_name,
self.vector_field: embeddings[i],
self.text_field: text,
"metadata": metadata,
"_id": _id,
}
requests.append(request)
return_ids.append(_id)
await async_bulk(
self.client, requests, chunk_size=self.batch_size, refresh=True
)
try:
success, failed = await async_bulk(
self.client, requests, stats_only=True, refresh=True
)
logger.debug(f"Added {success} and failed to add {failed} texts to index")
logger.debug(f"added texts {ids} to index")
return return_ids
except BulkIndexError as e:
logger.error(f"Error adding texts: {e}")
firstError = e.errors[0].get("index", {}).get("error", {})
logger.error(f"First error reason: {firstError.get('reason')}")
raise
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to Elasticsearch delete_by_query.
Raises:
Exception: If Elasticsearch delete_by_query fails.
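        Example:
            Illustrative only; ``store`` is assumed to be an instance of this
            vector store and the ID one that was previously ingested:
                store.delete(ref_doc_id="my-ref-doc-id")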
"""
return asyncio.get_event_loop().run_until_complete(
self.adelete(ref_doc_id, **delete_kwargs)
)
async def adelete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Async delete node from Elasticsearch index.
Args:
ref_doc_id: ID of the node to delete.
delete_kwargs: Optional. Additional arguments to
pass to AsyncElasticsearch delete_by_query.
Raises:
Exception: If AsyncElasticsearch delete_by_query fails.
"""
try:
async with self.client as client:
res = await client.delete_by_query(
index=self.index_name,
query={"term": {"metadata.ref_doc_id": ref_doc_id}},
refresh=True,
**delete_kwargs,
)
if res["deleted"] == 0:
logger.warning(f"Could not find text {ref_doc_id} to delete")
else:
logger.debug(f"Deleted text {ref_doc_id} from index")
except Exception:
logger.error(f"Error deleting text: {ref_doc_id}")
raise
def query(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes.
Args:
query_embedding (List[float]): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the Elasticsearch query.
es_filter: Optional. Elasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If Elasticsearch query fails.
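        Example:
            An illustrative sketch (not part of the original docstring) of a
            ``custom_query`` hook; ``store`` and ``query`` are assumed to exist:
                def cap_size(es_query, query):
                    es_query["size"] = 5
                    return es_query
                result = store.query(query, custom_query=cap_size)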
"""
return asyncio.get_event_loop().run_until_complete(
self.aquery(query, custom_query, es_filter, **kwargs)
)
async def aquery(
self,
query: VectorStoreQuery,
custom_query: Optional[
Callable[[Dict, Union[VectorStoreQuery, None]], Dict]
] = None,
es_filter: Optional[List[Dict]] = None,
**kwargs: Any,
) -> VectorStoreQueryResult:
"""Asynchronous query index for top k most similar nodes.
Args:
query_embedding (VectorStoreQuery): query embedding
custom_query: Optional. custom query function that takes in the es query
body and returns a modified query body.
This can be used to add additional query
parameters to the AsyncElasticsearch query.
es_filter: Optional. AsyncElasticsearch filter to apply to the
query. If filter is provided in the query,
this filter will be ignored.
Returns:
VectorStoreQueryResult: Result of the query.
Raises:
Exception: If AsyncElasticsearch query fails.
"""
query_embedding = cast(List[float], query.query_embedding)
es_query = {}
if query.filters is not None and len(query.filters.legacy_filters()) > 0:
filter = [_to_elasticsearch_filter(query.filters)]
else:
filter = es_filter or []
if query.mode in (
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.HYBRID,
):
es_query["knn"] = {
"filter": filter,
"field": self.vector_field,
"query_vector": query_embedding,
"k": query.similarity_top_k,
"num_candidates": query.similarity_top_k * 10,
}
if query.mode in (
VectorStoreQueryMode.TEXT_SEARCH,
VectorStoreQueryMode.HYBRID,
):
es_query["query"] = {
"bool": {
"must": {"match": {self.text_field: {"query": query.query_str}}},
"filter": filter,
}
}
if query.mode == VectorStoreQueryMode.HYBRID:
es_query["rank"] = {"rrf": {}}
if custom_query is not None:
es_query = custom_query(es_query, query)
logger.debug(f"Calling custom_query, Query body now: {es_query}")
async with self.client as client:
response = await client.search(
index=self.index_name,
**es_query,
size=query.similarity_top_k,
_source={"excludes": [self.vector_field]},
)
top_k_nodes = []
top_k_ids = []
top_k_scores = []
hits = response["hits"]["hits"]
for hit in hits:
source = hit["_source"]
metadata = source.get("metadata", None)
text = source.get(self.text_field, None)
node_id = hit["_id"]
try:
node = metadata_dict_to_node(metadata)
node.text = text
except Exception:
# Legacy support for old metadata format
logger.warning(
f"Could not parse metadata from hit {hit['_source']['metadata']}"
)
node_info = source.get("node_info")
relationships = source.get("relationships") or {}
start_char_idx = None
end_char_idx = None
if isinstance(node_info, dict):
start_char_idx = node_info.get("start", None)
end_char_idx = node_info.get("end", None)
node = TextNode(
text=text,
metadata=metadata,
id_=node_id,
start_char_idx=start_char_idx,
end_char_idx=end_char_idx,
relationships=relationships,
)
top_k_nodes.append(node)
top_k_ids.append(node_id)
top_k_scores.append(hit.get("_rank", hit["_score"]))
if query.mode == VectorStoreQueryMode.HYBRID:
total_rank = sum(top_k_scores)
top_k_scores = [total_rank - rank / total_rank for rank in top_k_scores]
return VectorStoreQueryResult(
nodes=top_k_nodes,
ids=top_k_ids,
similarities=_to_llama_similarities(top_k_scores),
)
| [
"llama_index.legacy.vector_stores.utils.metadata_dict_to_node",
"llama_index.legacy.bridge.pydantic.PrivateAttr",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.vector_stores.utils.node_to_metadata_dict"
] | [((640, 659), 'logging.getLogger', 'getLogger', (['__name__'], {}), '(__name__)\n', (649, 659), False, 'from logging import getLogger\n'), ((2343, 2396), 'elasticsearch.AsyncElasticsearch', 'elasticsearch.AsyncElasticsearch', ([], {}), '(**connection_params)\n', (2375, 2396), False, 'import elasticsearch\n'), ((3719, 3735), 'numpy.array', 'np.array', (['scores'], {}), '(scores)\n', (3727, 3735), True, 'import numpy as np\n'), ((5274, 5287), 'llama_index.legacy.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (5285, 5287), False, 'from llama_index.legacy.bridge.pydantic import PrivateAttr\n'), ((5793, 5813), 'nest_asyncio.apply', 'nest_asyncio.apply', ([], {}), '()\n', (5811, 5813), False, 'import nest_asyncio\n'), ((17423, 17463), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (17427, 17463), False, 'from typing import Any, Callable, Dict, List, Literal, Optional, Union, cast\n'), ((12909, 12984), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'chunk_size': 'self.batch_size', 'refresh': '(True)'}), '(self.client, requests, chunk_size=self.batch_size, refresh=True)\n', (12919, 12984), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((10374, 10398), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (10396, 10398), False, 'import asyncio\n'), ((12301, 12346), 'llama_index.legacy.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)'}), '(node, remove_text=True)\n', (12322, 12346), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13056, 13120), 'elasticsearch.helpers.async_bulk', 'async_bulk', (['self.client', 'requests'], {'stats_only': '(True)', 'refresh': '(True)'}), '(self.client, requests, stats_only=True, refresh=True)\n', (13066, 13120), False, 'from elasticsearch.helpers import BulkIndexError, async_bulk\n'), ((13994, 14018), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (14016, 14018), False, 'import asyncio\n'), ((16201, 16225), 'asyncio.get_event_loop', 'asyncio.get_event_loop', ([], {}), '()\n', (16223, 16225), False, 'import asyncio\n'), ((19323, 19354), 'llama_index.legacy.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (['metadata'], {}), '(metadata)\n', (19344, 19354), False, 'from llama_index.legacy.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((3771, 3793), 'numpy.max', 'np.max', (['scores_to_norm'], {}), '(scores_to_norm)\n', (3777, 3793), True, 'import numpy as np\n'), ((12535, 12547), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (12545, 12547), False, 'import uuid\n'), ((20003, 20146), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': 'text', 'metadata': 'metadata', 'id_': 'node_id', 'start_char_idx': 'start_char_idx', 'end_char_idx': 'end_char_idx', 'relationships': 'relationships'}), '(text=text, metadata=metadata, id_=node_id, start_char_idx=\n start_char_idx, end_char_idx=end_char_idx, relationships=relationships)\n', (20011, 20146), False, 'from llama_index.legacy.schema import BaseNode, MetadataMode, TextNode\n')] |
"""Google Generative AI Vector Store.
The GenAI Semantic Retriever API is a managed end-to-end service that allows
developers to create a corpus of documents to perform semantic search on
related passages given a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
import uuid
from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast
from llama_index.core.bridge.pydantic import ( # type: ignore
BaseModel,
Field,
PrivateAttr,
)
from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryResult,
)
if TYPE_CHECKING:
from google.auth import credentials
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_default_doc_id = "default-doc"
"""Google GenerativeAI service context.
Use this to provide the correct service context for `GoogleVectorStore`.
See the docstring for `GoogleVectorStore` for usage example.
"""
def set_google_config(
*,
api_endpoint: Optional[str] = None,
user_agent: Optional[str] = None,
page_size: Optional[int] = None,
auth_credentials: Optional["credentials.Credentials"] = None,
**kwargs: Any,
) -> None:
"""
Set the configuration for Google Generative AI API.
    Parameters are optional. Normally, the defaults should work fine.
If provided, they will override the default values in the Config class.
See the docstring in `genai_extension.py` for more details.
auth_credentials: Optional["credentials.Credentials"] = None,
Use this to pass Google Auth credentials such as using a service account.
    Refer to the auth credentials documentation:
https://developers.google.com/identity/protocols/oauth2/service-account#creatinganaccount.
Example:
from google.oauth2 import service_account
credentials = service_account.Credentials.from_service_account_file(
"/path/to/service.json",
scopes=[
"https://www.googleapis.com/auth/cloud-platform",
"https://www.googleapis.com/auth/generative-language.retriever",
],
)
set_google_config(auth_credentials=credentials)
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
config_attrs = {
"api_endpoint": api_endpoint,
"user_agent": user_agent,
"page_size": page_size,
"auth_credentials": auth_credentials,
"testing": kwargs.get("testing", None),
}
attrs = {k: v for k, v in config_attrs.items() if v is not None}
config = genaix.Config(**attrs)
genaix.set_config(config)
class NoSuchCorpusException(Exception):
def __init__(self, *, corpus_id: str) -> None:
super().__init__(f"No such corpus {corpus_id} found")
class GoogleVectorStore(BasePydanticVectorStore):
"""Google GenerativeAI Vector Store.
Currently, it computes the embedding vectors on the server side.
Example:
google_vector_store = GoogleVectorStore.from_corpus(
corpus_id="my-corpus-id",
include_metadata=True,
metadata_keys=['file_name', 'creation_date']
)
index = VectorStoreIndex.from_vector_store(
vector_store=google_vector_store
)
Attributes:
corpus_id: The corpus ID that this vector store instance will read and
write to.
include_metadata (bool): Indicates whether to include custom metadata in the query
results. Defaults to False.
metadata_keys (Optional[List[str]]): Specifies which metadata keys to include in the
query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
"""
# Semantic Retriever stores the document node's text as string and embeds
# the vectors on the server automatically.
stores_text: bool = True
is_embedding_query: bool = False
    # This is not Google's corpus name but an ID generated in the LlamaIndex
# world.
corpus_id: str = Field(frozen=True)
"""Corpus ID that this instance of the vector store is using."""
# Configuration options for handling metadata in query results
include_metadata: bool = False
metadata_keys: Optional[List[str]] = None
_client: Any = PrivateAttr()
def __init__(self, *, client: Any, **kwargs: Any):
"""Raw constructor.
Use the class method `from_corpus` or `create_corpus` instead.
Args:
client: The low-level retriever class from google.ai.generativelanguage.
"""
try:
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(**kwargs)
assert isinstance(client, genai.RetrieverServiceClient)
self._client = client
@classmethod
def from_corpus(
cls,
*,
corpus_id: str,
include_metadata: bool = False,
metadata_keys: Optional[List[str]] = None,
) -> "GoogleVectorStore":
"""Create an instance that points to an existing corpus.
Args:
corpus_id (str): ID of an existing corpus on Google's server.
include_metadata (bool, optional): Specifies whether to include custom metadata in the
query results. Defaults to False, meaning metadata will not be included.
metadata_keys (Optional[List[str]], optional): Specifies which metadata keys to include
in the query results if include_metadata is set to True. If None, all metadata keys
are included. Defaults to None.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
NoSuchCorpusException if no such corpus is found.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.from_corpus(corpus_id={corpus_id})")
client = genaix.build_semantic_retriever()
if genaix.get_corpus(corpus_id=corpus_id, client=client) is None:
raise NoSuchCorpusException(corpus_id=corpus_id)
return cls(
corpus_id=corpus_id,
client=client,
include_metadata=include_metadata,
metadata_keys=metadata_keys,
)
@classmethod
def create_corpus(
cls, *, corpus_id: Optional[str] = None, display_name: Optional[str] = None
) -> "GoogleVectorStore":
"""Create an instance that points to a newly created corpus.
Examples:
store = GoogleVectorStore.create_corpus()
print(f"Created corpus with ID: {store.corpus_id})
store = GoogleVectorStore.create_corpus(
display_name="My first corpus"
)
store = GoogleVectorStore.create_corpus(
corpus_id="my-corpus-1",
display_name="My first corpus"
)
Args:
corpus_id: ID of the new corpus to be created. If not provided,
Google server will provide one for you.
display_name: Title of the corpus. If not provided, Google server
will provide one for you.
Returns:
An instance of the vector store that points to the specified corpus.
Raises:
An exception if the corpus already exists or the user hits the
quota limit.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(
f"\n\nGoogleVectorStore.create_corpus(new_corpus_id={corpus_id}, new_display_name={display_name})"
)
client = genaix.build_semantic_retriever()
new_corpus_id = corpus_id or str(uuid.uuid4())
new_corpus = genaix.create_corpus(
corpus_id=new_corpus_id, display_name=display_name, client=client
)
name = genaix.EntityName.from_str(new_corpus.name)
return cls(corpus_id=name.corpus_id, client=client)
@classmethod
def class_name(cls) -> str:
return "GoogleVectorStore"
@property
def client(self) -> Any:
return self._client
def add(self, nodes: List[BaseNode], **add_kwargs: Any) -> List[str]:
"""Add nodes with embedding to vector store.
If a node has a source node, the source node's ID will be used to create
a document. Otherwise, a default document for that corpus will be used
to house the node.
Furthermore, if the source node has a metadata field "file_name", it
will be used as the title of the document. If the source node has no
such field, Google server will assign a title to the document.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.add([
TextNode(
text="Hello, my darling",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
TextNode(
text="Goodbye, my baby",
relationships={
NodeRelationship.SOURCE: RelatedNodeInfo(
node_id="doc-456",
metadata={"file_name": "Title for doc-456"},
)
},
),
])
The above code will create one document with ID `doc-456` and title
`Title for doc-456`. This document will house both nodes.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.add(nodes={nodes})")
client = cast(genai.RetrieverServiceClient, self.client)
created_node_ids: List[str] = []
for nodeGroup in _group_nodes_by_source(nodes):
source = nodeGroup.source_node
document_id = source.node_id
document = genaix.get_document(
corpus_id=self.corpus_id, document_id=document_id, client=client
)
if not document:
genaix.create_document(
corpus_id=self.corpus_id,
display_name=source.metadata.get("file_name", None),
document_id=document_id,
metadata=source.metadata,
client=client,
)
created_chunks = genaix.batch_create_chunk(
corpus_id=self.corpus_id,
document_id=document_id,
texts=[node.get_content() for node in nodeGroup.nodes],
metadatas=[node.metadata for node in nodeGroup.nodes],
client=client,
)
created_node_ids.extend([chunk.name for chunk in created_chunks])
return created_node_ids
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""Delete nodes by ref_doc_id.
Both the underlying nodes and the document will be deleted from Google
server.
Args:
ref_doc_id: The document ID to be deleted.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.delete(ref_doc_id={ref_doc_id})")
client = cast(genai.RetrieverServiceClient, self.client)
genaix.delete_document(
corpus_id=self.corpus_id, document_id=ref_doc_id, client=client
)
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query vector store.
Example:
store = GoogleVectorStore.from_corpus(corpus_id="123")
store.query(
query=VectorStoreQuery(
query_str="What is the meaning of life?",
# Only nodes with this author.
filters=MetadataFilters(
filters=[
ExactMatchFilter(
key="author",
value="Arthur Schopenhauer",
)
]
),
# Only from these docs. If not provided,
# the entire corpus is searched.
doc_ids=["doc-456"],
similarity_top_k=3,
)
)
Args:
query: See `llama_index.core.vector_stores.types.VectorStoreQuery`.
"""
try:
import llama_index.vector_stores.google.genai_extension as genaix
import google.ai.generativelanguage as genai
except ImportError:
raise ImportError(_import_err_msg)
_logger.debug(f"\n\nGoogleVectorStore.query(query={query})")
query_str = query.query_str
if query_str is None:
raise ValueError("VectorStoreQuery.query_str should not be None.")
client = cast(genai.RetrieverServiceClient, self.client)
relevant_chunks: List[genai.RelevantChunk] = []
if query.doc_ids is None:
# The chunks from query_corpus should be sorted in reverse order by
# relevant score.
relevant_chunks = genaix.query_corpus(
corpus_id=self.corpus_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
else:
for doc_id in query.doc_ids:
relevant_chunks.extend(
genaix.query_document(
corpus_id=self.corpus_id,
document_id=doc_id,
query=query_str,
filter=_convert_filter(query.filters),
k=query.similarity_top_k,
client=client,
)
)
# Make sure the chunks are reversed sorted according to relevant
# scores even across multiple documents.
relevant_chunks.sort(key=lambda c: c.chunk_relevance_score, reverse=True)
nodes = []
include_metadata = self.include_metadata
metadata_keys = self.metadata_keys
for chunk in relevant_chunks:
metadata = {}
if include_metadata:
for custom_metadata in chunk.chunk.custom_metadata:
# Use getattr to safely extract values
value = getattr(custom_metadata, "string_value", None)
if (
value is None
): # If string_value is not set, check for numeric_value
value = getattr(custom_metadata, "numeric_value", None)
# Add to the metadata dictionary only those keys that are present in metadata_keys
if value is not None and (
metadata_keys is None or custom_metadata.key in metadata_keys
):
metadata[custom_metadata.key] = value
text_node = TextNode(
text=chunk.chunk.data.string_value,
                id_=_extract_chunk_id(chunk.chunk.name),
metadata=metadata, # Adding metadata to the node
)
nodes.append(text_node)
return VectorStoreQueryResult(
nodes=nodes,
ids=[_extract_chunk_id(chunk.chunk.name) for chunk in relevant_chunks],
similarities=[chunk.chunk_relevance_score for chunk in relevant_chunks],
)
def _extract_chunk_id(entity_name: str) -> str:
try:
import llama_index.vector_stores.google.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
id = genaix.EntityName.from_str(entity_name).chunk_id
assert id is not None
return id
class _NodeGroup(BaseModel):
"""Every node in nodes have the same source node."""
source_node: RelatedNodeInfo
nodes: List[BaseNode]
def _group_nodes_by_source(nodes: Sequence[BaseNode]) -> List[_NodeGroup]:
"""Returns a list of lists of nodes where each list has all the nodes
from the same document.
"""
groups: Dict[str, _NodeGroup] = {}
for node in nodes:
source_node: RelatedNodeInfo
if isinstance(node.source_node, RelatedNodeInfo):
source_node = node.source_node
else:
source_node = RelatedNodeInfo(node_id=_default_doc_id)
if source_node.node_id not in groups:
groups[source_node.node_id] = _NodeGroup(source_node=source_node, nodes=[])
groups[source_node.node_id].nodes.append(node)
return list(groups.values())
def _convert_filter(fs: Optional[MetadataFilters]) -> Dict[str, Any]:
if fs is None:
return {}
assert isinstance(fs, MetadataFilters)
return {f.key: f.value for f in fs.filters}
| [
"llama_index.vector_stores.google.genai_extension.delete_document",
"llama_index.vector_stores.google.genai_extension.Config",
"llama_index.vector_stores.google.genai_extension.get_corpus",
"llama_index.vector_stores.google.genai_extension.EntityName.from_str",
"llama_index.vector_stores.google.genai_extension.create_corpus",
"llama_index.vector_stores.google.genai_extension.build_semantic_retriever",
"llama_index.vector_stores.google.genai_extension.get_document",
"llama_index.core.bridge.pydantic.Field",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.vector_stores.google.genai_extension.set_config",
"llama_index.core.schema.RelatedNodeInfo"
] | [((812, 839), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (829, 839), False, 'import logging\n'), ((2859, 2881), 'llama_index.vector_stores.google.genai_extension.Config', 'genaix.Config', ([], {}), '(**attrs)\n', (2872, 2881), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((2886, 2911), 'llama_index.vector_stores.google.genai_extension.set_config', 'genaix.set_config', (['config'], {}), '(config)\n', (2903, 2911), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((4343, 4361), 'llama_index.core.bridge.pydantic.Field', 'Field', ([], {'frozen': '(True)'}), '(frozen=True)\n', (4348, 4361), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((4600, 4613), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (4611, 4613), False, 'from llama_index.core.bridge.pydantic import BaseModel, Field, PrivateAttr\n'), ((6413, 6446), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (6444, 6446), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8216, 8249), 'llama_index.vector_stores.google.genai_extension.build_semantic_retriever', 'genaix.build_semantic_retriever', ([], {}), '()\n', (8247, 8249), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8326, 8417), 'llama_index.vector_stores.google.genai_extension.create_corpus', 'genaix.create_corpus', ([], {'corpus_id': 'new_corpus_id', 'display_name': 'display_name', 'client': 'client'}), '(corpus_id=new_corpus_id, display_name=display_name,\n client=client)\n', (8346, 8417), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((8451, 8494), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['new_corpus.name'], {}), '(new_corpus.name)\n', (8477, 8494), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10566, 10613), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (10570, 10613), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12317, 12364), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (12321, 12364), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((12373, 12464), 'llama_index.vector_stores.google.genai_extension.delete_document', 'genaix.delete_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'ref_doc_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=ref_doc_id,\n client=client)\n', (12395, 12464), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((13968, 14015), 'typing.cast', 'cast', (['genai.RetrieverServiceClient', 'self.client'], {}), '(genai.RetrieverServiceClient, self.client)\n', (13972, 14015), False, 'from typing import TYPE_CHECKING, Any, Dict, List, Optional, Sequence, cast\n'), ((16832, 16871), 'llama_index.vector_stores.google.genai_extension.EntityName.from_str', 'genaix.EntityName.from_str', (['entity_name'], {}), '(entity_name)\n', (16858, 16871), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((6458, 6511), 'llama_index.vector_stores.google.genai_extension.get_corpus', 'genaix.get_corpus', ([], 
{'corpus_id': 'corpus_id', 'client': 'client'}), '(corpus_id=corpus_id, client=client)\n', (6475, 6511), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((10819, 10908), 'llama_index.vector_stores.google.genai_extension.get_document', 'genaix.get_document', ([], {'corpus_id': 'self.corpus_id', 'document_id': 'document_id', 'client': 'client'}), '(corpus_id=self.corpus_id, document_id=document_id,\n client=client)\n', (10838, 10908), True, 'import llama_index.vector_stores.google.genai_extension as genaix\n'), ((17496, 17536), 'llama_index.core.schema.RelatedNodeInfo', 'RelatedNodeInfo', ([], {'node_id': '_default_doc_id'}), '(node_id=_default_doc_id)\n', (17511, 17536), False, 'from llama_index.core.schema import BaseNode, RelatedNodeInfo, TextNode\n'), ((8291, 8303), 'uuid.uuid4', 'uuid.uuid4', ([], {}), '()\n', (8301, 8303), False, 'import uuid\n')] |
"""Google GenerativeAI Attributed Question and Answering (AQA) service.
The GenAI Semantic AQA API is a managed end to end service that allows
developers to create responses grounded on specified passages based on
a user query. For more information visit:
https://developers.generativeai.google/guide
"""
import logging
from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast
from llama_index.bridge.pydantic import BaseModel # type: ignore
from llama_index.callbacks.schema import CBEventType, EventPayload
from llama_index.core.response.schema import Response
from llama_index.indices.query.schema import QueryBundle
from llama_index.prompts.mixin import PromptDictType
from llama_index.response_synthesizers.base import BaseSynthesizer, QueryTextType
from llama_index.schema import MetadataMode, NodeWithScore, TextNode
from llama_index.types import RESPONSE_TEXT_TYPE
from llama_index.vector_stores.google.generativeai import google_service_context
if TYPE_CHECKING:
import google.ai.generativelanguage as genai
_logger = logging.getLogger(__name__)
_import_err_msg = "`google.generativeai` package not found, please run `pip install google-generativeai`"
_separator = "\n\n"
class SynthesizedResponse(BaseModel):
"""Response of `GoogleTextSynthesizer.get_response`."""
answer: str
"""The grounded response to the user's question."""
attributed_passages: List[str]
"""The list of passages the AQA model used for its response."""
answerable_probability: float
"""The model's estimate of the probability that its answer is correct and grounded in the input passages."""
class GoogleTextSynthesizer(BaseSynthesizer):
"""Google's Attributed Question and Answering service.
Given a user's query and a list of passages, Google's server will return
a response that is grounded to the provided list of passages. It will not
base the response on parametric memory.
"""
_client: Any
_temperature: float
_answer_style: Any
_safety_setting: List[Any]
def __init__(
self,
*,
temperature: float,
answer_style: Any,
safety_setting: List[Any],
**kwargs: Any,
):
"""Create a new Google AQA.
Prefer to use the factory `from_defaults` instead for type safety.
See `from_defaults` for more documentation.
"""
try:
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
super().__init__(
service_context=google_service_context,
output_cls=SynthesizedResponse,
**kwargs,
)
self._client = genaix.build_generative_service()
self._temperature = temperature
self._answer_style = answer_style
self._safety_setting = safety_setting
# Type safe factory that is only available if Google is installed.
@classmethod
def from_defaults(
cls,
temperature: float = 0.7,
answer_style: int = 1,
safety_setting: List["genai.SafetySetting"] = [],
) -> "GoogleTextSynthesizer":
"""Create a new Google AQA.
Example:
responder = GoogleTextSynthesizer.create(
temperature=0.7,
answer_style=AnswerStyle.ABSTRACTIVE,
safety_setting=[
SafetySetting(
category=HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
),
]
)
Args:
temperature: 0.0 to 1.0.
answer_style: See `google.ai.generativelanguage.GenerateAnswerRequest.AnswerStyle`
The default is ABSTRACTIVE (1).
safety_setting: See `google.ai.generativelanguage.SafetySetting`.
Returns:
an instance of GoogleTextSynthesizer.
"""
return cls(
temperature=temperature,
answer_style=answer_style,
safety_setting=safety_setting,
)
def get_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> SynthesizedResponse:
"""Generate a grounded response on provided passages.
Args:
query_str: The user's question.
text_chunks: A list of passages that should be used to answer the
question.
Returns:
A `SynthesizedResponse` object.
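        Example:
            An illustrative sketch (not from the original docs); the query and
            passage below are placeholders:
                synthesizer = GoogleTextSynthesizer.from_defaults()
                response = synthesizer.get_response(
                    query_str="What is the meaning of life?",
                    text_chunks=["It's 42."],
                )
                print(response.answer, response.answerable_probability)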
"""
try:
import google.ai.generativelanguage as genai
import llama_index.vector_stores.google.generativeai.genai_extension as genaix
except ImportError:
raise ImportError(_import_err_msg)
client = cast(genai.GenerativeServiceClient, self._client)
response = genaix.generate_answer(
prompt=query_str,
passages=list(text_chunks),
answer_style=self._answer_style,
safety_settings=self._safety_setting,
temperature=self._temperature,
client=client,
)
return SynthesizedResponse(
answer=response.answer,
attributed_passages=[
passage.text for passage in response.attributed_passages
],
answerable_probability=response.answerable_probability,
)
async def aget_response(
self,
query_str: str,
text_chunks: Sequence[str],
**response_kwargs: Any,
) -> RESPONSE_TEXT_TYPE:
# TODO: Implement a true async version.
return self.get_response(query_str, text_chunks, **response_kwargs)
def synthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
"""Returns a grounded response based on provided passages.
Returns:
Response's `source_nodes` will begin with a list of attributed
passages. These passages are the ones that were used to construct
the grounded response. These passages will always have no score,
            which is the only way to mark them as attributed passages. Then, the list
will follow with the originally provided passages, which will have
a score from the retrieval.
            Response's `metadata` may also have an entry with key
`answerable_probability`, which is the model's estimate of the
probability that its answer is correct and grounded in the input
passages.
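        Example:
            Illustrative only; assumes ``synthesizer`` was built via
            ``from_defaults`` and ``nodes`` are retrieved ``NodeWithScore`` objects:
                response = synthesizer.synthesize(query="Why 42?", nodes=nodes)
                print(response.response)
                print(response.metadata["answerable_probability"])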
"""
if len(nodes) == 0:
return Response("Empty Response")
if isinstance(query, str):
query = QueryBundle(query_str=query)
with self._callback_manager.event(
CBEventType.SYNTHESIZE, payload={EventPayload.QUERY_STR: query.query_str}
) as event:
internal_response = self.get_response(
query_str=query.query_str,
text_chunks=[
n.node.get_content(metadata_mode=MetadataMode.LLM) for n in nodes
],
**response_kwargs,
)
additional_source_nodes = list(additional_source_nodes or [])
external_response = self._prepare_external_response(
internal_response, nodes + additional_source_nodes
)
event.on_end(payload={EventPayload.RESPONSE: external_response})
return external_response
async def asynthesize(
self,
query: QueryTextType,
nodes: List[NodeWithScore],
additional_source_nodes: Optional[Sequence[NodeWithScore]] = None,
**response_kwargs: Any,
) -> Response:
# TODO: Implement a true async version.
return self.synthesize(query, nodes, additional_source_nodes, **response_kwargs)
def _prepare_external_response(
self,
response: SynthesizedResponse,
source_nodes: List[NodeWithScore],
) -> Response:
return Response(
response=response.answer,
source_nodes=[
NodeWithScore(node=TextNode(text=passage))
for passage in response.attributed_passages
]
+ source_nodes,
metadata={
"answerable_probability": response.answerable_probability,
},
)
def _get_prompts(self) -> PromptDictType:
# Not used.
return {}
def _update_prompts(self, prompts_dict: PromptDictType) -> None:
# Not used.
...
| [
"llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service",
"llama_index.core.response.schema.Response",
"llama_index.schema.TextNode",
"llama_index.indices.query.schema.QueryBundle"
] | [((1051, 1078), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (1068, 1078), False, 'import logging\n'), ((2739, 2772), 'llama_index.vector_stores.google.generativeai.genai_extension.build_generative_service', 'genaix.build_generative_service', ([], {}), '()\n', (2770, 2772), True, 'import llama_index.vector_stores.google.generativeai.genai_extension as genaix\n'), ((4824, 4873), 'typing.cast', 'cast', (['genai.GenerativeServiceClient', 'self._client'], {}), '(genai.GenerativeServiceClient, self._client)\n', (4828, 4873), False, 'from typing import TYPE_CHECKING, Any, List, Optional, Sequence, cast\n'), ((6767, 6793), 'llama_index.core.response.schema.Response', 'Response', (['"""Empty Response"""'], {}), "('Empty Response')\n", (6775, 6793), False, 'from llama_index.core.response.schema import Response\n'), ((6850, 6878), 'llama_index.indices.query.schema.QueryBundle', 'QueryBundle', ([], {'query_str': 'query'}), '(query_str=query)\n', (6861, 6878), False, 'from llama_index.indices.query.schema import QueryBundle\n'), ((8289, 8311), 'llama_index.schema.TextNode', 'TextNode', ([], {'text': 'passage'}), '(text=passage)\n', (8297, 8311), False, 'from llama_index.schema import MetadataMode, NodeWithScore, TextNode\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""FastAPI app creation, logger configuration and main API routes."""
import llama_index
from private_gpt.di import global_injector
from private_gpt.launcher import create_app
# Add LlamaIndex simple observability
llama_index.set_global_handler("simple")
app = create_app(global_injector)
| [
"llama_index.set_global_handler"
] | [((217, 257), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (247, 257), False, 'import llama_index\n'), ((265, 292), 'private_gpt.launcher.create_app', 'create_app', (['global_injector'], {}), '(global_injector)\n', (275, 292), False, 'from private_gpt.launcher import create_app\n')] |
"""
Astra DB Vector store index.
An index based on a DB table with vector search capabilities,
powered by the astrapy library
"""
import json
import logging
from typing import Any, Dict, List, Optional, cast
from warnings import warn
import llama_index.core
from llama_index.core.bridge.pydantic import PrivateAttr
from astrapy.db import AstraDB
from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings
from llama_index.core.schema import BaseNode, MetadataMode
from llama_index.core.vector_stores.types import (
BasePydanticVectorStore,
ExactMatchFilter,
FilterOperator,
MetadataFilter,
MetadataFilters,
VectorStoreQuery,
VectorStoreQueryMode,
VectorStoreQueryResult,
)
from llama_index.core.vector_stores.utils import (
metadata_dict_to_node,
node_to_metadata_dict,
)
_logger = logging.getLogger(__name__)
DEFAULT_MMR_PREFETCH_FACTOR = 4.0
MAX_INSERT_BATCH_SIZE = 20
NON_INDEXED_FIELDS = ["metadata._node_content", "content"]
class AstraDBVectorStore(BasePydanticVectorStore):
"""
Astra DB Vector Store.
    An abstraction of an Astra table with
vector-similarity-search. Documents, and their embeddings, are stored
in an Astra table and a vector-capable index is used for searches.
The table does not need to exist beforehand: if necessary it will
be created behind the scenes.
All Astra operations are done through the astrapy library.
Args:
collection_name (str): collection name to use. If not existing, it will be created.
token (str): The Astra DB Application Token to use.
api_endpoint (str): The Astra DB JSON API endpoint for your database.
embedding_dimension (int): length of the embedding vectors in use.
namespace (Optional[str]): The namespace to use. If not provided, 'default_keyspace'
ttl_seconds (Optional[int]): expiration time for inserted entries.
Default is no expiration.
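    Example:
        A minimal construction sketch (the token, endpoint and dimension below
        are placeholders, not working values):
            vector_store = AstraDBVectorStore(
                collection_name="my_collection",
                token="AstraCS:...",
                api_endpoint="https://<db-id>-<region>.apps.astra.datastax.com",
                embedding_dimension=1536,
            )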
"""
stores_text: bool = True
flat_metadata: bool = True
_embedding_dimension: int = PrivateAttr()
_ttl_seconds: Optional[int] = PrivateAttr()
_astra_db: Any = PrivateAttr()
_astra_db_collection: Any = PrivateAttr()
def __init__(
self,
*,
collection_name: str,
token: str,
api_endpoint: str,
embedding_dimension: int,
namespace: Optional[str] = None,
ttl_seconds: Optional[int] = None,
) -> None:
super().__init__()
# Set all the required class parameters
self._embedding_dimension = embedding_dimension
self._ttl_seconds = ttl_seconds
_logger.debug("Creating the Astra DB table")
# Build the Astra DB object
self._astra_db = AstraDB(
api_endpoint=api_endpoint,
token=token,
namespace=namespace,
caller_name=getattr(llama_index, "__name__", "llama_index"),
caller_version=getattr(llama_index.core, "__version__", None),
)
from astrapy.api import APIRequestError
try:
# Create and connect to the newly created collection
self._astra_db_collection = self._astra_db.create_collection(
collection_name=collection_name,
dimension=embedding_dimension,
options={"indexing": {"deny": NON_INDEXED_FIELDS}},
)
except APIRequestError:
# possibly the collection is preexisting and has legacy
# indexing settings: verify
get_coll_response = self._astra_db.get_collections(
options={"explain": True}
)
collections = (get_coll_response["status"] or {}).get("collections") or []
preexisting = [
collection
for collection in collections
if collection["name"] == collection_name
]
if preexisting:
pre_collection = preexisting[0]
# if it has no "indexing", it is a legacy collection;
# otherwise it's unexpected warn and proceed at user's risk
pre_col_options = pre_collection.get("options") or {}
if "indexing" not in pre_col_options:
warn(
(
f"Collection '{collection_name}' is detected as "
"having indexing turned on for all fields "
"(either created manually or by older versions "
"of this plugin). This implies stricter "
"limitations on the amount of text"
" each entry can store. Consider reindexing anew on a"
" fresh collection to be able to store longer texts."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
options_json = json.dumps(pre_col_options["indexing"])
warn(
(
f"Collection '{collection_name}' has unexpected 'indexing'"
f" settings (options.indexing = {options_json})."
" This can result in odd behaviour when running "
" metadata filtering and/or unwarranted limitations"
" on storing long texts. Consider reindexing anew on a"
" fresh collection."
),
UserWarning,
stacklevel=2,
)
self._astra_db_collection = self._astra_db.collection(
collection_name=collection_name,
)
else:
# other exception
raise
def add(
self,
nodes: List[BaseNode],
**add_kwargs: Any,
) -> List[str]:
"""
Add nodes to index.
Args:
nodes: List[BaseNode]: list of node with embeddings
"""
# Initialize list of objects to track
nodes_list = []
# Process each node individually
for node in nodes:
# Get the metadata
metadata = node_to_metadata_dict(
node,
remove_text=True,
flat_metadata=self.flat_metadata,
)
# One dictionary of node data per node
nodes_list.append(
{
"_id": node.node_id,
"content": node.get_content(metadata_mode=MetadataMode.NONE),
"metadata": metadata,
"$vector": node.get_embedding(),
}
)
# Log the number of rows being added
_logger.debug(f"Adding {len(nodes_list)} rows to table")
# Initialize an empty list to hold the batches
batched_list = []
# Iterate over the node_list in steps of MAX_INSERT_BATCH_SIZE
for i in range(0, len(nodes_list), MAX_INSERT_BATCH_SIZE):
# Append a slice of node_list to the batched_list
batched_list.append(nodes_list[i : i + MAX_INSERT_BATCH_SIZE])
# Perform the bulk insert
for i, batch in enumerate(batched_list):
_logger.debug(f"Processing batch #{i + 1} of size {len(batch)}")
# Go to astrapy to perform the bulk insert
self._astra_db_collection.insert_many(batch)
# Return the list of ids
return [str(n["_id"]) for n in nodes_list]
def delete(self, ref_doc_id: str, **delete_kwargs: Any) -> None:
"""
        Delete nodes using ref_doc_id.
Args:
ref_doc_id (str): The id of the document to delete.
"""
_logger.debug("Deleting a document from the Astra table")
self._astra_db_collection.delete(id=ref_doc_id, **delete_kwargs)
@property
def client(self) -> Any:
"""Return the underlying Astra vector table object."""
return self._astra_db_collection
@staticmethod
def _query_filters_to_dict(query_filters: MetadataFilters) -> Dict[str, Any]:
# Allow only legacy ExactMatchFilter and MetadataFilter with FilterOperator.EQ
if not all(
(
isinstance(f, ExactMatchFilter)
or (isinstance(f, MetadataFilter) and f.operator == FilterOperator.EQ)
)
for f in query_filters.filters
):
raise NotImplementedError(
"Only filters with operator=FilterOperator.EQ are supported"
)
return {f"metadata.{f.key}": f.value for f in query_filters.filters}
def query(self, query: VectorStoreQuery, **kwargs: Any) -> VectorStoreQueryResult:
"""Query index for top k most similar nodes."""
# Get the currently available query modes
_available_query_modes = [
VectorStoreQueryMode.DEFAULT,
VectorStoreQueryMode.MMR,
]
# Reject query if not available
if query.mode not in _available_query_modes:
raise NotImplementedError(f"Query mode {query.mode} not available.")
# Get the query embedding
query_embedding = cast(List[float], query.query_embedding)
# Process the metadata filters as needed
if query.filters is not None:
query_metadata = self._query_filters_to_dict(query.filters)
else:
query_metadata = {}
# Get the scores depending on the query mode
if query.mode == VectorStoreQueryMode.DEFAULT:
# Call the vector_find method of AstraPy
matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=query.similarity_top_k,
filter=query_metadata,
)
# Get the scores associated with each
top_k_scores = [match["$similarity"] for match in matches]
elif query.mode == VectorStoreQueryMode.MMR:
# Querying a larger number of vectors and then doing MMR on them.
if (
kwargs.get("mmr_prefetch_factor") is not None
and kwargs.get("mmr_prefetch_k") is not None
):
raise ValueError(
"'mmr_prefetch_factor' and 'mmr_prefetch_k' "
"cannot coexist in a call to query()"
)
else:
if kwargs.get("mmr_prefetch_k") is not None:
prefetch_k0 = int(kwargs["mmr_prefetch_k"])
else:
prefetch_k0 = int(
query.similarity_top_k
* kwargs.get("mmr_prefetch_factor", DEFAULT_MMR_PREFETCH_FACTOR)
)
# Get the most we can possibly need to fetch
prefetch_k = max(prefetch_k0, query.similarity_top_k)
# Call AstraPy to fetch them
prefetch_matches = self._astra_db_collection.vector_find(
vector=query_embedding,
limit=prefetch_k,
filter=query_metadata,
)
# Get the MMR threshold
mmr_threshold = query.mmr_threshold or kwargs.get("mmr_threshold")
# If we have found documents, we can proceed
if prefetch_matches:
zipped_indices, zipped_embeddings = zip(
*enumerate(match["$vector"] for match in prefetch_matches)
)
pf_match_indices, pf_match_embeddings = list(zipped_indices), list(
zipped_embeddings
)
else:
pf_match_indices, pf_match_embeddings = [], []
# Call the Llama utility function to get the top k
mmr_similarities, mmr_indices = get_top_k_mmr_embeddings(
query_embedding,
pf_match_embeddings,
similarity_top_k=query.similarity_top_k,
embedding_ids=pf_match_indices,
mmr_threshold=mmr_threshold,
)
# Finally, build the final results based on the mmr values
matches = [prefetch_matches[mmr_index] for mmr_index in mmr_indices]
top_k_scores = mmr_similarities
# We have three lists to return
top_k_nodes = []
top_k_ids = []
# Get every match
for match in matches:
# Check whether we have a llama-generated node content field
if "_node_content" not in match["metadata"]:
match["metadata"]["_node_content"] = json.dumps(match)
# Create a new node object from the node metadata
node = metadata_dict_to_node(match["metadata"], text=match["content"])
# Append to the respective lists
top_k_nodes.append(node)
top_k_ids.append(match["_id"])
# return our final result
return VectorStoreQueryResult(
nodes=top_k_nodes,
similarities=top_k_scores,
ids=top_k_ids,
)
| [
"llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings",
"llama_index.core.bridge.pydantic.PrivateAttr",
"llama_index.core.vector_stores.utils.node_to_metadata_dict",
"llama_index.core.vector_stores.utils.metadata_dict_to_node",
"llama_index.core.vector_stores.types.VectorStoreQueryResult"
] | [((852, 879), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (869, 879), False, 'import logging\n'), ((2070, 2083), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2081, 2083), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2118, 2131), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2129, 2131), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2153, 2166), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2164, 2166), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((2199, 2212), 'llama_index.core.bridge.pydantic.PrivateAttr', 'PrivateAttr', ([], {}), '()\n', (2210, 2212), False, 'from llama_index.core.bridge.pydantic import PrivateAttr\n'), ((9525, 9565), 'typing.cast', 'cast', (['List[float]', 'query.query_embedding'], {}), '(List[float], query.query_embedding)\n', (9529, 9565), False, 'from typing import Any, Dict, List, Optional, cast\n'), ((13259, 13347), 'llama_index.core.vector_stores.types.VectorStoreQueryResult', 'VectorStoreQueryResult', ([], {'nodes': 'top_k_nodes', 'similarities': 'top_k_scores', 'ids': 'top_k_ids'}), '(nodes=top_k_nodes, similarities=top_k_scores, ids=\n top_k_ids)\n', (13281, 13347), False, 'from llama_index.core.vector_stores.types import BasePydanticVectorStore, ExactMatchFilter, FilterOperator, MetadataFilter, MetadataFilters, VectorStoreQuery, VectorStoreQueryMode, VectorStoreQueryResult\n'), ((6510, 6589), 'llama_index.core.vector_stores.utils.node_to_metadata_dict', 'node_to_metadata_dict', (['node'], {'remove_text': '(True)', 'flat_metadata': 'self.flat_metadata'}), '(node, remove_text=True, flat_metadata=self.flat_metadata)\n', (6531, 6589), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((13019, 13082), 'llama_index.core.vector_stores.utils.metadata_dict_to_node', 'metadata_dict_to_node', (["match['metadata']"], {'text': "match['content']"}), "(match['metadata'], text=match['content'])\n", (13040, 13082), False, 'from llama_index.core.vector_stores.utils import metadata_dict_to_node, node_to_metadata_dict\n'), ((12133, 12305), 'llama_index.core.indices.query.embedding_utils.get_top_k_mmr_embeddings', 'get_top_k_mmr_embeddings', (['query_embedding', 'pf_match_embeddings'], {'similarity_top_k': 'query.similarity_top_k', 'embedding_ids': 'pf_match_indices', 'mmr_threshold': 'mmr_threshold'}), '(query_embedding, pf_match_embeddings,\n similarity_top_k=query.similarity_top_k, embedding_ids=pf_match_indices,\n mmr_threshold=mmr_threshold)\n', (12157, 12305), False, 'from llama_index.core.indices.query.embedding_utils import get_top_k_mmr_embeddings\n'), ((12919, 12936), 'json.dumps', 'json.dumps', (['match'], {}), '(match)\n', (12929, 12936), False, 'import json\n'), ((4284, 4638), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. Consider reindexing anew on a fresh collection to be able to store longer texts."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' is detected as having indexing turned on for all fields (either created manually or by older versions of this plugin). This implies stricter limitations on the amount of text each entry can store. 
Consider reindexing anew on a fresh collection to be able to store longer texts."\n , UserWarning, stacklevel=2)\n', (4288, 4638), False, 'from warnings import warn\n'), ((5177, 5216), 'json.dumps', 'json.dumps', (["pre_col_options['indexing']"], {}), "(pre_col_options['indexing'])\n", (5187, 5216), False, 'import json\n'), ((5237, 5553), 'warnings.warn', 'warn', (['f"""Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."""', 'UserWarning'], {'stacklevel': '(2)'}), '(\n f"Collection \'{collection_name}\' has unexpected \'indexing\' settings (options.indexing = {options_json}). This can result in odd behaviour when running metadata filtering and/or unwarranted limitations on storing long texts. Consider reindexing anew on a fresh collection."\n , UserWarning, stacklevel=2)\n', (5241, 5553), False, 'from warnings import warn\n')] |
from unittest.mock import MagicMock, patch
import pytest
from llama_index.legacy.core.response.schema import Response
from llama_index.legacy.schema import Document
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.indices.managed.google.generativeai import (
GoogleIndex,
set_google_config,
)
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_from_corpus(mock_get_corpus: MagicMock) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
# Act
store = GoogleIndex.from_corpus(corpus_id="123")
# Assert
assert store.corpus_id == "123"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
def test_create_corpus(mock_create_corpus: MagicMock) -> None:
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_create_corpus.side_effect = fake_create_corpus
# Act
store = GoogleIndex.create_corpus(display_name="My first corpus")
# Assert
assert len(store.corpus_id) > 0
assert mock_create_corpus.call_count == 1
request = mock_create_corpus.call_args.args[0]
assert request.corpus.name == f"corpora/{store.corpus_id}"
assert request.corpus.display_name == "My first corpus"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_corpus")
@patch("google.ai.generativelanguage.RetrieverServiceClient.create_document")
@patch("google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_document")
def test_from_documents(
mock_get_document: MagicMock,
mock_batch_create_chunk: MagicMock,
mock_create_document: MagicMock,
mock_create_corpus: MagicMock,
) -> None:
from google.api_core import exceptions as gapi_exception
def fake_create_corpus(request: genai.CreateCorpusRequest) -> genai.Corpus:
return request.corpus
# Arrange
mock_get_document.side_effect = gapi_exception.NotFound("")
mock_create_corpus.side_effect = fake_create_corpus
mock_create_document.return_value = genai.Document(name="corpora/123/documents/456")
mock_batch_create_chunk.side_effect = [
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/777"),
]
),
genai.BatchCreateChunksResponse(
chunks=[
genai.Chunk(name="corpora/123/documents/456/chunks/888"),
]
),
]
# Act
index = GoogleIndex.from_documents(
[
Document(text="Hello, my darling"),
Document(text="Goodbye, my baby"),
]
)
# Assert
assert mock_create_corpus.call_count == 1
create_corpus_request = mock_create_corpus.call_args.args[0]
assert create_corpus_request.corpus.name == f"corpora/{index.corpus_id}"
create_document_request = mock_create_document.call_args.args[0]
assert create_document_request.parent == f"corpora/{index.corpus_id}"
assert mock_batch_create_chunk.call_count == 2
first_batch_request = mock_batch_create_chunk.call_args_list[0].args[0]
assert (
first_batch_request.requests[0].chunk.data.string_value == "Hello, my darling"
)
second_batch_request = mock_batch_create_chunk.call_args_list[1].args[0]
assert (
second_batch_request.requests[0].chunk.data.string_value == "Goodbye, my baby"
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.RetrieverServiceClient.query_corpus")
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
@patch("google.ai.generativelanguage.RetrieverServiceClient.get_corpus")
def test_as_query_engine(
mock_get_corpus: MagicMock,
mock_generate_answer: MagicMock,
mock_query_corpus: MagicMock,
) -> None:
# Arrange
mock_get_corpus.return_value = genai.Corpus(name="corpora/123")
mock_query_corpus.return_value = genai.QueryCorpusResponse(
relevant_chunks=[
genai.RelevantChunk(
chunk=genai.Chunk(
name="corpora/123/documents/456/chunks/789",
data=genai.ChunkData(string_value="It's 42"),
),
chunk_relevance_score=0.9,
)
]
)
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
index = GoogleIndex.from_corpus(corpus_id="123")
query_engine = index.as_query_engine(
answer_style=genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
response = query_engine.query("What is the meaning of life?")
# Assert
assert mock_query_corpus.call_count == 1
query_corpus_request = mock_query_corpus.call_args.args[0]
assert query_corpus_request.name == "corpora/123"
assert query_corpus_request.query == "What is the meaning of life?"
assert isinstance(response, Response)
assert response.response == "42"
assert mock_generate_answer.call_count == 1
generate_answer_request = mock_generate_answer.call_args.args[0]
assert (
generate_answer_request.contents[0].parts[0].text
== "What is the meaning of life?"
)
assert (
generate_answer_request.answer_style
== genai.GenerateAnswerRequest.AnswerStyle.EXTRACTIVE
)
passages = generate_answer_request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.indices.managed.google.generativeai.set_google_config",
"llama_index.legacy.schema.Document",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus",
"llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus"
] | [((693, 752), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (711, 752), False, 'import pytest\n'), ((754, 798), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (759, 798), False, 'from unittest.mock import MagicMock, patch\n'), ((1012, 1071), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1030, 1071), False, 'import pytest\n'), ((1073, 1144), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (1078, 1144), False, 'from unittest.mock import MagicMock, patch\n'), ((1402, 1461), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1420, 1461), False, 'import pytest\n'), ((1463, 1537), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (1468, 1537), False, 'from unittest.mock import MagicMock, patch\n'), ((2137, 2196), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (2155, 2196), False, 'import pytest\n'), ((2198, 2272), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_corpus')\n", (2203, 2272), False, 'from unittest.mock import MagicMock, patch\n'), ((2274, 2350), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.create_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.create_document')\n", (2279, 2350), False, 'from unittest.mock import MagicMock, patch\n'), ((2352, 2437), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.batch_create_chunks'\n )\n", (2357, 2437), False, 'from unittest.mock import MagicMock, patch\n'), ((2434, 2507), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.get_document"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_document')\n", (2439, 2507), False, 'from unittest.mock import MagicMock, patch\n'), ((4398, 4457), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (4416, 4457), False, 'import pytest\n'), ((4459, 4532), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.RetrieverServiceClient.query_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.query_corpus')\n", (4464, 4532), False, 'from unittest.mock import MagicMock, patch\n'), ((4534, 4611), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (4539, 4611), False, 'from unittest.mock import MagicMock, patch\n'), ((4613, 4684), 'unittest.mock.patch', 'patch', 
(['"""google.ai.generativelanguage.RetrieverServiceClient.get_corpus"""'], {}), "('google.ai.generativelanguage.RetrieverServiceClient.get_corpus')\n", (4618, 4684), False, 'from unittest.mock import MagicMock, patch\n'), ((570, 671), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (587, 671), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((868, 920), 'llama_index.legacy.indices.managed.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (885, 920), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((934, 953), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (951, 953), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((1252, 1284), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (1264, 1284), True, 'import google.ai.generativelanguage as genai\n'), ((1308, 1348), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (1331, 1348), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((1805, 1862), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.create_corpus', 'GoogleIndex.create_corpus', ([], {'display_name': '"""My first corpus"""'}), "(display_name='My first corpus')\n", (1830, 1862), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((2913, 2940), 'google.api_core.exceptions.NotFound', 'gapi_exception.NotFound', (['""""""'], {}), "('')\n", (2936, 2940), True, 'from google.api_core import exceptions as gapi_exception\n'), ((3037, 3085), 'google.ai.generativelanguage.Document', 'genai.Document', ([], {'name': '"""corpora/123/documents/456"""'}), "(name='corpora/123/documents/456')\n", (3051, 3085), True, 'import google.ai.generativelanguage as genai\n'), ((4874, 4906), 'google.ai.generativelanguage.Corpus', 'genai.Corpus', ([], {'name': '"""corpora/123"""'}), "(name='corpora/123')\n", (4886, 4906), True, 'import google.ai.generativelanguage as genai\n'), ((6624, 6664), 'llama_index.legacy.indices.managed.google.generativeai.GoogleIndex.from_corpus', 'GoogleIndex.from_corpus', ([], {'corpus_id': '"""123"""'}), "(corpus_id='123')\n", (6647, 6664), False, 'from llama_index.legacy.indices.managed.google.generativeai import GoogleIndex, set_google_config\n'), ((3531, 3565), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Hello, my darling"""'}), "(text='Hello, my darling')\n", (3539, 3565), False, 'from llama_index.legacy.schema import Document\n'), ((3579, 3612), 'llama_index.legacy.schema.Document', 'Document', ([], {'text': '"""Goodbye, my baby"""'}), "(text='Goodbye, my baby')\n", (3587, 3612), False, 'from llama_index.legacy.schema import Document\n'), ((3208, 3264), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': 
'"""corpora/123/documents/456/chunks/777"""'}), "(name='corpora/123/documents/456/chunks/777')\n", (3219, 3264), True, 'import google.ai.generativelanguage as genai\n'), ((3369, 3425), 'google.ai.generativelanguage.Chunk', 'genai.Chunk', ([], {'name': '"""corpora/123/documents/456/chunks/888"""'}), "(name='corpora/123/documents/456/chunks/888')\n", (3380, 3425), True, 'import google.ai.generativelanguage as genai\n'), ((5155, 5194), 'google.ai.generativelanguage.ChunkData', 'genai.ChunkData', ([], {'string_value': '"""It\'s 42"""'}), '(string_value="It\'s 42")\n', (5170, 5194), True, 'import google.ai.generativelanguage as genai\n'), ((5431, 5452), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (5441, 5452), True, 'import google.ai.generativelanguage as genai\n'), ((5775, 5889), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (5819, 5889), True, 'import google.ai.generativelanguage as genai\n'), ((6237, 6351), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (6281, 6351), True, 'import google.ai.generativelanguage as genai\n'), ((5611, 5651), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (5621, 5651), True, 'import google.ai.generativelanguage as genai\n'), ((6103, 6134), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (6113, 6134), True, 'import google.ai.generativelanguage as genai\n')] |
from unittest.mock import MagicMock, patch
import pytest
try:
import google.ai.generativelanguage as genai
has_google = True
except ImportError:
has_google = False
from llama_index.legacy.response_synthesizers.google.generativeai import (
GoogleTextSynthesizer,
set_google_config,
)
from llama_index.legacy.schema import NodeWithScore, TextNode
SKIP_TEST_REASON = "Google GenerativeAI is not installed"
if has_google:
import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix
set_google_config(
api_endpoint="No-such-endpoint-to-prevent-hitting-real-backend",
testing=True,
)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.auth.credentials.Credentials")
def test_set_google_config(mock_credentials: MagicMock) -> None:
set_google_config(auth_credentials=mock_credentials)
config = genaix.get_config()
assert config.auth_credentials == mock_credentials
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_get_response(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42.")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/789",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.7,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults(
temperature=0.5,
answer_style=genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE,
safety_setting=[
genai.SafetySetting(
category=genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT,
threshold=genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE,
)
],
)
response = synthesizer.get_response(
query_str="What is the meaning of life?",
text_chunks=[
"It's 42",
],
)
# Assert
assert response.answer == "42"
assert response.attributed_passages == ["Meaning of life is 42."]
assert response.answerable_probability == pytest.approx(0.7)
assert mock_generate_answer.call_count == 1
request = mock_generate_answer.call_args.args[0]
assert request.contents[0].parts[0].text == "What is the meaning of life?"
assert request.answer_style == genai.GenerateAnswerRequest.AnswerStyle.ABSTRACTIVE
assert len(request.safety_settings) == 1
assert (
request.safety_settings[0].category
== genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT
)
assert (
request.safety_settings[0].threshold
== genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE
)
assert request.temperature == 0.5
passages = request.inline_passages.passages
assert len(passages) == 1
passage = passages[0]
assert passage.content.parts[0].text == "It's 42"
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[genai.Part(text="42")]),
grounding_attributions=[
genai.GroundingAttribution(
content=genai.Content(
parts=[genai.Part(text="Meaning of life is 42")]
),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/777",
part_index=0,
)
),
),
genai.GroundingAttribution(
content=genai.Content(parts=[genai.Part(text="Or maybe not")]),
source_id=genai.AttributionSourceId(
grounding_passage=genai.AttributionSourceId.GroundingPassageId(
passage_id="corpora/123/documents/456/chunks/888",
part_index=0,
)
),
),
],
finish_reason=genai.Candidate.FinishReason.STOP,
),
answerable_probability=0.9,
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
response = synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
additional_source_nodes=[
NodeWithScore(
node=TextNode(text="Additional node"),
score=0.4,
),
],
)
# Assert
assert response.response == "42"
assert len(response.source_nodes) == 4
first_attributed_source = response.source_nodes[0]
assert first_attributed_source.node.text == "Meaning of life is 42"
assert first_attributed_source.score is None
second_attributed_source = response.source_nodes[1]
assert second_attributed_source.node.text == "Or maybe not"
assert second_attributed_source.score is None
first_input_source = response.source_nodes[2]
assert first_input_source.node.text == "It's 42"
assert first_input_source.score == pytest.approx(0.5)
first_additional_source = response.source_nodes[3]
assert first_additional_source.node.text == "Additional node"
assert first_additional_source.score == pytest.approx(0.4)
assert response.metadata is not None
assert response.metadata.get("answerable_probability", None) == pytest.approx(0.9)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_max_token_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.MAX_TOKENS,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Maximum token" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_safety_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.SAFETY,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "safety" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_recitation_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.RECITATION,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "recitation" in str(e.value)
@pytest.mark.skipif(not has_google, reason=SKIP_TEST_REASON)
@patch("google.ai.generativelanguage.GenerativeServiceClient.generate_answer")
def test_synthesize_with_unknown_blocking(mock_generate_answer: MagicMock) -> None:
# Arrange
mock_generate_answer.return_value = genai.GenerateAnswerResponse(
answer=genai.Candidate(
content=genai.Content(parts=[]),
grounding_attributions=[],
finish_reason=genai.Candidate.FinishReason.OTHER,
),
)
# Act
synthesizer = GoogleTextSynthesizer.from_defaults()
with pytest.raises(Exception) as e:
synthesizer.synthesize(
query="What is the meaning of life?",
nodes=[
NodeWithScore(
node=TextNode(text="It's 42"),
score=0.5,
),
],
)
# Assert
assert "Unexpected" in str(e.value)
| [
"llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config",
"llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults",
"llama_index.legacy.schema.TextNode",
"llama_index.legacy.response_synthesizers.google.generativeai.set_google_config"
] | [((663, 722), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (681, 722), False, 'import pytest\n'), ((724, 768), 'unittest.mock.patch', 'patch', (['"""google.auth.credentials.Credentials"""'], {}), "('google.auth.credentials.Credentials')\n", (729, 768), False, 'from unittest.mock import MagicMock, patch\n'), ((982, 1041), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (1000, 1041), False, 'import pytest\n'), ((1043, 1120), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (1048, 1120), False, 'from unittest.mock import MagicMock, patch\n'), ((3580, 3639), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (3598, 3639), False, 'import pytest\n'), ((3641, 3718), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (3646, 3718), False, 'from unittest.mock import MagicMock, patch\n'), ((6499, 6558), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (6517, 6558), False, 'import pytest\n'), ((6560, 6637), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (6565, 6637), False, 'from unittest.mock import MagicMock, patch\n'), ((7434, 7493), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (7452, 7493), False, 'import pytest\n'), ((7495, 7572), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (7500, 7572), False, 'from unittest.mock import MagicMock, patch\n'), ((8355, 8414), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (8373, 8414), False, 'import pytest\n'), ((8416, 8493), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (8421, 8493), False, 'from unittest.mock import MagicMock, patch\n'), ((9288, 9347), 'pytest.mark.skipif', 'pytest.mark.skipif', (['(not has_google)'], {'reason': 'SKIP_TEST_REASON'}), '(not has_google, reason=SKIP_TEST_REASON)\n', (9306, 9347), False, 'import pytest\n'), ((9349, 9426), 'unittest.mock.patch', 'patch', (['"""google.ai.generativelanguage.GenerativeServiceClient.generate_answer"""'], {}), "('google.ai.generativelanguage.GenerativeServiceClient.generate_answer')\n", (9354, 9426), False, 'from unittest.mock import MagicMock, patch\n'), ((540, 641), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'api_endpoint': '"""No-such-endpoint-to-prevent-hitting-real-backend"""', 'testing': '(True)'}), "(api_endpoint=\n 
'No-such-endpoint-to-prevent-hitting-real-backend', testing=True)\n", (557, 641), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((838, 890), 'llama_index.legacy.response_synthesizers.google.generativeai.set_google_config', 'set_google_config', ([], {'auth_credentials': 'mock_credentials'}), '(auth_credentials=mock_credentials)\n', (855, 890), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((904, 923), 'llama_index.legacy.vector_stores.google.generativeai.genai_extension.get_config', 'genaix.get_config', ([], {}), '()\n', (921, 923), True, 'import llama_index.legacy.vector_stores.google.generativeai.genai_extension as genaix\n'), ((5137, 5174), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (5172, 5174), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7037, 7074), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (7072, 7074), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((7965, 8002), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8000, 8002), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((8894, 8931), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (8929, 8931), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((9819, 9856), 'llama_index.legacy.response_synthesizers.google.generativeai.GoogleTextSynthesizer.from_defaults', 'GoogleTextSynthesizer.from_defaults', ([], {}), '()\n', (9854, 9856), False, 'from llama_index.legacy.response_synthesizers.google.generativeai import GoogleTextSynthesizer, set_google_config\n'), ((2786, 2804), 'pytest.approx', 'pytest.approx', (['(0.7)'], {}), '(0.7)\n', (2799, 2804), False, 'import pytest\n'), ((6163, 6181), 'pytest.approx', 'pytest.approx', (['(0.5)'], {}), '(0.5)\n', (6176, 6181), False, 'import pytest\n'), ((6348, 6366), 'pytest.approx', 'pytest.approx', (['(0.4)'], {}), '(0.4)\n', (6361, 6366), False, 'import pytest\n'), ((6477, 6495), 'pytest.approx', 'pytest.approx', (['(0.9)'], {}), '(0.9)\n', (6490, 6495), False, 'import pytest\n'), ((7084, 7108), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (7097, 7108), False, 'import pytest\n'), ((8012, 8036), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8025, 8036), False, 'import pytest\n'), ((8941, 8965), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (8954, 8965), False, 'import pytest\n'), ((9866, 9890), 'pytest.raises', 'pytest.raises', (['Exception'], {}), '(Exception)\n', (9879, 9890), False, 'import pytest\n'), ((2253, 2413), 'google.ai.generativelanguage.SafetySetting', 'genai.SafetySetting', ([], {'category': 'genai.HarmCategory.HARM_CATEGORY_SEXUALLY_EXPLICIT', 'threshold': 
'genai.SafetySetting.HarmBlockThreshold.BLOCK_LOW_AND_ABOVE'}), '(category=genai.HarmCategory.\n HARM_CATEGORY_SEXUALLY_EXPLICIT, threshold=genai.SafetySetting.\n HarmBlockThreshold.BLOCK_LOW_AND_ABOVE)\n', (2272, 2413), True, 'import google.ai.generativelanguage as genai\n'), ((6860, 6883), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (6873, 6883), True, 'import google.ai.generativelanguage as genai\n'), ((7792, 7815), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (7805, 7815), True, 'import google.ai.generativelanguage as genai\n'), ((8717, 8740), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (8730, 8740), True, 'import google.ai.generativelanguage as genai\n'), ((9647, 9670), 'google.ai.generativelanguage.Content', 'genai.Content', ([], {'parts': '[]'}), '(parts=[])\n', (9660, 9670), True, 'import google.ai.generativelanguage as genai\n'), ((5324, 5348), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (5332, 5348), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((5485, 5517), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""Additional node"""'}), "(text='Additional node')\n", (5493, 5517), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((7273, 7297), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (7281, 7297), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((8201, 8225), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (8209, 8225), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((9130, 9154), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (9138, 9154), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((10055, 10079), 'llama_index.legacy.schema.TextNode', 'TextNode', ([], {'text': '"""It\'s 42"""'}), '(text="It\'s 42")\n', (10063, 10079), False, 'from llama_index.legacy.schema import NodeWithScore, TextNode\n'), ((1342, 1363), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (1352, 1363), True, 'import google.ai.generativelanguage as genai\n'), ((3938, 3959), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""42"""'}), "(text='42')\n", (3948, 3959), True, 'import google.ai.generativelanguage as genai\n'), ((1687, 1801), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/789"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/789', part_index=0)\n", (1731, 1801), True, 'import google.ai.generativelanguage as genai\n'), ((4282, 4396), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': '"""corpora/123/documents/456/chunks/777"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/777', part_index=0)\n", (4326, 4396), True, 'import google.ai.generativelanguage as genai\n'), ((4744, 4858), 'google.ai.generativelanguage.AttributionSourceId.GroundingPassageId', 'genai.AttributionSourceId.GroundingPassageId', ([], {'passage_id': 
'"""corpora/123/documents/456/chunks/888"""', 'part_index': '(0)'}), "(passage_id=\n 'corpora/123/documents/456/chunks/888', part_index=0)\n", (4788, 4858), True, 'import google.ai.generativelanguage as genai\n'), ((1522, 1563), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42."""'}), "(text='Meaning of life is 42.')\n", (1532, 1563), True, 'import google.ai.generativelanguage as genai\n'), ((4118, 4158), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Meaning of life is 42"""'}), "(text='Meaning of life is 42')\n", (4128, 4158), True, 'import google.ai.generativelanguage as genai\n'), ((4610, 4641), 'google.ai.generativelanguage.Part', 'genai.Part', ([], {'text': '"""Or maybe not"""'}), "(text='Or maybe not')\n", (4620, 4641), True, 'import google.ai.generativelanguage as genai\n')] |
from typing import Any, Dict, List, Optional, Tuple
from llama_index.core.base.base_query_engine import BaseQueryEngine
from llama_index.core.base.response.schema import RESPONSE_TYPE
from llama_index.core.callbacks.schema import CBEventType, EventPayload
from llama_index.core.indices.composability.graph import ComposableGraph
from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode
from llama_index.core.settings import (
Settings,
callback_manager_from_settings_or_context,
)
import llama_index.core.instrumentation as instrument
dispatcher = instrument.get_dispatcher(__name__)
class ComposableGraphQueryEngine(BaseQueryEngine):
"""Composable graph query engine.
This query engine can operate over a ComposableGraph.
It can take in custom query engines for its sub-indices.
Args:
graph (ComposableGraph): A ComposableGraph object.
custom_query_engines (Optional[Dict[str, BaseQueryEngine]]): A dictionary of
custom query engines.
recursive (bool): Whether to recursively query the graph.
**kwargs: additional arguments to be passed to the underlying index query
engine.
"""
def __init__(
self,
graph: ComposableGraph,
custom_query_engines: Optional[Dict[str, BaseQueryEngine]] = None,
recursive: bool = True,
**kwargs: Any
) -> None:
"""Init params."""
self._graph = graph
self._custom_query_engines = custom_query_engines or {}
self._kwargs = kwargs
# additional configs
self._recursive = recursive
callback_manager = callback_manager_from_settings_or_context(
Settings, self._graph.service_context
)
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> Dict[str, Any]:
"""Get prompt modules."""
return {}
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
@dispatcher.span
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return self._query_index(query_bundle, index_id=None, level=0)
def _query_index(
self,
query_bundle: QueryBundle,
index_id: Optional[str] = None,
level: int = 0,
) -> RESPONSE_TYPE:
"""Query a single index."""
index_id = index_id or self._graph.root_id
with self.callback_manager.event(
CBEventType.QUERY, payload={EventPayload.QUERY_STR: query_bundle.query_str}
) as query_event:
# get query engine
if index_id in self._custom_query_engines:
query_engine = self._custom_query_engines[index_id]
else:
query_engine = self._graph.get_index(index_id).as_query_engine(
**self._kwargs
)
with self.callback_manager.event(
CBEventType.RETRIEVE,
payload={EventPayload.QUERY_STR: query_bundle.query_str},
) as retrieve_event:
nodes = query_engine.retrieve(query_bundle)
retrieve_event.on_end(payload={EventPayload.NODES: nodes})
if self._recursive:
# do recursion here
nodes_for_synthesis = []
additional_source_nodes = []
for node_with_score in nodes:
node_with_score, source_nodes = self._fetch_recursive_nodes(
node_with_score, query_bundle, level
)
nodes_for_synthesis.append(node_with_score)
additional_source_nodes.extend(source_nodes)
response = query_engine.synthesize(
query_bundle, nodes_for_synthesis, additional_source_nodes
)
else:
response = query_engine.synthesize(query_bundle, nodes)
query_event.on_end(payload={EventPayload.RESPONSE: response})
return response
def _fetch_recursive_nodes(
self,
node_with_score: NodeWithScore,
query_bundle: QueryBundle,
level: int,
) -> Tuple[NodeWithScore, List[NodeWithScore]]:
"""Fetch nodes.
Uses existing node if it's not an index node.
Otherwise fetch response from corresponding index.
"""
if isinstance(node_with_score.node, IndexNode):
index_node = node_with_score.node
# recursive call
response = self._query_index(query_bundle, index_node.index_id, level + 1)
new_node = TextNode(text=str(response))
new_node_with_score = NodeWithScore(
node=new_node, score=node_with_score.score
)
return new_node_with_score, response.source_nodes
else:
return node_with_score, []
| [
"llama_index.core.settings.callback_manager_from_settings_or_context",
"llama_index.core.instrumentation.get_dispatcher",
"llama_index.core.schema.NodeWithScore"
] | [((585, 620), 'llama_index.core.instrumentation.get_dispatcher', 'instrument.get_dispatcher', (['__name__'], {}), '(__name__)\n', (610, 620), True, 'import llama_index.core.instrumentation as instrument\n'), ((1649, 1734), 'llama_index.core.settings.callback_manager_from_settings_or_context', 'callback_manager_from_settings_or_context', (['Settings', 'self._graph.service_context'], {}), '(Settings, self._graph.service_context\n )\n', (1690, 1734), False, 'from llama_index.core.settings import Settings, callback_manager_from_settings_or_context\n'), ((4741, 4798), 'llama_index.core.schema.NodeWithScore', 'NodeWithScore', ([], {'node': 'new_node', 'score': 'node_with_score.score'}), '(node=new_node, score=node_with_score.score)\n', (4754, 4798), False, 'from llama_index.core.schema import IndexNode, NodeWithScore, QueryBundle, TextNode\n')] |
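# --- Hedged usage sketch (not part of the original module) -------------------
# One way the ComposableGraphQueryEngine above is typically driven: build a
# ComposableGraph over two pre-built indices, override the query engine of one
# sub-index, and query recursively. The sample documents, summaries and
# similarity_top_k value are illustrative assumptions, not from the source.
from llama_index.core import Document, SummaryIndex, VectorStoreIndex
from llama_index.core.indices.composability import ComposableGraph
from llama_index.core.query_engine import ComposableGraphQueryEngine

city_index = VectorStoreIndex.from_documents([Document(text="Tokyo has about 14 million residents.")])
country_index = VectorStoreIndex.from_documents([Document(text="Japan is an island country in East Asia.")])
graph = ComposableGraph.from_indices(
    SummaryIndex,
    [city_index, country_index],
    index_summaries=["Facts about individual cities", "Facts about countries"],
)
engine = ComposableGraphQueryEngine(
    graph,
    custom_query_engines={city_index.index_id: city_index.as_query_engine(similarity_top_k=2)},
    recursive=True,
)
print(engine.query("How many people live in Tokyo?"))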
from llama_index import (
SimpleDirectoryReader,
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import LlamaCPP
from llama_index.llms.llama_utils import messages_to_prompt, completion_to_prompt
import llama_index.llms.llama_cpp
from langchain.embeddings import HuggingFaceEmbeddings
import config
llm = llama_index.llms.llama_cpp.LlamaCPP(
    model_kwargs={"n_gpu_layers": 1},
    # pass the llama2-style prompt formatters imported above so they are actually used
    messages_to_prompt=messages_to_prompt,
    completion_to_prompt=completion_to_prompt,
)
embed_model = HuggingFaceEmbeddings(model_name=config.EMBEDDING_MODEL_URL)
# create a service context
service_context = ServiceContext.from_defaults(
llm=llm,
embed_model=embed_model,
)
# load documents
documents = SimpleDirectoryReader(
config.KNOWLEDGE_BASE_PATH
).load_data()
# create vector store index
index = VectorStoreIndex.from_documents(documents, service_context=service_context)
# ================== Querying ================== #
# set up query engine
query_engine = index.as_query_engine()
response = query_engine.query("Who are the authors of this paper?")
print(response) | [
"llama_index.VectorStoreIndex.from_documents",
"llama_index.ServiceContext.from_defaults",
"llama_index.SimpleDirectoryReader"
] | [((431, 491), 'langchain.embeddings.HuggingFaceEmbeddings', 'HuggingFaceEmbeddings', ([], {'model_name': 'config.EMBEDDING_MODEL_URL'}), '(model_name=config.EMBEDDING_MODEL_URL)\n', (452, 491), False, 'from langchain.embeddings import HuggingFaceEmbeddings\n'), ((538, 600), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': 'embed_model'}), '(llm=llm, embed_model=embed_model)\n', (566, 600), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((747, 822), 'llama_index.VectorStoreIndex.from_documents', 'VectorStoreIndex.from_documents', (['documents'], {'service_context': 'service_context'}), '(documents, service_context=service_context)\n', (778, 822), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n'), ((642, 691), 'llama_index.SimpleDirectoryReader', 'SimpleDirectoryReader', (['config.KNOWLEDGE_BASE_PATH'], {}), '(config.KNOWLEDGE_BASE_PATH)\n', (663, 691), False, 'from llama_index import SimpleDirectoryReader, VectorStoreIndex, ServiceContext\n')] |
import time
import llama_index
from atlassian import Bitbucket
import os
import sys
sys.path.append('../')
import local_secrets as secrets
start_time = time.time()
stash = Bitbucket('https://git.techstyle.net', token=secrets.stash_token)
os.environ['OPENAI_API_KEY'] = secrets.techstyle_openai_key
project = 'DATASICENCE'
repo = stash.get_repo(project, 'brand-analytics')  # single-repo lookup; superseded by the repo_list loop below
length_cutoff = 100000
for repo in stash.repo_list(project):
count = 0
repo_slug = repo['slug']
files = stash.get_file_list(project, repo_slug)
index = llama_index.GPTSimpleVectorIndex([])
index_file = f'./stash_index/{project}_{repo_slug}.json'
if os.path.isfile(index_file):
continue
for file in files:
if file[-3:] not in ['.py']:
continue
try:
count = count + 1
url = f"https://git.techstyle.net/projects/{project}/repos/{repo_slug}/browse/{file}"
code = str(stash.get_content_of_file(project, repo_slug, file))
code = code[2:len(code)-1].replace("\\n", '\n')
print(file, len(code))
if len(code) > length_cutoff:
print(f'{repo_slug} {file} size {len(code)}, truncating')
code = code[0:length_cutoff]
content = f"Stash Project: {project}\nStash Repository: {repo_slug}\nStash URL: {url}\nStash Code:\n {code}"
index.insert(llama_index.Document(content))
except Exception as e:
print(f'Error {e} on {repo_slug} {file}')
index.save_to_disk(index_file)
print(f'Done, {count} files in repo {repo_slug} saved to index in {round(time.time() - start_time, 0)} seconds.')
# projects = stash.project_list()
# for project in projects:
# print(project['key'])
# repos = stash.repo_list('DataScience')
# for repo in repos:
# print(repo['slug'])
| [
"llama_index.GPTSimpleVectorIndex",
"llama_index.Document"
] | [((84, 106), 'sys.path.append', 'sys.path.append', (['"""../"""'], {}), "('../')\n", (99, 106), False, 'import sys\n'), ((153, 164), 'time.time', 'time.time', ([], {}), '()\n', (162, 164), False, 'import time\n'), ((173, 238), 'atlassian.Bitbucket', 'Bitbucket', (['"""https://git.techstyle.net"""'], {'token': 'secrets.stash_token'}), "('https://git.techstyle.net', token=secrets.stash_token)\n", (182, 238), False, 'from atlassian import Bitbucket\n'), ((540, 576), 'llama_index.GPTSimpleVectorIndex', 'llama_index.GPTSimpleVectorIndex', (['[]'], {}), '([])\n', (572, 576), False, 'import llama_index\n'), ((645, 671), 'os.path.isfile', 'os.path.isfile', (['index_file'], {}), '(index_file)\n', (659, 671), False, 'import os\n'), ((1390, 1419), 'llama_index.Document', 'llama_index.Document', (['content'], {}), '(content)\n', (1410, 1419), False, 'import llama_index\n'), ((1618, 1629), 'time.time', 'time.time', ([], {}), '()\n', (1627, 1629), False, 'import time\n')] |
import qdrant_client
from llama_index import (
VectorStoreIndex,
ServiceContext,
)
from llama_index.llms import Ollama
from llama_index.vector_stores.qdrant import QdrantVectorStore
import llama_index
llama_index.set_global_handler("simple")
# re-initialize the vector store
client = qdrant_client.QdrantClient(
path="./qdrant_data"
)
vector_store = QdrantVectorStore(client=client, collection_name="tweets")
# get the LLM again
llm = Ollama(model="mistral")
service_context = ServiceContext.from_defaults(llm=llm,embed_model="local")
# load the index from the vector store
index = VectorStoreIndex.from_vector_store(vector_store=vector_store,service_context=service_context)
query_engine = index.as_query_engine(similarity_top_k=20)
response = query_engine.query("Does the author like web frameworks? Give details.")
print(response)
| [
"llama_index.vector_stores.qdrant.QdrantVectorStore",
"llama_index.ServiceContext.from_defaults",
"llama_index.llms.Ollama",
"llama_index.set_global_handler",
"llama_index.VectorStoreIndex.from_vector_store"
] | [((210, 250), 'llama_index.set_global_handler', 'llama_index.set_global_handler', (['"""simple"""'], {}), "('simple')\n", (240, 250), False, 'import llama_index\n'), ((294, 342), 'qdrant_client.QdrantClient', 'qdrant_client.QdrantClient', ([], {'path': '"""./qdrant_data"""'}), "(path='./qdrant_data')\n", (320, 342), False, 'import qdrant_client\n'), ((364, 422), 'llama_index.vector_stores.qdrant.QdrantVectorStore', 'QdrantVectorStore', ([], {'client': 'client', 'collection_name': '"""tweets"""'}), "(client=client, collection_name='tweets')\n", (381, 422), False, 'from llama_index.vector_stores.qdrant import QdrantVectorStore\n'), ((450, 473), 'llama_index.llms.Ollama', 'Ollama', ([], {'model': '"""mistral"""'}), "(model='mistral')\n", (456, 473), False, 'from llama_index.llms import Ollama\n'), ((492, 550), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'embed_model': '"""local"""'}), "(llm=llm, embed_model='local')\n", (520, 550), False, 'from llama_index import VectorStoreIndex, ServiceContext\n'), ((598, 696), 'llama_index.VectorStoreIndex.from_vector_store', 'VectorStoreIndex.from_vector_store', ([], {'vector_store': 'vector_store', 'service_context': 'service_context'}), '(vector_store=vector_store,\n service_context=service_context)\n', (632, 696), False, 'from llama_index import VectorStoreIndex, ServiceContext\n')] |
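# --- Hedged addendum (not part of the original script) -----------------------
# The script above *re*-initializes the vector store, which implies an earlier
# ingestion step populated the "tweets" collection. A minimal sketch of what
# that step could look like, reusing the same vector_store and service_context
# from above and a hypothetical tweet Document:
from llama_index import Document, StorageContext, VectorStoreIndex

tweet_docs = [Document(text="Just shipped a new release of my favourite web framework!")]  # hypothetical data
storage_context = StorageContext.from_defaults(vector_store=vector_store)
VectorStoreIndex.from_documents(
    tweet_docs,
    storage_context=storage_context,
    service_context=service_context,
)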
## main function of AWS Lambda function
import llama_index
from llama_index import download_loader
import boto3
import json
import urllib.parse
from llama_index import SimpleDirectoryReader
def main(event, context):
# extracting s3 bucket and key information from SQS message
print(event)
s3_info = json.loads(event['Records'][0]['body'])
bucket_name = s3_info['Records'][0]['s3']['bucket']['name']
object_key = urllib.parse.unquote_plus(s3_info['Records'][0]['s3']['object']['key'], encoding='utf-8')
try:
        # the first approach to read the content of the uploaded file.
S3Reader = download_loader("S3Reader", custom_path='/tmp/llamahub_modules')
loader = S3Reader(bucket=bucket_name, key=object_key)
documents = loader.load_data()
# the second approach to read the content of uploaded file
# Creating an S3 client
# s3_client = boto3.client('s3')
# response = s3_client.get_object(Bucket=bucket_name, Key=object_key)
# file_content = response['Body'].read().decode('utf-8')
# save the file content to /tmp folder
# tmp_file_path = f"/tmp/{object_key.split('/')[-1]}"
        # with open(tmp_file_path, "w") as f:
        #     f.write(file_content)
        # reader = SimpleDirectoryReader(input_files=[tmp_file_path])
# doc = reader.load_data()
# print(f"Loaded {len(doc)} doc")
## TODO
# ReIndex or Create New Index from document
# Update or Insert into VectoDatabase
# (Optional) Update or Insert into DocStorage DB
# Update or Insert index to MongoDB
# Can have Ingestion Pipeline with Redis Cache
return {
'statusCode': 200
}
# # creating an index
except Exception as e:
print(f"Error reading the file {object_key}: {str(e)}")
return {
'statusCode': 500,
'body': json.dumps('Error reading the file')
} | [
"llama_index.download_loader"
] | [((313, 352), 'json.loads', 'json.loads', (["event['Records'][0]['body']"], {}), "(event['Records'][0]['body'])\n", (323, 352), False, 'import json\n'), ((628, 692), 'llama_index.download_loader', 'download_loader', (['"""S3Reader"""'], {'custom_path': '"""/tmp/llamahub_modules"""'}), "('S3Reader', custom_path='/tmp/llamahub_modules')\n", (643, 692), False, 'from llama_index import download_loader\n'), ((1963, 1999), 'json.dumps', 'json.dumps', (['"""Error reading the file"""'], {}), "('Error reading the file')\n", (1973, 1999), False, 'import json\n')] |
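# --- Hedged sketch for the TODO block above (not part of the original) -------
# One way to cover "(re)index the uploaded documents and upsert them into a
# vector database" using llama_index's IngestionPipeline. The helper name and
# the vector_store argument are illustrative (whatever store the Lambda is wired
# to), and the Redis-backed cache mentioned in the TODO could be supplied via
# the pipeline's cache= parameter.
from llama_index.embeddings import OpenAIEmbedding
from llama_index.ingestion import IngestionPipeline
from llama_index.node_parser import SentenceSplitter

def index_documents(documents, vector_store):
    pipeline = IngestionPipeline(
        transformations=[SentenceSplitter(chunk_size=512), OpenAIEmbedding()],
        vector_store=vector_store,
    )
    # splits, embeds and upserts the freshly uploaded documents
    return pipeline.run(documents=documents)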
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
if use_gpt_index_import:
basepy_raw_content = basepy_raw_content.replace(
"import llama_index", "import llama_index"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
loader_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
        use_gpt_index_import: If true, the downloaded module files have their
            llama_index import statements rewritten before being written to
            disk. With the current self-replacement of `import llama_index` /
            `from llama_index` this is effectively a no-op, kept for
            backwards compatibility.
            NOTE: this is a temporary workaround while we fully migrate all usages
            to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |
import json
from typing import Dict, List, Optional
import llama_index.query_engine
from llama_index import ServiceContext, QueryBundle
from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager
from llama_index.indices.base import BaseIndex
from llama_index.indices.query.base import BaseQueryEngine
from llama_index.llms.base import LLM
from llama_index.prompts.mixin import PromptMixinType
from llama_index.response.schema import RESPONSE_TYPE, Response
from llama_index.selectors import LLMSingleSelector
from llama_index.tools import QueryEngineTool
from common.config import DEBUG, LLM_CACHE_ENABLED
from common.llm import llm_predict, create_llm
from common.prompt import CH_SINGLE_SELECT_PROMPT_TMPL
from common.utils import ObjectEncoder
from query_todo.query_engine import load_indices
from query_todo.compose import create_compose_query_engine
class EchoNameEngine(BaseQueryEngine):
    def __init__(self, name: str, callback_manager: Optional[CallbackManager] = None):
self.name = name
super().__init__(callback_manager)
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
        # The async path simply reuses the synchronous implementation.
        return self._query(query_bundle)
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(f"我是{self.name}")
class LlmQueryEngine(BaseQueryEngine):
def __init__(self, llm: LLM, callback_manager: CallbackManager):
self.llm = llm
super().__init__(callback_manager=callback_manager)
def _get_prompt_modules(self) -> PromptMixinType:
return {}
def _query(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
return Response(llm_predict(self.llm, query_bundle.query_str))
async def _aquery(self, query_bundle: QueryBundle) -> RESPONSE_TYPE:
        # The async path simply reuses the synchronous implementation.
        return self._query(query_bundle)
def create_route_query_engine(query_engines: List[BaseQueryEngine], descriptions: List[str],
                              service_context: Optional[ServiceContext] = None):
assert len(query_engines) == len(descriptions)
    # TODO
    # Build a RouterQueryEngine from the given query_engines and descriptions so that
    # each query is routed to the matching engine (a hedged sketch follows this function).
    # https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
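# --- Hedged sketch (editor's addition, not the exercise's reference solution) ---
# One way create_route_query_engine could be implemented, assuming the
# RouterQueryEngine / LLMSingleSelector / QueryEngineTool APIs of llama_index 0.9.x;
# the name _create_route_query_engine_sketch is illustrative only.
def _create_route_query_engine_sketch(query_engines: List[BaseQueryEngine], descriptions: List[str],
                                      service_context: Optional[ServiceContext] = None):
    assert len(query_engines) == len(descriptions)
    # Wrap every engine with its description so the selector can match intent to engine.
    tools = [
        QueryEngineTool.from_defaults(query_engine=engine, description=description)
        for engine, description in zip(query_engines, descriptions)
    ]
    # CH_SINGLE_SELECT_PROMPT_TMPL is the Chinese single-select prompt from common.prompt.
    selector = LLMSingleSelector.from_defaults(
        service_context=service_context,
        prompt_template_str=CH_SINGLE_SELECT_PROMPT_TMPL,
    )
    return llama_index.query_engine.RouterQueryEngine(
        selector=selector,
        query_engine_tools=tools,
        service_context=service_context,
    )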
class Chatter:
def __init__(self):
if DEBUG:
debug_handler = LlamaDebugHandler()
cb_manager = CallbackManager([debug_handler])
else:
debug_handler = None
cb_manager = CallbackManager()
llm = create_llm(cb_manager, LLM_CACHE_ENABLED)
service_context = ServiceContext.from_defaults(
llm=llm,
callback_manager=cb_manager
)
self.cb_manager = cb_manager
self.city_indices: Dict[str, List[BaseIndex]] = load_indices(service_context)
self.service_context = service_context
self.llm = llm
self.debug_handler = debug_handler
self.query_engine = self.create_query_engine()
def create_query_engine(self):
index_query_engine = create_compose_query_engine(self.city_indices, self.service_context)
index_summary = f"提供 {', '.join(self.city_indices.keys())} 这几个城市的相关信息"
llm_query_engine = LlmQueryEngine(llm=self.llm, callback_manager=self.cb_manager)
llm_summary = f"提供其他所有信息"
        # Implement intent recognition: route each query to the appropriate query engine,
        # splitting traffic between chit-chat and city-information lookup
        # (a hedged sketch follows this class).
        # https://docs.llamaindex.ai/en/stable/module_guides/querying/router/root.html#using-as-a-query-engine
raise NotImplementedError
def _print_and_flush_debug_info(self):
if self.debug_handler:
for event in self.debug_handler.get_events():
if event.event_type in (CBEventType.LLM, CBEventType.RETRIEVE):
print(
f"[DebugInfo] event_type={event.event_type}, content={json.dumps(event.payload, ensure_ascii=False, cls=ObjectEncoder)}")
self.debug_handler.flush_event_logs()
def chat(self, query):
response = self.query_engine.query(query)
self._print_and_flush_debug_info()
return response
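# --- Hedged sketch (editor's addition) ---
# Chatter.create_query_engine could wire the two engines into the router built by
# create_route_query_engine, for example:
#
#     return create_route_query_engine(
#         [index_query_engine, llm_query_engine],
#         [index_summary, llm_summary],
#         service_context=self.service_context,
#     )
#
# City questions are then routed to the composed index engine, while everything
# else falls through to the plain-LLM engine.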
| [
"llama_index.callbacks.LlamaDebugHandler",
"llama_index.response.schema.Response",
"llama_index.ServiceContext.from_defaults",
"llama_index.callbacks.CallbackManager"
] | [((1298, 1324), 'llama_index.response.schema.Response', 'Response', (['f"""我是{self.name}"""'], {}), "(f'我是{self.name}')\n", (1306, 1324), False, 'from llama_index.response.schema import RESPONSE_TYPE, Response\n'), ((2530, 2571), 'common.llm.create_llm', 'create_llm', (['cb_manager', 'LLM_CACHE_ENABLED'], {}), '(cb_manager, LLM_CACHE_ENABLED)\n', (2540, 2571), False, 'from common.llm import llm_predict, create_llm\n'), ((2598, 2664), 'llama_index.ServiceContext.from_defaults', 'ServiceContext.from_defaults', ([], {'llm': 'llm', 'callback_manager': 'cb_manager'}), '(llm=llm, callback_manager=cb_manager)\n', (2626, 2664), False, 'from llama_index import ServiceContext, QueryBundle\n'), ((2792, 2821), 'query_todo.query_engine.load_indices', 'load_indices', (['service_context'], {}), '(service_context)\n', (2804, 2821), False, 'from query_todo.query_engine import load_indices\n'), ((3055, 3123), 'query_todo.compose.create_compose_query_engine', 'create_compose_query_engine', (['self.city_indices', 'self.service_context'], {}), '(self.city_indices, self.service_context)\n', (3082, 3123), False, 'from query_todo.compose import create_compose_query_engine\n'), ((1683, 1728), 'common.llm.llm_predict', 'llm_predict', (['self.llm', 'query_bundle.query_str'], {}), '(self.llm, query_bundle.query_str)\n', (1694, 1728), False, 'from common.llm import llm_predict, create_llm\n'), ((2348, 2367), 'llama_index.callbacks.LlamaDebugHandler', 'LlamaDebugHandler', ([], {}), '()\n', (2365, 2367), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2393, 2425), 'llama_index.callbacks.CallbackManager', 'CallbackManager', (['[debug_handler]'], {}), '([debug_handler])\n', (2408, 2425), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((2498, 2515), 'llama_index.callbacks.CallbackManager', 'CallbackManager', ([], {}), '()\n', (2513, 2515), False, 'from llama_index.callbacks import CBEventType, LlamaDebugHandler, CallbackManager\n'), ((3855, 3919), 'json.dumps', 'json.dumps', (['event.payload'], {'ensure_ascii': '(False)', 'cls': 'ObjectEncoder'}), '(event.payload, ensure_ascii=False, cls=ObjectEncoder)\n', (3865, 3919), False, 'import json\n')] |
"""Download."""
import json
import logging
import os
import subprocess
import sys
from enum import Enum
from importlib import util
from pathlib import Path
from typing import Any, Dict, List, Optional, Union
import pkg_resources
import requests
from pkg_resources import DistributionNotFound
from llama_index.download.utils import (
get_exports,
get_file_content,
initialize_directory,
rewrite_exports,
)
LLAMA_HUB_CONTENTS_URL = f"https://raw.githubusercontent.com/run-llama/llama-hub/main"
LLAMA_HUB_PATH = "/llama_hub"
LLAMA_HUB_URL = LLAMA_HUB_CONTENTS_URL + LLAMA_HUB_PATH
PATH_TYPE = Union[str, Path]
logger = logging.getLogger(__name__)
LLAMAHUB_ANALYTICS_PROXY_SERVER = "https://llamahub.ai/api/analytics/downloads"
class MODULE_TYPE(str, Enum):
LOADER = "loader"
TOOL = "tool"
LLAMAPACK = "llamapack"
DATASETS = "datasets"
def get_module_info(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_class: str,
refresh_cache: bool = False,
library_path: str = "library.json",
disable_library_cache: bool = False,
) -> Dict:
"""Get module info."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
local_library_path = f"{local_dir_path}/{library_path}"
module_id = None # e.g. `web/simple_web`
extra_files = [] # e.g. `web/simple_web/utils.py`
# Check cache first
if not refresh_cache and os.path.exists(local_library_path):
with open(local_library_path) as f:
library = json.load(f)
if module_class in library:
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# Fetch up-to-date library from remote repo if module_id not found
if module_id is None:
library_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{library_path}"
)
library = json.loads(library_raw_content)
if module_class not in library:
raise ValueError("Loader class name not found in library")
module_id = library[module_class]["id"]
extra_files = library[module_class].get("extra_files", [])
# create cache dir if needed
local_library_dir = os.path.dirname(local_library_path)
if not disable_library_cache:
if not os.path.exists(local_library_dir):
os.makedirs(local_library_dir)
# Update cache
with open(local_library_path, "w") as f:
f.write(library_raw_content)
if module_id is None:
raise ValueError("Loader class name not found in library")
return {
"module_id": module_id,
"extra_files": extra_files,
}
def download_module_and_reqs(
local_dir_path: PATH_TYPE,
remote_dir_path: PATH_TYPE,
module_id: str,
extra_files: List[str],
refresh_cache: bool = False,
use_gpt_index_import: bool = False,
base_file_name: str = "base.py",
override_path: bool = False,
) -> None:
"""Load module."""
if isinstance(local_dir_path, str):
local_dir_path = Path(local_dir_path)
if override_path:
module_path = str(local_dir_path)
else:
module_path = f"{local_dir_path}/{module_id}"
if refresh_cache or not os.path.exists(module_path):
os.makedirs(module_path, exist_ok=True)
basepy_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{base_file_name}"
)
    if use_gpt_index_import:
        # NOTE: both replacements below are currently no-ops, apparently a leftover
        # from the old gpt_index -> llama_index import rewrite.
basepy_raw_content = basepy_raw_content.replace(
"import llama_index", "import llama_index"
)
basepy_raw_content = basepy_raw_content.replace(
"from llama_index", "from llama_index"
)
with open(f"{module_path}/{base_file_name}", "w") as f:
f.write(basepy_raw_content)
# Get content of extra files if there are any
# and write them under the loader directory
for extra_file in extra_files:
extra_file_raw_content, _ = get_file_content(
str(remote_dir_path), f"/{module_id}/{extra_file}"
)
# If the extra file is an __init__.py file, we need to
# add the exports to the __init__.py file in the modules directory
if extra_file == "__init__.py":
loader_exports = get_exports(extra_file_raw_content)
existing_exports = []
init_file_path = local_dir_path / "__init__.py"
# if the __init__.py file do not exists, we need to create it
mode = "a+" if not os.path.exists(init_file_path) else "r+"
with open(init_file_path, mode) as f:
f.write(f"from .{module_id} import {', '.join(loader_exports)}")
existing_exports = get_exports(f.read())
rewrite_exports(existing_exports + loader_exports, str(local_dir_path))
with open(f"{module_path}/{extra_file}", "w") as f:
f.write(extra_file_raw_content)
# install requirements
requirements_path = f"{local_dir_path}/requirements.txt"
if not os.path.exists(requirements_path):
# NOTE: need to check the status code
response_txt, status_code = get_file_content(
str(remote_dir_path), f"/{module_id}/requirements.txt"
)
if status_code == 200:
with open(requirements_path, "w") as f:
f.write(response_txt)
# Install dependencies if there are any and not already installed
if os.path.exists(requirements_path):
try:
requirements = pkg_resources.parse_requirements(
Path(requirements_path).open()
)
pkg_resources.require([str(r) for r in requirements])
except DistributionNotFound:
subprocess.check_call(
[sys.executable, "-m", "pip", "install", "-r", requirements_path]
)
def download_llama_module(
module_class: str,
llama_hub_url: str = LLAMA_HUB_URL,
refresh_cache: bool = False,
custom_dir: Optional[str] = None,
custom_path: Optional[str] = None,
library_path: str = "library.json",
base_file_name: str = "base.py",
use_gpt_index_import: bool = False,
disable_library_cache: bool = False,
override_path: bool = False,
) -> Any:
"""Download a module from LlamaHub.
Can be a loader, tool, pack, or more.
Args:
        module_class: The name of the llama module class you want to download,
such as `GmailOpenAIAgentPack`.
refresh_cache: If true, the local cache will be skipped and the
loader will be fetched directly from the remote repo.
custom_dir: Custom dir name to download loader into (under parent folder).
custom_path: Custom dirpath to download loader into.
library_path: File name of the library file.
use_gpt_index_import: If true, the loader files will use
llama_index as the base dependency. By default (False),
the loader files use llama_index as the base dependency.
NOTE: this is a temporary workaround while we fully migrate all usages
to llama_index.
is_dataset: whether or not downloading a LlamaDataset
Returns:
A Loader, A Pack, An Agent, or A Dataset
"""
# create directory / get path
dirpath = initialize_directory(custom_path=custom_path, custom_dir=custom_dir)
# fetch info from library.json file
module_info = get_module_info(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_class=module_class,
refresh_cache=refresh_cache,
library_path=library_path,
disable_library_cache=disable_library_cache,
)
module_id = module_info["module_id"]
extra_files = module_info["extra_files"]
# download the module, install requirements
download_module_and_reqs(
local_dir_path=dirpath,
remote_dir_path=llama_hub_url,
module_id=module_id,
extra_files=extra_files,
refresh_cache=refresh_cache,
use_gpt_index_import=use_gpt_index_import,
base_file_name=base_file_name,
override_path=override_path,
)
# loads the module into memory
if override_path:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{base_file_name}"
)
if spec is None:
raise ValueError(f"Could not find file: {dirpath}/{base_file_name}.")
else:
spec = util.spec_from_file_location(
"custom_module", location=f"{dirpath}/{module_id}/{base_file_name}"
)
if spec is None:
raise ValueError(
f"Could not find file: {dirpath}/{module_id}/{base_file_name}."
)
module = util.module_from_spec(spec)
spec.loader.exec_module(module) # type: ignore
return getattr(module, module_class)
def track_download(module_class: str, module_type: str) -> None:
"""Tracks number of downloads via Llamahub proxy.
Args:
module_class: The name of the llama module being downloaded, e.g.,`GmailOpenAIAgentPack`.
module_type: Can be "loader", "tool", "llamapack", or "datasets"
"""
try:
requests.post(
LLAMAHUB_ANALYTICS_PROXY_SERVER,
json={"type": module_type, "plugin": module_class},
)
except Exception as e:
logger.info(f"Error tracking downloads for {module_class} : {e}")
| [
"llama_index.download.utils.get_exports",
"llama_index.download.utils.initialize_directory"
] | [((637, 664), 'logging.getLogger', 'logging.getLogger', (['__name__'], {}), '(__name__)\n', (654, 664), False, 'import logging\n'), ((5550, 5583), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5564, 5583), False, 'import os\n'), ((7403, 7471), 'llama_index.download.utils.initialize_directory', 'initialize_directory', ([], {'custom_path': 'custom_path', 'custom_dir': 'custom_dir'}), '(custom_path=custom_path, custom_dir=custom_dir)\n', (7423, 7471), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((8849, 8876), 'importlib.util.module_from_spec', 'util.module_from_spec', (['spec'], {}), '(spec)\n', (8870, 8876), False, 'from importlib import util\n'), ((1197, 1217), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (1201, 1217), False, 'from pathlib import Path\n'), ((1434, 1468), 'os.path.exists', 'os.path.exists', (['local_library_path'], {}), '(local_library_path)\n', (1448, 1468), False, 'import os\n'), ((1938, 1969), 'json.loads', 'json.loads', (['library_raw_content'], {}), '(library_raw_content)\n', (1948, 1969), False, 'import json\n'), ((2263, 2298), 'os.path.dirname', 'os.path.dirname', (['local_library_path'], {}), '(local_library_path)\n', (2278, 2298), False, 'import os\n'), ((3131, 3151), 'pathlib.Path', 'Path', (['local_dir_path'], {}), '(local_dir_path)\n', (3135, 3151), False, 'from pathlib import Path\n'), ((3347, 3386), 'os.makedirs', 'os.makedirs', (['module_path'], {'exist_ok': '(True)'}), '(module_path, exist_ok=True)\n', (3358, 3386), False, 'import os\n'), ((5139, 5172), 'os.path.exists', 'os.path.exists', (['requirements_path'], {}), '(requirements_path)\n', (5153, 5172), False, 'import os\n'), ((8326, 8416), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{base_file_name}')\n", (8354, 8416), False, 'from importlib import util\n'), ((8566, 8668), 'importlib.util.spec_from_file_location', 'util.spec_from_file_location', (['"""custom_module"""'], {'location': 'f"""{dirpath}/{module_id}/{base_file_name}"""'}), "('custom_module', location=\n f'{dirpath}/{module_id}/{base_file_name}')\n", (8594, 8668), False, 'from importlib import util\n'), ((9299, 9401), 'requests.post', 'requests.post', (['LLAMAHUB_ANALYTICS_PROXY_SERVER'], {'json': "{'type': module_type, 'plugin': module_class}"}), "(LLAMAHUB_ANALYTICS_PROXY_SERVER, json={'type': module_type,\n 'plugin': module_class})\n", (9312, 9401), False, 'import requests\n'), ((1536, 1548), 'json.load', 'json.load', (['f'], {}), '(f)\n', (1545, 1548), False, 'import json\n'), ((3310, 3337), 'os.path.exists', 'os.path.exists', (['module_path'], {}), '(module_path)\n', (3324, 3337), False, 'import os\n'), ((4385, 4420), 'llama_index.download.utils.get_exports', 'get_exports', (['extra_file_raw_content'], {}), '(extra_file_raw_content)\n', (4396, 4420), False, 'from llama_index.download.utils import get_exports, get_file_content, initialize_directory, rewrite_exports\n'), ((2356, 2389), 'os.path.exists', 'os.path.exists', (['local_library_dir'], {}), '(local_library_dir)\n', (2370, 2389), False, 'import os\n'), ((2407, 2437), 'os.makedirs', 'os.makedirs', (['local_library_dir'], {}), '(local_library_dir)\n', (2418, 2437), False, 'import os\n'), ((5835, 5927), 'subprocess.check_call', 'subprocess.check_call', (["[sys.executable, '-m', 'pip', 
'install', '-r', requirements_path]"], {}), "([sys.executable, '-m', 'pip', 'install', '-r',\n requirements_path])\n", (5856, 5927), False, 'import subprocess\n'), ((4620, 4650), 'os.path.exists', 'os.path.exists', (['init_file_path'], {}), '(init_file_path)\n', (4634, 4650), False, 'import os\n'), ((5675, 5698), 'pathlib.Path', 'Path', (['requirements_path'], {}), '(requirements_path)\n', (5679, 5698), False, 'from pathlib import Path\n')] |