# Dataset Card for hlm-paraphrase-multilingual-mpnet-base-v2
## Dataset Summary
A Chroma vectorstore for 红楼梦 (Dream of the Red Chamber), created with:
```python
import os

from chromadb.config import Settings
from langchain.document_loaders import TextLoader
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain.vectorstores import Chroma

model_name = 'paraphrase-multilingual-mpnet-base-v2'
embedding = SentenceTransformerEmbeddings(model_name=model_name)

# Fetch the source text
url = 'https://raw.githubusercontent.com/ffreemt/multilingual-dokugpt/master/docs/hlm.txt'
os.system(f'wget -c {url}')
doc = TextLoader('hlm.txt').load()

# Split into overlapping chunks
text_splitter = RecursiveCharacterTextSplitter(
    separators=["\n\n", "\n", ".", "!", "?", ",", " ", ""],
    chunk_size=620,
    chunk_overlap=60,
    length_function=len,
)
doc_chunks = text_splitter.split_documents(doc)

client_settings = Settings(
    chroma_db_impl="duckdb+parquet",
    anonymized_telemetry=False,
    persist_directory='db',
)

# Embed and index the chunks; takes 8-20 minutes on CPU
vectorstore = Chroma.from_documents(
    documents=doc_chunks,
    embedding=embedding,
    persist_directory='db',
    client_settings=client_settings,
)
vectorstore.persist()
```
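A quick way to confirm the build succeeded is to query the store right after persisting. A minimal sanity-check sketch (the query string is just an example):

```python
# Query the freshly built store from the snippet above
hits = vectorstore.similarity_search("黛玉", k=1)
print(hits[0].page_content[:100])
```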
## How to use
Download the `hlm` directory to a local directory, e.g., `db`:
from huggingface_hub import snapshot_download
snapshot_download(
repo_id="mikeee/chroma-paraphrase-multilingual-mpnet-base-v2",
repo_type="dataset",
allow_patterns="hlm/*",
local_dir="db",
resume_download=True,
)
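Since `allow_patterns` preserves the repository's folder layout, the files land under `db/hlm`. A minimal sketch to eyeball the download (the exact file names are an assumption based on Chroma's old duckdb+parquet layout):

```python
import os

# Expect something like chroma-collections.parquet, chroma-embeddings.parquet
# and an index/ directory (assumed duckdb+parquet layout)
print(os.listdir("db/hlm"))
```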
Load the vectorstore:
```python
from chromadb.config import Settings
from langchain.embeddings import SentenceTransformerEmbeddings
from langchain.vectorstores import Chroma

# Must be the same model that was used to build the store
model_name = 'paraphrase-multilingual-mpnet-base-v2'
embedding = SentenceTransformerEmbeddings(model_name=model_name)

client_settings = Settings(
    chroma_db_impl="duckdb+parquet",
    anonymized_telemetry=False,
    persist_directory='db/hlm',
)
db = Chroma(
    # persist_directory is picked up from client_settings
    embedding_function=embedding,
    client_settings=client_settings,
)
```
```python
res = db.search("红楼梦主线", search_type="similarity", k=2)
print(res)
# [Document(page_content='通灵宝玉正面图式\u3000通灵宝玉反面图式\n\n\n\n玉宝灵通\u3000\u3000\u3000\u3000\u3000三二一\n\n仙莫\u3000\u3000\u3000\u3000\u3000\u3000知疗除\n\n寿失\u3000\u3000\u3000\u3000\u3000\u3000祸冤邪\n\n恒莫\u3000\u3000\u3000\u3000\u3000\u3000福疾崇\n\n昌忘\n\n\n\n宝钗看毕,【甲戌双行。。。
```
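The loaded store also works with the other langchain retrieval entry points. A minimal sketch (the query string and `k` are just example values):

```python
# Scored variant: each hit comes back with a distance (smaller = closer)
docs_and_scores = db.similarity_search_with_score("黛玉葬花", k=2)
for doc, score in docs_and_scores:
    print(score, doc.page_content[:50])

# Or expose the store as a retriever for use in chains
retriever = db.as_retriever(search_kwargs={"k": 2})
```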