diff --git "a/awesome-japanese-nlp-resources-search.json" "b/awesome-japanese-nlp-resources-search.json" --- "a/awesome-japanese-nlp-resources-search.json" +++ "b/awesome-japanese-nlp-resources-search.json" @@ -3,9 +3,9 @@ "description": "Fine-tuned XLSR-53 large model for speech recognition in Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the train and validation splits of Common Voice 6.1, CSS10 and JSUT.", "url": "https://huggingface.co./jonatasgrosman/wav2vec2-large-xlsr-53-japanese", "project_name": "wav2vec2-large-xlsr-53-japanese", - "downloads": 2251749, + "downloads": 2336885, "source": "Hugging Face", - "score": 23.786891762662968, + "score": 22.61415962186833, "first_commit": "2021-04-16 00:20:03", "latest_commit": "2022-12-14 01:58:09", "languages": [], @@ -19,7 +19,7 @@ "project_name": "donut", "stargazers_count": 5553, "source": "GitHub", - "score": 16.02595241577937, + "score": 16.09053978411268, "first_commit": "2022-07-20 23:15:30", "latest_commit": "2023-07-31 15:14:20", "languages": [ @@ -27,27 +27,13 @@ ], "model_or_dataset": null }, - { - "description": "xlm-roberta-ner-japanese (Japanese caption : 日本語の固有表現抽出のモデル)", - "url": "https://huggingface.co./tsmatz/xlm-roberta-ner-japanese", - "project_name": "xlm-roberta-ner-japanese", - "downloads": 1002653, - "source": "Hugging Face", - "score": 10.540290467530491, - "first_commit": "2022-10-24 02:08:37", - "latest_commit": "2024-07-12 00:01:56", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.277, - "model_architectures": "RobertaForTokenClassification" - }, { "description": "BERT base Japanese (IPA dictionary)", "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese", "project_name": "bert-base-japanese", - "downloads": 929186, + "downloads": 1315391, "source": "Hugging Face", - "score": 9.761176566477651, + "score": 12.687247732332146, "first_commit": "2020-04-28 21:34:23", "latest_commit": "2024-02-22 00:57:00", "languages": [], @@ -55,13 +41,27 @@ "model_size": null, "model_architectures": "BertForMaskedLM" }, + { + "description": "xlm-roberta-ner-japanese (Japanese caption : 日本語の固有表現抽出のモデル)", + "url": "https://huggingface.co./tsmatz/xlm-roberta-ner-japanese", + "project_name": "xlm-roberta-ner-japanese", + "downloads": 1034005, + "source": "Hugging Face", + "score": 9.952729439567793, + "first_commit": "2022-10-24 02:08:37", + "latest_commit": "2024-07-12 00:01:56", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.277, + "model_architectures": "RobertaForTokenClassification" + }, { "description": "Neologism dictionary based on the language resources on the Web for mecab-ipadic", "url": "https://github.com/neologd/mecab-ipadic-neologd", "project_name": "mecab-ipadic-neologd", "stargazers_count": 2695, "source": "GitHub", - "score": 7.60524267604083, + "score": 7.6364257972917775, "first_commit": "2015-03-09 16:46:28", "latest_commit": "2020-09-14 19:56:40", "languages": [ @@ -75,7 +75,7 @@ "project_name": "voicevox", "stargazers_count": 2411, "source": "GitHub", - "score": 6.768475158166181, + "score": 6.796338893073045, "first_commit": "2021-08-01 02:41:10", "latest_commit": "2024-08-16 01:45:33", "languages": [ @@ -90,7 +90,7 @@ "project_name": "mozc", "stargazers_count": 2333, "source": "GitHub", - "score": 6.538658727200609, + "score": 6.565610799660859, "first_commit": "2010-05-10 12:05:41", "latest_commit": "2024-08-15 07:37:57", "languages": [ @@ -104,19 +104,47 @@ "project_name": "engineer-vocabulary-list", "stargazers_count": 1766, "source": 
"GitHub", - "score": 4.868070055950871, + "score": 4.888395043703039, "first_commit": "2020-09-30 14:16:14", "latest_commit": "2020-11-04 08:32:10", "languages": [], "model_or_dataset": "dataset" }, + { + "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", + "url": "https://huggingface.co./kha-white/manga-ocr-base", + "project_name": "manga-ocr-base", + "downloads": 478943, + "source": "Hugging Face", + "score": 4.5586188846064015, + "first_commit": "2022-01-15 17:39:06", + "latest_commit": "2022-06-22 15:34:05", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "VisionEncoderDecoderModel" + }, + { + "description": "BERT base Japanese (IPA dictionary, whole word masking enabled)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-whole-word-masking", + "project_name": "bert-base-japanese-whole-word-masking", + "downloads": 442261, + "source": "Hugging Face", + "score": 4.202142016477988, + "first_commit": "2020-04-28 21:34:35", + "latest_commit": "2024-02-22 00:57:37", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForMaskedLM" + }, { "description": "About Optical character recognition for Japanese text, with the main focus being Japanese manga", "url": "https://github.com/kha-white/manga-ocr", "project_name": "manga-ocr", "stargazers_count": 1517, "source": "GitHub", - "score": 4.134425295560774, + "score": 4.151839976271826, "first_commit": "2022-01-15 18:18:35", "latest_commit": "2024-06-29 11:23:04", "languages": [ @@ -130,7 +158,7 @@ "project_name": "N46Whisper", "stargazers_count": 1515, "source": "GitHub", - "score": 4.128532566561657, + "score": 4.145923871312539, "first_commit": "2022-10-25 16:27:20", "latest_commit": "2024-06-30 15:22:02", "languages": [ @@ -140,18 +168,18 @@ "model_or_dataset": null }, { - "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", - "url": "https://huggingface.co./kha-white/manga-ocr-base", - "project_name": "manga-ocr-base", - "downloads": 372642, + "description": "JMMLU Japanese Massive Multitask Language Understanding Benchmark JMMLU is a four-choice question set consisting of Japanese-translated questions of a portion of MMLU (Paper, Github) (Translated questions) and questions based on unique Japanese cultural context (Japanese questions).", + "url": "https://huggingface.co./datasets/nlp-waseda/JMMLU", + "project_name": "JMMLU", + "downloads": 370939, "source": "Hugging Face", - "score": 3.859054975184262, - "first_commit": "2022-01-15 17:39:06", - "latest_commit": "2022-06-22 15:34:05", + "score": 3.5090325025841227, + "first_commit": "2024-02-09 12:19:13", + "latest_commit": "2024-02-27 05:22:30", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "VisionEncoderDecoderModel" + "model_architectures": null }, { "description": "無料で使える中品質なテキスト読み上げソフトウェア、VOICEVOXの音声合成エンジン", @@ -159,7 +187,7 @@ "project_name": "voicevox_engine", "stargazers_count": 1257, "source": "GitHub", - "score": 3.3683705256755325, + "score": 3.3827463315645363, "first_commit": "2021-08-02 02:39:41", "latest_commit": "2024-08-15 01:08:26", "languages": [ @@ -173,7 +201,7 @@ "project_name": "ojichat", "stargazers_count": 1249, "source": "GitHub", - "score": 3.3447996096790633, + "score": 3.359081911727389, "first_commit": "2019-05-25 03:44:08", "latest_commit": 
"2023-04-23 16:02:15", "languages": [ @@ -187,7 +215,7 @@ "project_name": "yomichan", "stargazers_count": 1044, "source": "GitHub", - "score": 2.740794887269546, + "score": 2.752681153400487, "first_commit": "2016-03-16 20:33:15", "latest_commit": "2023-02-25 12:43:18", "languages": [ @@ -197,18 +225,18 @@ "model_or_dataset": null }, { - "description": "BERT base Japanese (IPA dictionary, whole word masking enabled)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-whole-word-masking", - "project_name": "bert-base-japanese-whole-word-masking", - "downloads": 261740, + "description": "rinna/japanese-cloob-vit-b-16", + "url": "https://huggingface.co./rinna/japanese-cloob-vit-b-16", + "project_name": "japanese-cloob-vit-b-16", + "downloads": 282737, "source": "Hugging Face", - "score": 2.682944750803765, - "first_commit": "2020-04-28 21:34:35", - "latest_commit": "2024-02-22 00:57:37", + "score": 2.651882600107094, + "first_commit": "2022-04-27 08:29:29", + "latest_commit": "2024-07-22 08:09:24", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_size": 0.197, + "model_architectures": "CLOOBModel" }, { "description": "Kuromoji is a self-contained and very easy to use Japanese morphological analyzer designed for search", @@ -216,7 +244,7 @@ "project_name": "kuromoji", "stargazers_count": 941, "source": "GitHub", - "score": 2.437319343815008, + "score": 2.4480017479972145, "first_commit": "2011-01-20 15:26:44", "latest_commit": "2019-09-23 11:13:04", "languages": [ @@ -230,7 +258,7 @@ "project_name": "mecab", "stargazers_count": 908, "source": "GitHub", - "score": 2.3400893153295734, + "score": 2.350386016168981, "first_commit": "2011-10-15 15:16:30", "latest_commit": "2023-05-24 16:04:25", "languages": [ @@ -244,27 +272,13 @@ ], "model_or_dataset": null }, - { - "description": "GLuCoSE (General Luke-based Contrastive Sentence Embedding)-base-Japanese 日本語のREADME/Japanese README GLuCoSE (General LUke-based COntrastive Sentence Embedding, \"glucose\") is a Japanese text embedding model based on LUKE.", - "url": "https://huggingface.co./pkshatech/GLuCoSE-base-ja", - "project_name": "GLuCoSE-base-ja", - "downloads": 227805, - "source": "Hugging Face", - "score": 2.3230657543532756, - "first_commit": "2023-07-16 07:28:46", - "latest_commit": "2023-08-25 02:53:22", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LukeModel" - }, { "description": "オープンソースの日本語LLMまとめ", "url": "https://github.com/llm-jp/awesome-japanese-llm", "project_name": "awesome-japanese-llm", "stargazers_count": 879, "source": "GitHub", - "score": 2.2546447448423734, + "score": 2.264602494259322, "first_commit": "2023-07-09 13:36:38", "latest_commit": "2024-08-10 19:11:35", "languages": [ @@ -278,7 +292,7 @@ "project_name": "janome", "stargazers_count": 843, "source": "GitHub", - "score": 2.1485756228582633, + "score": 2.158112604992159, "first_commit": "2015-02-14 18:45:54", "latest_commit": "2023-07-01 20:31:23", "languages": [ @@ -292,7 +306,7 @@ "project_name": "voicevox_core", "stargazers_count": 839, "source": "GitHub", - "score": 2.1367901648600287, + "score": 2.1462803950735854, "first_commit": "2021-08-31 23:19:33", "latest_commit": "2024-08-14 00:00:48", "languages": [ @@ -310,7 +324,7 @@ "project_name": "kuromoji.js", "stargazers_count": 831, "source": "GitHub", - "score": 2.1132192488635595, + "score": 2.122615975236438, "first_commit": "2014-12-04 17:31:39", "latest_commit": "2018-11-24 
16:05:09", "languages": [ @@ -318,13 +332,27 @@ ], "model_or_dataset": null }, + { + "description": "This is a Japanese sentence-BERT model. ", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens-v2", + "project_name": "sentence-bert-base-ja-mean-tokens-v2", + "downloads": 227266, + "source": "Hugging Face", + "score": 2.112813619354919, + "first_commit": "2021-12-14 11:18:19", + "latest_commit": "2024-04-17 11:39:38", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111, + "model_architectures": "BertModel" + }, { "description": "Standalone. Small. Language-neutral. BudouX is the successor to Budou, the machine learning powered line break organizer tool.", "url": "https://github.com/google/budoux", "project_name": "budoux", "stargazers_count": 815, "source": "GitHub", - "score": 2.066077416870622, + "score": 2.075287135562143, "first_commit": "2021-11-18 09:36:21", "latest_commit": "2024-08-16 15:09:25", "languages": [ @@ -341,7 +369,7 @@ "project_name": "kagome", "stargazers_count": 803, "source": "GitHub", - "score": 2.030721042875918, + "score": 2.039790505806422, "first_commit": "2014-06-26 13:52:06", "latest_commit": "2024-08-13 10:35:54", "languages": [ @@ -356,7 +384,7 @@ "project_name": "kuroshiro", "stargazers_count": 787, "source": "GitHub", - "score": 1.9835792108829804, + "score": 1.9924616661321273, "first_commit": "2016-01-03 17:16:40", "latest_commit": "2021-06-07 23:02:39", "languages": [ @@ -370,7 +398,7 @@ "project_name": "mokuro", "stargazers_count": 781, "source": "GitHub", - "score": 1.9659010238856287, + "score": 1.9747133512542667, "first_commit": "2022-04-16 14:44:52", "latest_commit": "2024-07-09 17:10:55", "languages": [ @@ -386,7 +414,7 @@ "project_name": "WanaKana", "stargazers_count": 738, "source": "GitHub", - "score": 1.8392073504046078, + "score": 1.8475170946295996, "first_commit": "2013-08-27 12:57:41", "latest_commit": "2023-11-20 12:22:09", "languages": [ @@ -401,7 +429,7 @@ "project_name": "ginza", "stargazers_count": 731, "source": "GitHub", - "score": 1.8185827989076975, + "score": 1.8268107272720955, "first_commit": "2019-03-11 16:49:15", "latest_commit": "2024-03-31 07:29:06", "languages": [ @@ -409,27 +437,13 @@ ], "model_or_dataset": null }, - { - "description": "This is a Japanese sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens-v2", - "project_name": "sentence-bert-base-ja-mean-tokens-v2", - "downloads": 180115, - "source": "Hugging Face", - "score": 1.817315663275514, - "first_commit": "2021-12-14 11:18:19", - "latest_commit": "2024-04-17 11:39:38", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertModel" - }, { "description": "Extend GNOME On-Screen Keyboard for Input Methods", "url": "https://github.com/google/shuwa", "project_name": "shuwa", "stargazers_count": 726, "source": "GitHub", - "score": 1.8038509764099044, + "score": 1.8120204648738785, "first_commit": "2021-04-20 14:25:55", "latest_commit": "2022-12-22 19:41:35", "languages": [ @@ -444,7 +458,7 @@ "project_name": "luke", "stargazers_count": 699, "source": "GitHub", - "score": 1.7242991349218217, + "score": 1.7321530479235059, "first_commit": "2020-03-31 21:56:47", "latest_commit": "2023-06-16 23:11:54", "languages": [ @@ -459,7 +473,7 @@ "project_name": "japanese-addresses", "stargazers_count": 698, "source": "GitHub", - "score": 1.721352770422263, + "score": 1.7291949954438626, "first_commit": "2020-07-12 13:35:51", "latest_commit": 
"2024-01-15 09:28:19", "languages": [ @@ -467,27 +481,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "rinna/japanese-cloob-vit-b-16", - "url": "https://huggingface.co./rinna/japanese-cloob-vit-b-16", - "project_name": "japanese-cloob-vit-b-16", - "downloads": 170181, - "source": "Hugging Face", - "score": 1.7119660846475737, - "first_commit": "2022-04-27 08:29:29", - "latest_commit": "2024-07-22 08:09:24", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.197, - "model_architectures": "CLOOBModel" - }, { "description": "ChatdollKit enables you to make your 3D model into a chatbot", "url": "https://github.com/uezo/ChatdollKit", "project_name": "ChatdollKit", "stargazers_count": 685, "source": "GitHub", - "score": 1.683050031928001, + "score": 1.6907403132084982, "first_commit": "2020-03-21 22:01:11", "latest_commit": "2024-07-21 01:41:49", "languages": [ @@ -501,7 +501,7 @@ "project_name": "normalize-japanese-addresses", "stargazers_count": 672, "source": "GitHub", - "score": 1.6447472934337388, + "score": 1.6522856309731335, "first_commit": "2020-05-30 10:53:34", "latest_commit": "2024-07-02 10:44:21", "languages": [ @@ -516,7 +516,7 @@ "project_name": "mozc-devices", "stargazers_count": 635, "source": "GitHub", - "score": 1.5357318069500698, + "score": 1.542837689226327, "first_commit": "2012-06-06 06:06:16", "latest_commit": "2024-02-28 20:08:36", "languages": [ @@ -532,7 +532,7 @@ "project_name": "japanese-pretrained-models", "stargazers_count": 576, "source": "GitHub", - "score": 1.361896301476111, + "score": 1.368312592927365, "first_commit": "2021-04-06 13:48:14", "latest_commit": "2022-10-28 16:08:26", "languages": [ @@ -546,7 +546,7 @@ "project_name": "mecab-python3", "stargazers_count": 515, "source": "GitHub", - "score": 1.1821680670030352, + "score": 1.1878713916691162, "first_commit": "2014-05-31 17:47:48", "latest_commit": "2024-04-15 21:32:53", "languages": [ @@ -555,13 +555,27 @@ ], "model_or_dataset": null }, + { + "description": "GLuCoSE (General Luke-based Contrastive Sentence Embedding)-base-Japanese 日本語のREADME/Japanese README GLuCoSE (General LUke-based COntrastive Sentence Embedding, \"glucose\") is a Japanese text embedding model based on LUKE.", + "url": "https://huggingface.co./pkshatech/GLuCoSE-base-ja", + "project_name": "GLuCoSE-base-ja", + "downloads": 131137, + "source": "Hugging Face", + "score": 1.1786288732582964, + "first_commit": "2023-07-16 07:28:46", + "latest_commit": "2023-08-25 02:53:22", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LukeModel" + }, { "description": "BERT models for Japanese text.", "url": "https://github.com/cl-tohoku/bert-japanese", "project_name": "bert-japanese", "stargazers_count": 502, "source": "GitHub", - "score": 1.143865328508773, + "score": 1.1494167094337517, "first_commit": "2019-03-24 22:50:33", "latest_commit": "2024-03-24 00:08:38", "languages": [ @@ -576,7 +590,7 @@ "project_name": "bert-japanese", "stargazers_count": 497, "source": "GitHub", - "score": 1.12913350601098, + "score": 1.1346264470355345, "first_commit": "2018-12-27 20:05:33", "latest_commit": "2021-02-15 10:20:28", "languages": [ @@ -585,41 +599,13 @@ ], "model_or_dataset": "model" }, - { - "description": "BERT base Japanese (character tokenization)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char", - "project_name": "bert-base-japanese-char", - "downloads": 113765, - "source": "Hugging Face", - "score": 1.1136771951897628, - "first_commit": 
"2020-04-28 21:34:05", - "latest_commit": "2024-02-22 00:57:58", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" - }, - { - "description": "BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v2", - "project_name": "bert-base-japanese-char-v2", - "downloads": 109410, - "source": "Hugging Face", - "score": 1.0674926356040288, - "first_commit": "2021-03-05 04:05:08", - "latest_commit": "2021-09-23 15:45:24", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" - }, { "description": "morphological analyzer (word segmentor + PoS Tagger) for Chinese and Japanese written purely in JavaScript.", "url": "https://github.com/rakuten-nlp/rakutenma", "project_name": "rakutenma", "stargazers_count": 471, "source": "GitHub", - "score": 1.052528029022456, + "score": 1.0577170825648057, "first_commit": "2014-08-05 19:05:12", "latest_commit": "2015-01-29 18:28:18", "languages": [ @@ -633,7 +619,7 @@ "project_name": "bangumi-data", "stargazers_count": 471, "source": "GitHub", - "score": 1.052528029022456, + "score": 1.0577170825648057, "first_commit": "2016-09-05 10:17:46", "latest_commit": "2024-08-12 10:50:17", "languages": [ @@ -643,18 +629,32 @@ "model_or_dataset": null }, { - "description": "JMMLU Japanese Massive Multitask Language Understanding Benchmark JMMLU is a four-choice question set consisting of Japanese-translated questions of a portion of MMLU (Paper, Github) (Translated questions) and questions based on unique Japanese cultural context (Japanese questions).", - "url": "https://huggingface.co./datasets/nlp-waseda/JMMLU", - "project_name": "JMMLU", - "downloads": 90961, + "description": "BERT base Japanese (character tokenization)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char", + "project_name": "bert-base-japanese-char", + "downloads": 105479, "source": "Hugging Face", - "score": 0.8718419031592056, - "first_commit": "2024-02-09 12:19:13", - "latest_commit": "2024-02-27 05:22:30", + "score": 0.9292835953988872, + "first_commit": "2020-04-28 21:34:05", + "latest_commit": "2024-02-22 00:57:58", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "BertForMaskedLM" + }, + { + "description": "BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v2", + "project_name": "bert-base-japanese-char-v2", + "downloads": 103576, + "source": "Hugging Face", + "score": 0.9107901795532851, + "first_commit": "2021-03-05 04:05:08", + "latest_commit": "2021-09-23 15:45:24", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { "description": "Lightweight converter from Japanese Kana-kanji sentences into Kana-Roman.", @@ -662,7 +662,7 @@ "project_name": "pykakasi", "stargazers_count": 407, "source": "GitHub", - "score": 0.863960701050704, + "score": 0.8684017238676265, "first_commit": "2011-01-26 09:56:25", "latest_commit": "2022-07-22 12:36:37", "languages": [ @@ -670,27 +670,13 @@ ], "model_or_dataset": null }, - { - "description": "This is a Japanese sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens", - "project_name": 
"sentence-bert-base-ja-mean-tokens", - "downloads": 83440, - "source": "Hugging Face", - "score": 0.7920820701754386, - "first_commit": "2021-07-22 06:11:37", - "latest_commit": "2024-04-17 11:40:03", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForMaskedLM" - }, { "description": "テキストを壱百満天原サロメお嬢様風の口調に変換します", "url": "https://github.com/jiro4989/ojosama", "project_name": "ojosama", "stargazers_count": 380, "source": "GitHub", - "score": 0.7844088595626213, + "score": 0.7885343069172541, "first_commit": "2022-06-16 07:21:02", "latest_commit": "2024-08-07 00:15:14", "languages": [ @@ -704,7 +690,7 @@ "project_name": "fugashi", "stargazers_count": 376, "source": "GitHub", - "score": 0.7726234015643868, + "score": 0.7767020969986804, "first_commit": "2019-10-14 13:33:36", "latest_commit": "2024-04-15 21:15:01", "languages": [ @@ -718,7 +704,7 @@ "project_name": "nagisa", "stargazers_count": 376, "source": "GitHub", - "score": 0.7726234015643868, + "score": 0.7767020969986804, "first_commit": "2018-02-14 23:56:05", "latest_commit": "2024-06-15 01:55:19", "languages": [ @@ -732,7 +718,7 @@ "project_name": "jumanpp", "stargazers_count": 372, "source": "GitHub", - "score": 0.7608379435661523, + "score": 0.7648698870801067, "first_commit": "2016-10-17 14:48:20", "latest_commit": "2023-03-06 09:27:42", "languages": [ @@ -749,7 +735,7 @@ "project_name": "lindera", "stargazers_count": 369, "source": "GitHub", - "score": 0.7519988500674765, + "score": 0.7559957296411766, "first_commit": "2020-01-22 23:29:22", "latest_commit": "2024-07-15 23:23:30", "languages": [ @@ -757,13 +743,27 @@ ], "model_or_dataset": null }, + { + "description": "This is a Japanese sentence-BERT model.", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens", + "project_name": "sentence-bert-base-ja-mean-tokens", + "downloads": 84658, + "source": "Hugging Face", + "score": 0.7269444407371536, + "first_commit": "2021-07-22 06:11:37", + "latest_commit": "2024-04-17 11:40:03", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111, + "model_architectures": "BertForMaskedLM" + }, { "description": "NDLOCRのアプリケーション", "url": "https://github.com/ndl-lab/ndlocr_cli", "project_name": "ndlocr_cli", "stargazers_count": 344, "source": "GitHub", - "score": 0.6783397375785108, + "score": 0.6820444176500909, "first_commit": "2022-03-30 11:12:01", "latest_commit": "2024-07-29 09:20:46", "languages": [ @@ -777,7 +777,7 @@ "project_name": "camphr", "stargazers_count": 340, "source": "GitHub", - "score": 0.6665542795802765, + "score": 0.6702122077315172, "first_commit": "2019-08-27 07:22:43", "latest_commit": "2021-08-18 15:06:51", "languages": [ @@ -791,7 +791,7 @@ "project_name": "aquaskk", "stargazers_count": 339, "source": "GitHub", - "score": 0.6636079150807178, + "score": 0.6672541552518738, "first_commit": "2013-05-12 07:03:33", "latest_commit": "2023-07-10 00:35:46", "languages": [ @@ -805,7 +805,7 @@ "project_name": "kanji-data-media", "stargazers_count": 336, "source": "GitHub", - "score": 0.654768821582042, + "score": 0.6583799978129435, "first_commit": "2014-08-30 19:06:48", "latest_commit": "2023-11-15 20:28:16", "languages": [], @@ -817,7 +817,7 @@ "project_name": "emoji-ime-dictionary", "stargazers_count": 331, "source": "GitHub", - "score": 0.6400369990842488, + "score": 0.6435897354147264, "first_commit": "2018-10-13 21:37:28", "latest_commit": "2023-01-16 12:01:31", "languages": [ @@ -831,7 +831,7 @@ "project_name": "animedb", 
"stargazers_count": 321, "source": "GitHub", - "score": 0.6105733540886626, + "score": 0.6140092106182923, "first_commit": "2016-10-11 23:05:11", "latest_commit": "2023-01-04 16:56:30", "languages": [ @@ -845,7 +845,7 @@ "project_name": "gpt2-japanese", "stargazers_count": 313, "source": "GitHub", - "score": 0.5870024380921937, + "score": 0.5903447907811449, "first_commit": "2019-12-12 11:07:23", "latest_commit": "2023-09-02 17:23:50", "languages": [ @@ -853,27 +853,13 @@ ], "model_or_dataset": "model" }, - { - "description": "This is a Japanese sentence-LUKE model.", - "url": "https://huggingface.co./sonoisa/sentence-luke-japanese-base-lite", - "project_name": "sentence-luke-japanese-base-lite", - "downloads": 63740, - "source": "Hugging Face", - "score": 0.5831645446096616, - "first_commit": "2023-03-19 14:44:42", - "latest_commit": "2023-03-20 01:32:34", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LukeModel" - }, { "description": "vibrato: Viterbi-based accelerated tokenizer", "url": "https://github.com/daac-tools/vibrato", "project_name": "vibrato", "stargazers_count": 311, "source": "GitHub", - "score": 0.5811097090930764, + "score": 0.584428685821858, "first_commit": "2022-07-06 17:50:53", "latest_commit": "2024-05-30 19:17:07", "languages": [ @@ -883,17 +869,17 @@ "model_or_dataset": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-hf", - "project_name": "Swallow-7b-instruct-hf", - "downloads": 61889, + "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-instruct", + "project_name": "ELYZA-japanese-Llama-2-7b-instruct", + "downloads": 68268, "source": "Hugging Face", - "score": 0.5635347811669077, - "first_commit": "2023-12-07 02:18:36", - "latest_commit": "2024-06-29 08:56:26", + "score": 0.5676658880785006, + "first_commit": "2023-08-28 12:58:25", + "latest_commit": "2023-08-29 03:46:15", "languages": [], "model_or_dataset": "model", - "model_size": 6.83, + "model_size": null, "model_architectures": "LlamaForCausalLM" }, { @@ -902,7 +888,7 @@ "project_name": "accel-brain-code", "stargazers_count": 305, "source": "GitHub", - "score": 0.5634315220957247, + "score": 0.5666803709439975, "first_commit": "2016-01-26 21:16:08", "latest_commit": "2023-12-26 12:01:37", "languages": [ @@ -919,7 +905,7 @@ "project_name": "jaconv", "stargazers_count": 299, "source": "GitHub", - "score": 0.545753335098373, + "score": 0.5489320560661369, "first_commit": "2014-03-22 20:09:24", "latest_commit": "2024-08-13 14:26:29", "languages": [ @@ -933,7 +919,7 @@ "project_name": "yomitan-dictionaries", "stargazers_count": 295, "source": "GitHub", - "score": 0.5339678771001385, + "score": 0.5370998461475632, "first_commit": "2022-08-07 01:28:52", "latest_commit": "2024-08-04 01:00:08", "languages": [ @@ -942,18 +928,18 @@ "model_or_dataset": "dataset" }, { - "description": "shisa-gamma-7b-v1 For more information see our main Shisa 7B model We applied a version of our fine-tune data set onto Japanese Stable LM Base Gamma 7B and it performed pretty well, just sharing since it might be of interest.", - "url": "https://huggingface.co./augmxnt/shisa-gamma-7b-v1", - "project_name": "shisa-gamma-7b-v1", - "downloads": 58892, + 
"description": "BERT base Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v3", + "project_name": "bert-base-japanese-char-v3", + "downloads": 65026, "source": "Hugging Face", - "score": 0.5317517444095151, - "first_commit": "2023-12-23 20:21:44", - "latest_commit": "2024-05-19 06:07:36", + "score": 0.5361600267167341, + "first_commit": "2023-05-19 00:33:09", + "latest_commit": "2023-05-19 00:39:44", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": "BertForPreTraining" }, { "description": "JGLUE: Japanese General Language Understanding Evaluation", @@ -961,7 +947,7 @@ "project_name": "JGLUE", "stargazers_count": 294, "source": "GitHub", - "score": 0.5310215126005798, + "score": 0.5341417936679198, "first_commit": "2022-05-27 13:35:39", "latest_commit": "2023-06-24 10:50:30", "languages": [ @@ -969,27 +955,13 @@ ], "model_or_dataset": null }, - { - "description": "Sentence BERT base Japanese model This repository contains a Sentence BERT base model for Japanese.", - "url": "https://huggingface.co./colorfulscoop/sbert-base-ja", - "project_name": "sbert-base-ja", - "downloads": 57843, - "source": "Hugging Face", - "score": 0.5206271512969008, - "first_commit": "2021-08-01 04:12:28", - "latest_commit": "2021-08-08 15:47:42", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertModel" - }, { "description": "SudachiPy 0.6* and above are developed as Sudachi.rs.", "url": "https://github.com/WorksApplications/sudachi.rs", "project_name": "sudachi.rs", "stargazers_count": 290, "source": "GitHub", - "score": 0.5192360546023453, + "score": 0.5223095837493461, "first_commit": "2019-11-23 21:06:49", "latest_commit": "2024-06-28 10:11:04", "languages": [ @@ -1004,7 +976,7 @@ "project_name": "Poricom", "stargazers_count": 283, "source": "GitHub", - "score": 0.498611503105435, + "score": 0.5016032163918421, "first_commit": "2021-08-31 16:21:12", "latest_commit": "2023-06-04 18:55:23", "languages": [ @@ -1018,7 +990,7 @@ "project_name": "japanese-stable-diffusion", "stargazers_count": 280, "source": "GitHub", - "score": 0.4897724096067591, + "score": 0.4927290589529119, "first_commit": "2022-09-09 10:11:41", "latest_commit": "2023-03-20 08:21:57", "languages": [ @@ -1027,13 +999,27 @@ ], "model_or_dataset": null }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-hf", + "project_name": "Swallow-7b-instruct-hf", + "downloads": 60140, + "source": "Hugging Face", + "score": 0.48867771992660736, + "first_commit": "2023-12-07 02:18:36", + "latest_commit": "2024-06-29 08:56:26", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.83, + "model_architectures": "LlamaForCausalLM" + }, { "description": "Japanese to romaji converter in Python", "url": "https://github.com/polm/cutlet", "project_name": "cutlet", "stargazers_count": 278, "source": "GitHub", - "score": 0.4838796806076419, + "score": 0.48681295399362506, "first_commit": "2020-04-16 22:59:42", "latest_commit": "2024-06-28 00:40:28", "languages": [ @@ -1048,7 +1034,7 @@ "project_name": "corvusskk", "stargazers_count": 277, "source": "GitHub", - "score": 0.48093331610808326, + "score": 
0.48385490151398164, "first_commit": "2012-06-15 14:12:35", "latest_commit": "2024-08-04 19:22:13", "languages": [ @@ -1064,7 +1050,7 @@ "project_name": "llm-book", "stargazers_count": 273, "source": "GitHub", - "score": 0.46914785810984877, + "score": 0.47202269159540794, "first_commit": "2023-07-13 16:50:00", "latest_commit": "2024-07-27 21:21:13", "languages": [ @@ -1078,7 +1064,7 @@ "project_name": "HotPepperGourmetDialogue", "stargazers_count": 272, "source": "GitHub", - "score": 0.4662014936102902, + "score": 0.4690646391157645, "first_commit": "2016-02-13 11:37:07", "latest_commit": "2016-05-19 13:05:29", "languages": [ @@ -1092,7 +1078,7 @@ "project_name": "tacotron2-japanese", "stargazers_count": 269, "source": "GitHub", - "score": 0.4573624001116143, + "score": 0.46019048167683424, "first_commit": "2022-07-25 11:42:50", "latest_commit": "2022-09-04 12:57:11", "languages": [ @@ -1107,7 +1093,7 @@ "project_name": "neologdn", "stargazers_count": 267, "source": "GitHub", - "score": 0.45146967111249703, + "score": 0.4542743767175474, "first_commit": "2015-07-22 01:15:07", "latest_commit": "2024-05-03 00:56:43", "languages": [ @@ -1118,28 +1104,28 @@ "model_or_dataset": null }, { - "description": "FuguMT", - "url": "https://huggingface.co./staka/fugumt-en-ja", - "project_name": "fugumt-en-ja", - "downloads": 50872, + "description": "Sentence BERT base Japanese model This repository contains a Sentence BERT base model for Japanese.", + "url": "https://huggingface.co./colorfulscoop/sbert-base-ja", + "project_name": "sbert-base-ja", + "downloads": 53684, "source": "Hugging Face", - "score": 0.44670004110811756, - "first_commit": "2022-05-08 04:23:57", - "latest_commit": "2023-08-15 17:45:04", + "score": 0.42593810211304645, + "first_commit": "2021-08-01 04:12:28", + "latest_commit": "2021-08-08 15:47:42", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MarianMTModel" + "model_architectures": "BertModel" }, { "description": "FuguMT", - "url": "https://huggingface.co./staka/fugumt-ja-en", - "project_name": "fugumt-ja-en", - "downloads": 50408, + "url": "https://huggingface.co./staka/fugumt-en-ja", + "project_name": "fugumt-en-ja", + "downloads": 52001, "source": "Hugging Face", - "score": 0.44177934405925867, - "first_commit": "2022-05-08 04:32:09", - "latest_commit": "2023-08-15 17:40:58", + "score": 0.409582653417225, + "first_commit": "2022-05-08 04:23:57", + "latest_commit": "2023-08-15 17:45:04", "languages": [], "model_or_dataset": "model", "model_size": null, @@ -1151,7 +1137,7 @@ "project_name": "bert-book", "stargazers_count": 250, "source": "GitHub", - "score": 0.4013814746200005, + "score": 0.4039874845636092, "first_commit": "2021-02-04 22:11:11", "latest_commit": "2024-02-13 13:36:21", "languages": [ @@ -1159,27 +1145,13 @@ ], "model_or_dataset": null }, - { - "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-instruct", - "project_name": "ELYZA-japanese-Llama-2-7b-instruct", - "downloads": 46400, - "source": "Hugging Face", - "score": 0.3992747023096326, - "first_commit": "2023-08-28 12:58:25", - "latest_commit": "2023-08-29 03:46:15", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" - }, { "description": "azooKey: A Japanese Keyboard iOS Application Fully Developed in Swift", "url": 
"https://github.com/ensan-hcl/azooKey", "project_name": "azooKey", "stargazers_count": 247, "source": "GitHub", - "score": 0.3925423811213246, + "score": 0.39511332712467895, "first_commit": "2020-09-03 16:35:44", "latest_commit": "2024-07-04 11:03:08", "languages": [ @@ -1194,7 +1166,7 @@ "project_name": "pdf-translator", "stargazers_count": 245, "source": "GitHub", - "score": 0.38664965212220737, + "score": 0.3891972221653921, "first_commit": "2023-04-17 14:55:03", "latest_commit": "2024-05-07 16:25:33", "languages": [ @@ -1208,7 +1180,7 @@ "project_name": "japanese-dialog-transformers", "stargazers_count": 240, "source": "GitHub", - "score": 0.3719178296244143, + "score": 0.374406959767175, "first_commit": "2021-09-17 22:13:57", "latest_commit": "2023-06-21 09:24:35", "languages": [ @@ -1216,13 +1188,27 @@ ], "model_or_dataset": "model" }, + { + "description": "FuguMT", + "url": "https://huggingface.co./staka/fugumt-ja-en", + "project_name": "fugumt-ja-en", + "downloads": 47961, + "source": "Hugging Face", + "score": 0.37032180212125443, + "first_commit": "2022-05-08 04:32:09", + "latest_commit": "2023-08-15 17:40:58", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "MarianMTModel" + }, { "description": "A tool for dividing the Japanese full name into a family name and a given name.", "url": "https://github.com/rskmoi/namedivider-python", "project_name": "namedivider-python", "stargazers_count": 237, "source": "GitHub", - "score": 0.36307873612573843, + "score": 0.3655328023282447, "first_commit": "2020-11-08 00:41:44", "latest_commit": "2024-05-03 15:28:00", "languages": [ @@ -1236,7 +1222,7 @@ "project_name": "mecab", "stargazers_count": 232, "source": "GitHub", - "score": 0.3483469136279453, + "score": 0.3507425399300276, "first_commit": "2011-10-15 15:16:30", "latest_commit": "2024-05-30 20:11:11", "languages": [ @@ -1256,7 +1242,7 @@ "project_name": "nlplot", "stargazers_count": 232, "source": "GitHub", - "score": 0.3483469136279453, + "score": 0.3507425399300276, "first_commit": "2020-05-07 00:10:04", "latest_commit": "2022-09-21 23:19:20", "languages": [ @@ -1265,13 +1251,27 @@ ], "model_or_dataset": null }, + { + "description": "Llama-3-ELYZA-JP-8B Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B", + "project_name": "Llama-3-ELYZA-JP-8B", + "downloads": 45183, + "source": "Hugging Face", + "score": 0.34332510783902515, + "first_commit": "2024-06-25 06:32:13", + "latest_commit": "2024-06-26 02:56:23", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" + }, { "description": "Using Vim as an input method for X11 apps", "url": "https://github.com/algon-320/vime", "project_name": "vime", "stargazers_count": 228, "source": "GitHub", - "score": 0.3365614556297108, + "score": 0.3389103300114539, "first_commit": "2022-12-03 22:32:24", "latest_commit": "2022-12-03 22:32:24", "languages": [ @@ -1285,7 +1285,7 @@ "project_name": "SudachiDict", "stargazers_count": 225, "source": "GitHub", - "score": 0.32772236213103495, + "score": 0.33003617257252366, "first_commit": "2019-04-01 17:14:39", "latest_commit": "2024-07-19 18:45:35", "languages": [ @@ -1300,7 +1300,7 @@ "project_name": "Jitendex", "stargazers_count": 225, "source": "GitHub", - "score": 0.32772236213103495, + "score": 0.33003617257252366, "first_commit": "2023-10-09 17:45:11", "latest_commit": "2024-07-09 16:43:23", 
"languages": [], @@ -1312,7 +1312,7 @@ "project_name": "konoha", "stargazers_count": 224, "source": "GitHub", - "score": 0.32477599763147635, + "score": 0.32707812009288023, "first_commit": "2018-08-22 13:55:37", "latest_commit": "2024-05-16 00:01:37", "languages": [ @@ -1327,7 +1327,7 @@ "project_name": "vaporetto", "stargazers_count": 223, "source": "GitHub", - "score": 0.3218296331319177, + "score": 0.3241200676132368, "first_commit": "2021-08-18 22:59:40", "latest_commit": "2024-07-14 16:33:38", "languages": [ @@ -1336,32 +1336,18 @@ "model_or_dataset": null }, { - "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese", - "project_name": "deberta-v2-base-japanese", - "downloads": 38625, - "source": "Hugging Face", - "score": 0.31682121188963686, - "first_commit": "2023-01-05 08:04:14", - "latest_commit": "2023-05-12 14:13:03", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.137, - "model_architectures": "DebertaV2ForMaskedLM" - }, - { - "description": "Llama-3-ELYZA-JP-8B Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B", - "project_name": "Llama-3-ELYZA-JP-8B", - "downloads": 38336, + "description": "llm-book/bert-base-japanese-v3-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-ner-wikipedia-dataset", + "project_name": "bert-base-japanese-v3-ner-wikipedia-dataset", + "downloads": 42714, "source": "Hugging Face", - "score": 0.31375638118463633, - "first_commit": "2024-06-25 06:32:13", - "latest_commit": "2024-06-26 02:56:23", + "score": 0.31933128559898766, + "first_commit": "2023-05-28 08:06:41", + "latest_commit": "2023-07-25 13:32:15", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "BertForTokenClassification" }, { "description": "Building AI-based conversational avatars lightning fast", @@ -1369,7 +1355,7 @@ "project_name": "aiavatarkit", "stargazers_count": 218, "source": "GitHub", - "score": 0.3070978106341246, + "score": 0.3093298052150197, "first_commit": "2023-05-27 23:14:32", "latest_commit": "2024-06-08 17:55:22", "languages": [ @@ -1377,13 +1363,27 @@ ], "model_or_dataset": null }, + { + "description": "nlp-waseda/roberta-base-japanese-with-auto-jumanpp Model description", + "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese-with-auto-jumanpp", + "project_name": "roberta-base-japanese-with-auto-jumanpp", + "downloads": 41665, + "source": "Hugging Face", + "score": 0.30913706950753395, + "first_commit": "2022-10-15 05:09:36", + "latest_commit": "2022-10-21 10:57:40", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "RobertaForMaskedLM" + }, { "description": "日本語OCR", "url": "https://github.com/tanreinama/OCR_Japanease", "project_name": "OCR_Japanease", "stargazers_count": 216, "source": "GitHub", - "score": 0.30120508163500737, + "score": 0.30341370025573283, "first_commit": "2020-04-08 09:25:03", "latest_commit": "2021-04-30 19:26:24", "languages": [ @@ -1391,41 +1391,13 @@ ], "model_or_dataset": null }, - { - "description": "hotchpotch/japanese-reranker-cross-encoder-xsmall-v1 
日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-xsmall-v1", - "project_name": "japanese-reranker-cross-encoder-xsmall-v1", - "downloads": 37128, - "source": "Hugging Face", - "score": 0.30094560093674505, - "first_commit": "2024-03-28 04:29:26", - "latest_commit": "2024-06-10 03:57:05", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.107, - "model_architectures": "XLMRobertaForSequenceClassification" - }, - { - "description": "JMTEB:", - "url": "https://huggingface.co./datasets/sbintuitions/JMTEB", - "project_name": "JMTEB", - "downloads": 36325, - "source": "Hugging Face", - "score": 0.29242982565606895, - "first_commit": "2024-02-22 18:15:27", - "latest_commit": "2024-06-28 15:18:20", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, { "description": "Yet another Japanese IME for IBus/Linux", "url": "https://github.com/akaza-im/akaza", "project_name": "akaza", "stargazers_count": 212, "source": "GitHub", - "score": 0.28941962363677287, + "score": 0.29158149033715913, "first_commit": "2020-09-03 01:11:08", "latest_commit": "2023-05-28 23:41:07", "languages": [ @@ -1441,7 +1413,7 @@ "project_name": "jtubespeech", "stargazers_count": 208, "source": "GitHub", - "score": 0.2776341656385384, + "score": 0.27974928041858543, "first_commit": "2021-07-01 00:11:55", "latest_commit": "2023-03-02 15:50:30", "languages": [ @@ -1455,7 +1427,7 @@ "project_name": "fastTextJapaneseTutorial", "stargazers_count": 202, "source": "GitHub", - "score": 0.25995597864118664, + "score": 0.2620009655407249, "first_commit": "2016-09-28 13:06:13", "latest_commit": "2016-09-29 14:21:51", "languages": [ @@ -1463,13 +1435,27 @@ ], "model_or_dataset": null }, + { + "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese", + "project_name": "deberta-v2-base-japanese", + "downloads": 36651, + "source": "Hugging Face", + "score": 0.2604108545575348, + "first_commit": "2023-01-05 08:04:14", + "latest_commit": "2023-05-12 14:13:03", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.137, + "model_architectures": "DebertaV2ForMaskedLM" + }, { "description": "Japanese postal code data.", "url": "https://github.com/polm/posuto", "project_name": "posuto", "stargazers_count": 200, "source": "GitHub", - "score": 0.25406324964206944, + "score": 0.25608486058143803, "first_commit": null, "latest_commit": null, "languages": [], @@ -1481,7 +1467,7 @@ "project_name": "kytea", "stargazers_count": 199, "source": "GitHub", - "score": 0.2511168851425108, + "score": 0.25312680810179466, "first_commit": "2011-03-03 14:53:14", "latest_commit": "2020-04-03 09:46:01", "languages": [ @@ -1491,13 +1477,27 @@ ], "model_or_dataset": null }, + { + "description": "This is a Japanese sentence-LUKE model.", + "url": "https://huggingface.co./sonoisa/sentence-luke-japanese-base-lite", + "project_name": "sentence-luke-japanese-base-lite", + "downloads": 35036, + "source": "Hugging Face", + "score": 0.24471623207164556, + "first_commit": "2023-03-19 14:44:42", + "latest_commit": "2023-03-20 01:32:34", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LukeModel" + }, { "description": "Python wrapper for OpenJTalk", 
"url": "https://github.com/r9y9/pyopenjtalk", "project_name": "pyopenjtalk", "stargazers_count": 192, "source": "GitHub", - "score": 0.23049233364560046, + "score": 0.2324204407442907, "first_commit": "2018-08-07 00:37:37", "latest_commit": "2024-07-13 16:04:09", "languages": [ @@ -1512,7 +1512,7 @@ "project_name": "daachorse", "stargazers_count": 192, "source": "GitHub", - "score": 0.23049233364560046, + "score": 0.2324204407442907, "first_commit": "2021-09-02 11:37:02", "latest_commit": "2024-06-06 18:11:39", "languages": [ @@ -1520,27 +1520,13 @@ ], "model_or_dataset": null }, - { - "description": "llm-book/bert-base-japanese-v3-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-ner-wikipedia-dataset", - "project_name": "bert-base-japanese-v3-ner-wikipedia-dataset", - "downloads": 29838, - "source": "Hugging Face", - "score": 0.22363551152687125, - "first_commit": "2023-05-28 08:06:41", - "latest_commit": "2023-07-25 13:32:15", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForTokenClassification" - }, { "description": "English-Japanese Dictionary data (Public Domain) EJDict-hand", "url": "https://github.com/kujirahand/EJDict", "project_name": "EJDict", "stargazers_count": 189, "source": "GitHub", - "score": 0.22165324014692458, + "score": 0.22354628330536042, "first_commit": "2016-01-04 23:23:52", "latest_commit": "2024-02-15 09:49:00", "languages": [ @@ -1554,7 +1540,7 @@ "project_name": "kanjitomo-ocr", "stargazers_count": 184, "source": "GitHub", - "score": 0.20692141764913147, + "score": 0.2087560209071433, "first_commit": "2019-12-08 14:09:53", "latest_commit": "2021-05-03 21:38:06", "languages": [ @@ -1563,13 +1549,27 @@ ], "model_or_dataset": null }, + { + "description": "JMTEB:", + "url": "https://huggingface.co./datasets/sbintuitions/JMTEB", + "project_name": "JMTEB", + "downloads": 31322, + "source": "Hugging Face", + "score": 0.20862345937034985, + "first_commit": "2024-02-22 18:15:27", + "latest_commit": "2024-06-28 15:18:20", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, { "description": "Sentence boundary disambiguation tool for Japanese texts (日本語文境界判定器)", "url": "https://github.com/megagonlabs/bunkai", "project_name": "bunkai", "stargazers_count": 181, "source": "GitHub", - "score": 0.19808232415045562, + "score": 0.19988186346821302, "first_commit": "2021-04-21 12:36:10", "latest_commit": "2023-08-10 14:13:25", "languages": [ @@ -1577,13 +1577,27 @@ ], "model_or_dataset": null }, + { + "description": "BERT base Japanese (unidic-lite with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v2", + "project_name": "bert-base-japanese-v2", + "downloads": 30384, + "source": "Hugging Face", + "score": 0.19950794488628537, + "first_commit": "2021-03-05 03:37:30", + "latest_commit": "2021-09-23 15:45:31", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForMaskedLM" + }, { "description": "ITAコーパスの文章リスト", "url": "https://github.com/mmorise/ita-corpus", "project_name": "ita-corpus", "stargazers_count": 178, "source": "GitHub", - "score": 0.18924323065177975, + "score": 0.19100770602928277, "first_commit": "2021-06-03 11:22:38", "latest_commit": "2023-12-04 00:50:44", "languages": [], @@ -1595,7 +1609,7 @@ "project_name": "elasticsearch-sudachi", "stargazers_count": 177, "source": 
"GitHub", - "score": 0.18629686615222113, + "score": 0.18804965354963935, "first_commit": "2017-11-09 19:21:47", "latest_commit": "2024-07-04 14:46:30", "languages": [ @@ -1606,26 +1620,40 @@ "model_or_dataset": null }, { - "description": "BERT base Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v3", - "project_name": "bert-base-japanese-char-v3", - "downloads": 25661, + "description": "BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v3", + "project_name": "bert-base-japanese-v3", + "downloads": 28901, "source": "Hugging Face", - "score": 0.17933863313660472, - "first_commit": "2023-05-19 00:33:09", - "latest_commit": "2023-05-19 00:39:44", + "score": 0.1850961026902645, + "first_commit": "2023-05-19 00:13:53", + "latest_commit": "2023-05-19 00:31:53", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "BertForPreTraining" }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-v0.1", + "project_name": "Swallow-7b-instruct-v0.1", + "downloads": 28835, + "source": "Hugging Face", + "score": 0.18445471254533033, + "first_commit": "2024-03-04 08:46:03", + "latest_commit": "2024-07-06 15:18:14", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.83, + "model_architectures": "LlamaForCausalLM" + }, { "description": "Topologically ordered lists of kanji for effective learning", "url": "https://github.com/scriptin/topokanji", "project_name": "topokanji", "stargazers_count": 174, "source": "GitHub", - "score": 0.17745777265354526, + "score": 0.17917549611070907, "first_commit": "2015-05-18 01:55:58", "latest_commit": "2016-01-24 05:18:22", "languages": [ @@ -1633,13 +1661,27 @@ ], "model_or_dataset": "dataset" }, + { + "description": "shisa-gamma-7b-v1 For more information see our main Shisa 7B model We applied a version of our fine-tune data set onto Japanese Stable LM Base Gamma 7B and it performed pretty well, just sharing since it might be of interest.", + "url": "https://huggingface.co./augmxnt/shisa-gamma-7b-v1", + "project_name": "shisa-gamma-7b-v1", + "downloads": 28107, + "source": "Hugging Face", + "score": 0.1773799848860564, + "first_commit": "2023-12-23 20:21:44", + "latest_commit": "2024-05-19 06:07:36", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" + }, { "description": "Can neural networks transliterate Romaji into Japanese correctly?", "url": "https://github.com/Kyubyong/neural_japanese_transliterator", "project_name": "neural_japanese_transliterator", "stargazers_count": 173, "source": "GitHub", - "score": 0.17451140815398664, + "score": 0.17621744363106565, "first_commit": "2017-01-01 13:20:54", "latest_commit": "2017-09-17 15:21:27", "languages": [ @@ -1653,7 +1695,7 @@ "project_name": "summarize_arxv", "stargazers_count": 173, "source": "GitHub", - "score": 0.17451140815398664, + "score": 0.17621744363106565, "first_commit": "2023-05-21 17:03:22", "latest_commit": "2023-05-23 01:54:10", "languages": [ @@ -1662,54 +1704,26 @@ "model_or_dataset": null }, { - "description": "rinna/japanese-clip-vit-b-16", - "url": 
"https://huggingface.co./rinna/japanese-clip-vit-b-16", - "project_name": "japanese-clip-vit-b-16", - "downloads": 25053, - "source": "Hugging Face", - "score": 0.17289082321051377, - "first_commit": "2022-04-27 07:52:33", - "latest_commit": "2024-07-20 08:42:32", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.197, - "model_architectures": "CLIPModel" - }, - { - "description": "十条蛍(Hotaru Jujo)の作成したLoRAを配布しています。 ", - "url": "https://huggingface.co./JujoHotaru/lora", - "project_name": "lora", - "downloads": 23810, + "description": "Contributors/Maintainers Junfeng Jiang@Aizawa Lab: jiangjf@is.s.u-tokyo.ac.jp", + "url": "https://huggingface.co./datasets/Coldog2333/JMedBench", + "project_name": "JMedBench", + "downloads": 26255, "source": "Hugging Face", - "score": 0.15970886969385079, - "first_commit": null, - "latest_commit": null, + "score": 0.15938218869790355, + "first_commit": "2024-08-17 11:12:54", + "latest_commit": "2024-09-01 12:41:40", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, - { - "description": "nlp-waseda/roberta-base-japanese-with-auto-jumanpp Model description", - "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese-with-auto-jumanpp", - "project_name": "roberta-base-japanese-with-auto-jumanpp", - "downloads": 23541, - "source": "Hugging Face", - "score": 0.15685613799957698, - "first_commit": "2022-10-15 05:09:36", - "latest_commit": "2022-10-21 10:57:40", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "RobertaForMaskedLM" - }, { "description": "国会議案データベース:衆議院", "url": "https://github.com/smartnews-smri/house-of-representatives", "project_name": "house-of-representatives", "stargazers_count": 167, "source": "GitHub", - "score": 0.15683322115663492, + "score": 0.1584691287532051, "first_commit": "2022-04-18 14:41:05", "latest_commit": "2024-08-15 14:09:44", "languages": [ @@ -1724,7 +1738,7 @@ "project_name": "jmdict-simplified", "stargazers_count": 166, "source": "GitHub", - "score": 0.1538868566570763, + "score": 0.15551107627356167, "first_commit": "2016-02-07 00:30:44", "latest_commit": "2024-08-12 12:21:03", "languages": [ @@ -1735,32 +1749,18 @@ "model_or_dataset": "dataset" }, { - "description": "BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v3", - "project_name": "bert-base-japanese-v3", - "downloads": 22758, - "source": "Hugging Face", - "score": 0.14855246172962758, - "first_commit": "2023-05-19 00:13:53", - "latest_commit": "2023-05-19 00:31:53", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForPreTraining" - }, - { - "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of May 31, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", - "url": "https://huggingface.co./sazyou-roukaku/BracingEvoMix", - "project_name": "BracingEvoMix", - "downloads": 21934, + "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", + "url": "https://huggingface.co./rinna/japanese-gpt-1b", + "project_name": "japanese-gpt-1b", + "downloads": 25590, "source": "Hugging Face", - "score": 0.13981398248768848, - "first_commit": "2023-05-31 10:29:16", - "latest_commit": "2023-10-01 08:58:54", + "score": 0.15291969708606679, + "first_commit": "2022-01-20 
02:30:19", + "latest_commit": "2024-07-20 07:52:31", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 1.33, + "model_architectures": "GPT2LMHeadModel" }, { "description": "ディープラーニングモデルの性能を体系的に最大化するためのプレイブック", @@ -1768,7 +1768,7 @@ "project_name": "tuning_playbook_ja", "stargazers_count": 161, "source": "GitHub", - "score": 0.13915503415928318, + "score": 0.14072081387534457, "first_commit": "2023-01-22 02:19:42", "latest_commit": "2023-01-22 22:10:48", "languages": [], @@ -1780,19 +1780,33 @@ "project_name": "DistilBERT-base-jp", "stargazers_count": 160, "source": "GitHub", - "score": 0.13620866965972456, + "score": 0.13776276139570115, "first_commit": "2020-04-22 16:17:15", "latest_commit": "2020-04-22 16:24:26", "languages": [], "model_or_dataset": "model" }, + { + "description": "rinna/japanese-clip-vit-b-16", + "url": "https://huggingface.co./rinna/japanese-clip-vit-b-16", + "project_name": "japanese-clip-vit-b-16", + "downloads": 23761, + "source": "Hugging Face", + "score": 0.135145415645391, + "first_commit": "2022-04-27 07:52:33", + "latest_commit": "2024-07-20 08:42:32", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.197, + "model_architectures": "CLIPModel" + }, { "description": "日本語における不適切表現を収集します。自然言語処理の時のデータクリーニング用等に使えると思います。", "url": "https://github.com/MosasoM/inappropriate-words-ja", "project_name": "inappropriate-words-ja", "stargazers_count": 159, "source": "GitHub", - "score": 0.13326230516016593, + "score": 0.13480470891605772, "first_commit": "2020-08-17 13:38:34", "latest_commit": "2021-12-02 00:33:00", "languages": [ @@ -1806,7 +1820,7 @@ "project_name": "JapaneseEmbeddingEval", "stargazers_count": 157, "source": "GitHub", - "score": 0.1273695761610487, + "score": 0.12888860395677088, "first_commit": "2023-01-07 13:35:18", "latest_commit": "2024-04-06 12:45:12", "languages": [ @@ -1820,7 +1834,7 @@ "project_name": "Jotoba", "stargazers_count": 157, "source": "GitHub", - "score": 0.1273695761610487, + "score": 0.12888860395677088, "first_commit": "2021-04-15 11:08:23", "latest_commit": "2024-01-22 17:06:42", "languages": [ @@ -1830,18 +1844,18 @@ "model_or_dataset": null }, { - "description": "LaBSE Model description Language-agnostic BERT Sentence Encoder (LaBSE) is a BERT-based model trained for sentence embedding for 109 languages.", - "url": "https://huggingface.co./setu4993/LaBSE", - "project_name": "LaBSE", - "downloads": 20114, + "description": "hotchpotch/japanese-reranker-cross-encoder-xsmall-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-xsmall-v1", + "project_name": "japanese-reranker-cross-encoder-xsmall-v1", + "downloads": 22934, "source": "Hugging Face", - "score": 0.12051297251156086, - "first_commit": "2021-01-11 06:06:51", - "latest_commit": "2023-10-18 23:23:16", + "score": 0.12710860276871583, + "first_commit": "2024-03-28 04:29:26", + "latest_commit": "2024-06-10 03:57:05", "languages": [], "model_or_dataset": "model", - "model_size": 0.47100000000000003, - "model_architectures": "BertModel" + "model_size": 0.107, + "model_architectures": "XLMRobertaForSequenceClassification" }, { "description": "Japanese negative positive classification.日本語文書のネガポジを判定。", @@ -1849,7 +1863,7 @@ "project_name": "negapoji", "stargazers_count": 151, "source": "GitHub", - "score": 0.10969138916369697, + "score": 0.11114028907891034, "first_commit": "2017-08-17 17:23:49", "latest_commit": "2017-08-20 18:12:51", 
"languages": [ @@ -1863,7 +1877,7 @@ "project_name": "dictation-kit", "stargazers_count": 151, "source": "GitHub", - "score": 0.10969138916369697, + "score": 0.11114028907891034, "first_commit": "2015-10-03 17:08:52", "latest_commit": "2019-04-18 11:41:47", "languages": [ @@ -1878,7 +1892,7 @@ "project_name": "chiVe", "stargazers_count": 151, "source": "GitHub", - "score": 0.10969138916369697, + "score": 0.11114028907891034, "first_commit": "2019-11-18 15:39:08", "latest_commit": "2024-03-01 17:50:40", "languages": [ @@ -1886,13 +1900,27 @@ ], "model_or_dataset": "model" }, + { + "description": "bert-finetuned-japanese-sentiment This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on product amazon reviews japanese dataset.", + "url": "https://huggingface.co./christian-phu/bert-finetuned-japanese-sentiment", + "project_name": "bert-finetuned-japanese-sentiment", + "downloads": 21171, + "source": "Hugging Face", + "score": 0.10997571147297422, + "first_commit": "2023-04-06 16:43:51", + "latest_commit": "2023-04-07 17:27:53", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForSequenceClassification" + }, { "description": "Phishing URL dataset from JPCERT/CC", "url": "https://github.com/JPCERTCC/phishurl-list", "project_name": "phishurl-list", "stargazers_count": 150, "source": "GitHub", - "score": 0.10674502466413835, + "score": 0.10818223659926691, "first_commit": "2022-08-05 15:20:50", "latest_commit": "2024-05-02 04:51:47", "languages": [ @@ -1900,41 +1928,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "このドキュメントの日本語版はまだ作成中です。", - "url": "https://huggingface.co./bclavie/JaColBERT", - "project_name": "JaColBERT", - "downloads": 18086, - "source": "Hugging Face", - "score": 0.0990061328238758, - "first_commit": "2023-12-25 22:43:54", - "latest_commit": "2024-01-27 15:30:00", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "HF_ColBERT" - }, - { - "description": "Please feel free to open an issue or pull request.", - "url": "https://huggingface.co./datasets/shunk031/JGLUE", - "project_name": "JGLUE", - "downloads": 18026, - "source": "Hugging Face", - "score": 0.09836983579169577, - "first_commit": "2023-02-25 13:33:13", - "latest_commit": "2024-05-21 11:23:51", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, { "description": "WRIME: 主観と客観の感情分析データセット", "url": "https://github.com/ids-cv/wrime", "project_name": "wrime", "stargazers_count": 145, "source": "GitHub", - "score": 0.09201320216634525, + "score": 0.0933919742010498, "first_commit": "2020-08-18 14:13:44", "latest_commit": "2023-01-15 13:44:21", "languages": [], @@ -1946,7 +1946,7 @@ "project_name": "asari", "stargazers_count": 143, "source": "GitHub", - "score": 0.086120473167228, + "score": 0.08747586924176295, "first_commit": "2019-02-06 14:17:47", "latest_commit": "2022-10-19 07:38:31", "languages": [ @@ -1960,7 +1960,7 @@ "project_name": "wanna", "stargazers_count": 143, "source": "GitHub", - "score": 0.086120473167228, + "score": 0.08747586924176295, "first_commit": "2023-03-01 18:10:59", "latest_commit": "2023-04-02 00:25:56", "languages": [ @@ -1974,7 +1974,7 @@ "project_name": "mojimoji", "stargazers_count": 142, "source": "GitHub", - "score": 0.08317410866766937, + "score": 0.08451781676211953, "first_commit": "2013-11-03 01:38:05", "latest_commit": "2024-01-12 19:24:55", "languages": [ @@ -1988,7 +1988,7 @@ "project_name": 
"jp-stable", "stargazers_count": 142, "source": "GitHub", - "score": 0.08317410866766937, + "score": 0.08451781676211953, "first_commit": null, "latest_commit": null, "languages": [], @@ -2000,7 +2000,7 @@ "project_name": "Japanese-Alpaca-LoRA", "stargazers_count": 142, "source": "GitHub", - "score": 0.08317410866766937, + "score": 0.08451781676211953, "first_commit": "2023-03-22 23:04:56", "latest_commit": "2023-04-03 05:31:02", "languages": [ @@ -2009,60 +2009,32 @@ "model_or_dataset": "model" }, { - "description": "Contributors/Maintainers Junfeng Jiang@Aizawa Lab: jiangjf@is.s.u-tokyo.ac.jp", - "url": "https://huggingface.co./datasets/Coldog2333/JMedBench", - "project_name": "JMedBench", - "downloads": 16420, + "description": "Please feel free to open an issue or pull request.", + "url": "https://huggingface.co./datasets/shunk031/JGLUE", + "project_name": "JGLUE", + "downloads": 17617, "source": "Hugging Face", - "score": 0.0813382852303436, - "first_commit": "2024-08-17 11:12:54", - "latest_commit": "2024-09-01 12:41:40", + "score": 0.07543782397151891, + "first_commit": "2023-02-25 13:33:13", + "latest_commit": "2024-05-21 11:23:51", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "bert-finetuned-japanese-sentiment This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on product amazon reviews japanese dataset.", - "url": "https://huggingface.co./christian-phu/bert-finetuned-japanese-sentiment", - "project_name": "bert-finetuned-japanese-sentiment", - "downloads": 15767, - "source": "Hugging Face", - "score": 0.07441325253011759, - "first_commit": "2023-04-06 16:43:51", - "latest_commit": "2023-04-07 17:27:53", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForSequenceClassification" - }, - { - "description": "Japanese Stable LM Base Gamma 7B Model Description", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-gamma-7b", - "project_name": "japanese-stablelm-base-gamma-7b", - "downloads": 15095, - "source": "Hugging Face", - "score": 0.06728672576970124, - "first_commit": "2023-10-16 08:15:14", - "latest_commit": "2024-01-25 08:05:12", - "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" - }, - { - "description": "LINE DistilBERT Japanese This is a DistilBERT model pre-trained on 131 GB of Japanese web text.", - "url": "https://huggingface.co./line-corporation/line-distilbert-base-japanese", - "project_name": "line-distilbert-base-japanese", - "downloads": 14953, + "description": "OpenCALM-3B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-3b", + "project_name": "open-calm-3b", + "downloads": 16880, "source": "Hugging Face", - "score": 0.06578082279354183, - "first_commit": "2023-03-10 10:23:54", - "latest_commit": "2023-12-01 09:50:34", + "score": 0.06827563401975398, + "first_commit": "2023-05-15 07:14:36", + "latest_commit": "2023-05-18 01:11:50", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DistilBertForMaskedLM" + "model_architectures": "GPTNeoXForCausalLM" }, { "description": "A set of metrics for feature selection from text data", @@ -2070,7 +2042,7 @@ "project_name": "JapaneseTokenizers", "stargazers_count": 136, "source": "GitHub", - "score": 0.06549592167031765, + "score": 
0.066769501884259, "first_commit": "2015-09-01 19:27:42", "latest_commit": "2019-03-25 16:52:52", "languages": [ @@ -2084,7 +2056,7 @@ "project_name": "chABSA-dataset", "stargazers_count": 134, "source": "GitHub", - "score": 0.05960319267120041, + "score": 0.06085339692497215, "first_commit": "2017-12-27 11:51:01", "latest_commit": "2018-09-11 12:28:06", "languages": [ @@ -2095,18 +2067,18 @@ "model_or_dataset": "dataset" }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1", - "project_name": "Llama-3-Swallow-8B-Instruct-v0.1", - "downloads": 14295, + "description": "Japanese Stable LM Base Gamma 7B Model Description", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-gamma-7b", + "project_name": "japanese-stablelm-base-gamma-7b", + "downloads": 15825, "source": "Hugging Face", - "score": 0.05880276534063415, - "first_commit": "2024-06-26 04:11:25", - "latest_commit": "2024-07-06 15:02:39", + "score": 0.05802310973330621, + "first_commit": "2023-10-16 08:15:14", + "latest_commit": "2024-01-25 08:05:12", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { "description": "自然言語で書かれた時間情報表現を抽出/規格化するルールベースの解析器", @@ -2114,7 +2086,7 @@ "project_name": "ja-timex", "stargazers_count": 132, "source": "GitHub", - "score": 0.05371046367208317, + "score": 0.05493729196568531, "first_commit": "2021-07-18 20:38:04", "latest_commit": "2023-11-04 14:58:31", "languages": [ @@ -2122,27 +2094,13 @@ ], "model_or_dataset": null }, - { - "description": "BERT base Japanese (unidic-lite with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v2", - "project_name": "bert-base-japanese-v2", - "downloads": 13759, - "source": "Hugging Face", - "score": 0.0531185118531592, - "first_commit": "2021-03-05 03:37:30", - "latest_commit": "2021-09-23 15:45:31", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" - }, { "description": "A JSON kanji dataset with updated JLPT levels and WaniKani information", "url": "https://github.com/davidluzgouveia/kanji-data", "project_name": "kanji-data", "stargazers_count": 130, "source": "GitHub", - "score": 0.04781773467296592, + "score": 0.04902118700639846, "first_commit": "2019-02-28 05:53:36", "latest_commit": "2019-12-29 12:12:54", "languages": [ @@ -2157,19 +2115,61 @@ "project_name": "awesome-bert-japanese", "stargazers_count": 130, "source": "GitHub", - "score": 0.04781773467296592, + "score": 0.04902118700639846, "first_commit": "2020-07-27 00:10:39", "latest_commit": "2023-03-15 18:51:20", "languages": [], "model_or_dataset": null }, + { + "description": "このドキュメントの日本語版はまだ作成中です。", + "url": "https://huggingface.co./bclavie/JaColBERT", + "project_name": "JaColBERT", + "downloads": 14308, + "source": "Hugging Face", + "score": 0.043280854432319237, + "first_commit": "2023-12-25 22:43:54", + "latest_commit": "2024-01-27 15:30:00", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111, + "model_architectures": "HF_ColBERT" + }, + { + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/t5-base-japanese", + "project_name": "t5-base-japanese", + "downloads": 14299, + "source": "Hugging Face", + "score": 0.043193392139828216, + "first_commit": "2021-03-28 10:54:32", + "latest_commit": "2022-07-31 08:20:41", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "T5Model" + }, + { + "description": "reazonspeech-nemo-v2 reazonspeech-nemo-v2 is an automatic speech recognition model trained on ReazonSpeech v2.0 corpus.", + "url": "https://huggingface.co./reazon-research/reazonspeech-nemo-v2", + "project_name": "reazonspeech-nemo-v2", + "downloads": 14003, + "source": "Hugging Face", + "score": 0.04031685452012344, + "first_commit": "2024-01-30 01:49:12", + "latest_commit": "2024-02-14 01:32:45", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": null + }, { "description": "Wikipediaを用いた日本語の固有表現抽出データセット", "url": "https://github.com/stockmarkteam/ner-wikipedia-dataset", "project_name": "ner-wikipedia-dataset", "stargazers_count": 127, "source": "GitHub", - "score": 0.038978641174290056, + "score": 0.04014702956746819, "first_commit": "2020-12-15 01:52:19", "latest_commit": "2023-09-02 23:44:38", "languages": [ @@ -2183,7 +2183,7 @@ "project_name": "PAX_SAPIENTICA", "stargazers_count": 127, "source": "GitHub", - "score": 0.038978641174290056, + "score": 0.04014702956746819, "first_commit": "2022-06-24 23:18:02", "latest_commit": "2024-08-13 01:06:14", "languages": [ @@ -2193,13 +2193,27 @@ ], "model_or_dataset": "dataset" }, + { + "description": "LINE DistilBERT Japanese This is a DistilBERT model pre-trained on 131 GB of Japanese web text.", + "url": "https://huggingface.co./line-corporation/line-distilbert-base-japanese", + "project_name": "line-distilbert-base-japanese", + "downloads": 13778, + "source": "Hugging Face", + "score": 0.03813029720784785, + "first_commit": "2023-03-10 10:23:54", + "latest_commit": "2023-12-01 09:50:34", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DistilBertForMaskedLM" + }, { "description": "Node.js module for converting Japanese Hiragana and Katakana script to, and from, Romaji using Hepburn romanisation", "url": "https://github.com/lovell/hepburn", "project_name": "hepburn", "stargazers_count": 126, "source": "GitHub", - "score": 0.03603227667473144, + "score": 0.037188977087824766, "first_commit": "2013-06-28 03:06:52", "latest_commit": "2023-09-08 09:11:19", "languages": [ @@ -2213,7 +2227,7 @@ "project_name": "kanji-frequency", "stargazers_count": 123, "source": "GitHub", - "score": 0.027193183176055572, + "score": 0.028314819648894502, "first_commit": "2016-01-23 01:19:08", "latest_commit": "2024-07-13 03:51:19", "languages": [ @@ -2222,13 +2236,27 @@ ], "model_or_dataset": "dataset" }, + { + "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of May 31, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", + "url": "https://huggingface.co./sazyou-roukaku/BracingEvoMix", + "project_name": "BracingEvoMix", + "downloads": 12672, + "source": "Hugging Face", + "score": 0.02738215326395095, + "first_commit": "2023-05-31 10:29:16", + "latest_commit": "2023-10-01 08:58:54", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": null + }, { "description": "deep-learning-with-pytorchの日本語版repositoryです。", "url": 
"https://github.com/Gin5050/deep-learning-with-pytorch-ja", "project_name": "deep-learning-with-pytorch-ja", "stargazers_count": 121, "source": "GitHub", - "score": 0.02130045417693833, + "score": 0.022398714689607656, "first_commit": "2020-12-05 15:15:16", "latest_commit": "2021-05-13 11:02:11", "languages": [ @@ -2239,13 +2267,27 @@ ], "model_or_dataset": null }, + { + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1", + "project_name": "Llama-3-Swallow-8B-Instruct-v0.1", + "downloads": 12045, + "source": "Hugging Face", + "score": 0.021288946887076308, + "first_commit": "2024-06-26 04:11:25", + "latest_commit": "2024-07-06 15:02:39", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" + }, { "description": "A unified language analyzer for Japanese", "url": "https://github.com/ku-nlp/kwja", "project_name": "kwja", "stargazers_count": 119, "source": "GitHub", - "score": 0.015407725177821087, + "score": 0.01648260973032081, "first_commit": "2022-05-25 16:09:37", "latest_commit": "2024-08-06 23:13:59", "languages": [ @@ -2259,7 +2301,7 @@ "project_name": "jamdict", "stargazers_count": 117, "source": "GitHub", - "score": 0.009514996178703845, + "score": 0.010566504771033963, "first_commit": "2016-10-25 10:47:58", "latest_commit": "2021-06-06 12:04:03", "languages": [ @@ -2268,17 +2310,17 @@ "model_or_dataset": "dataset" }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-v0.1", - "project_name": "Llama-3-Swallow-8B-v0.1", - "downloads": 9617, + "description": "Tanuki-8B-dpo-v1.0 モデルについて Tanuki-8Bは、フルスクラッチで約1.3Tトークン事前学習を行った約8Bパラメータの大規模言語モデルです。", + "url": "https://huggingface.co./weblab-GENIAC/Tanuki-8B-dpo-v1.0", + "project_name": "Tanuki-8B-dpo-v1.0", + "downloads": 10915, "source": "Hugging Face", - "score": 0.009192806731664376, - "first_commit": "2024-05-20 06:36:00", - "latest_commit": "2024-07-01 06:24:48", + "score": 0.010307570163203347, + "first_commit": "2024-08-12 12:47:52", + "latest_commit": "2024-09-02 23:47:02", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 7.51, "model_architectures": "LlamaForCausalLM" }, { @@ -2287,33 +2329,19 @@ "project_name": "dataset-list", "stargazers_count": 116, "source": "GitHub", - "score": 0.006568631679145223, + "score": 0.007608452291390541, "first_commit": "2015-11-21 22:38:56", "latest_commit": "2024-07-26 00:40:50", "languages": [], "model_or_dataset": null }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-v0.1", - "project_name": "Swallow-7b-instruct-v0.1", - "downloads": 9260, - "source": "Hugging Face", - "score": 0.005406839390193189, - "first_commit": "2024-03-04 08:46:03", - "latest_commit": "2024-07-06 15:18:14", - "languages": [], - "model_or_dataset": "model", - "model_size": 6.83, - "model_architectures": "LlamaForCausalLM" - }, { "description": "A comparison tool of Japanese tokenizers", "url": "https://github.com/taishi-i/toiro", "project_name": "toiro", "stargazers_count": 115, "source": "GitHub", - "score": 0.0036222671795866014, + "score": 0.004650399811747119, "first_commit": "2020-08-13 19:49:15", "latest_commit": "2023-07-31 23:55:55", "languages": [ @@ -2328,7 +2356,7 @@ "project_name": "ja_text_bert", "stargazers_count": 
115, "source": "GitHub", - "score": 0.0036222671795866014, + "score": 0.004650399811747119, "first_commit": "2018-10-31 18:23:04", "latest_commit": "2018-11-08 15:17:20", "languages": [ @@ -2343,7 +2371,7 @@ "project_name": "GPTSAN", "stargazers_count": 115, "source": "GitHub", - "score": 0.0036222671795866014, + "score": 0.004650399811747119, "first_commit": "2022-02-11 14:38:55", "latest_commit": "2023-09-13 12:20:29", "languages": [ @@ -2352,46 +2380,18 @@ "model_or_dataset": "model" }, { - "description": "reazonspeech-nemo-v2 reazonspeech-nemo-v2 is an automatic speech recognition model trained on ReazonSpeech v2.0 corpus.", - "url": "https://huggingface.co./reazon-research/reazonspeech-nemo-v2", - "project_name": "reazonspeech-nemo-v2", - "downloads": 8921, - "source": "Hugging Face", - "score": 0.0018117611583760118, - "first_commit": "2024-01-30 01:49:12", - "latest_commit": "2024-02-14 01:32:45", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": null - }, - { - "description": "BERT large Japanese (unidic-lite with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese", - "project_name": "bert-large-japanese", - "downloads": 8602, - "source": "Hugging Face", - "score": -0.0015712180627144882, - "first_commit": "2021-03-05 06:17:13", - "latest_commit": "2021-09-23 15:45:41", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" - }, - { - "description": "luke-japanese-large luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-large", - "project_name": "luke-japanese-large", - "downloads": 8361, + "description": "Model Card for Japanese character-level DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese-char-wwm", + "project_name": "deberta-v2-large-japanese-char-wwm", + "downloads": 10018, "source": "Hugging Face", - "score": -0.004127011141970947, - "first_commit": "2022-11-07 14:25:53", - "latest_commit": "2022-11-09 11:18:56", + "score": 0.0015904950115979966, + "first_commit": "2023-03-09 10:13:05", + "latest_commit": "2023-09-15 03:48:28", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LukeForMaskedLM" + "model_size": 0.33, + "model_architectures": "DebertaV2ForMaskedLM" }, { "description": "複数の前処理を構成して管理するテキスト前処理ツール", @@ -2399,7 +2399,7 @@ "project_name": "HojiChar", "stargazers_count": 112, "source": "GitHub", - "score": -0.005216826319089263, + "score": -0.00422375762718315, "first_commit": "2023-01-31 20:37:40", "latest_commit": "2024-08-15 17:25:48", "languages": [ @@ -2413,7 +2413,7 @@ "project_name": "pymlask", "stargazers_count": 112, "source": "GitHub", - "score": -0.005216826319089263, + "score": -0.00422375762718315, "first_commit": "2017-02-10 21:33:23", "latest_commit": "2024-07-26 01:27:14", "languages": [ @@ -2422,40 +2422,40 @@ "model_or_dataset": null }, { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", - "url": "https://huggingface.co./sonoisa/t5-base-japanese", - "project_name": "t5-base-japanese", - "downloads": 8230, + "description": "LaBSE Model description Language-agnostic BERT Sentence Encoder (LaBSE) is a BERT-based model trained for sentence embedding for 109 languages.", + "url": "https://huggingface.co./setu4993/LaBSE", + "project_name": "LaBSE", + "downloads": 9271, "source": "Hugging Face", - "score": -0.0055162596622306825, - "first_commit": "2021-03-28 10:54:32", - "latest_commit": "2022-07-31 08:20:41", + "score": -0.005668875265156961, + "first_commit": "2021-01-11 06:06:51", + "latest_commit": "2023-10-18 23:23:16", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5Model" + "model_size": 0.47100000000000003, + "model_architectures": "BertModel" }, { - "description": "Fugaku-LLM-13B-instruct-gguf Fugaku-LLMさんが公開しているFugaku-LLM-13B-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Fugaku-LLM-13B-instruct-gguf", - "project_name": "Fugaku-LLM-13B-instruct-gguf", - "downloads": 7807, + "description": "transformers-ud-japanese-electra-ginza-510 (sudachitra-wordpiece, mC4 Japanese)", + "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-510", + "project_name": "transformers-ud-japanese-electra-base-ginza-510", + "downloads": 8998, "source": "Hugging Face", - "score": -0.010002153739099904, - "first_commit": "2024-05-10 16:43:49", - "latest_commit": "2024-05-12 06:06:51", + "score": -0.008321898137384676, + "first_commit": "2021-12-05 11:31:57", + "latest_commit": "2021-12-05 21:12:12", "languages": [], "model_or_dataset": "model", - "model_size": 13.4, - "model_architectures": null + "model_size": null, + "model_architectures": "ElectraModel" }, { "description": "Model Card for Japanese DeBERTa V3 base Model description This is a Japanese DeBERTa V3 base model pre-trained on LLM-jp corpus v1.0.", "url": "https://huggingface.co./ku-nlp/deberta-v3-base-japanese", "project_name": "deberta-v3-base-japanese", - "downloads": 7707, + "downloads": 8878, "source": "Hugging Face", - "score": -0.011062648792733288, + "score": -0.00948806203726499, "first_commit": "2024-04-23 05:08:21", "latest_commit": "2024-04-28 06:08:55", "languages": [], @@ -2463,27 +2463,13 @@ "model_size": null, "model_architectures": null }, - { - "description": "Tanuki-8B-dpo-v1.0 モデルについて Tanuki-8Bは、フルスクラッチで約1.3Tトークン事前学習を行った約8Bパラメータの大規模言語モデルです。", - "url": "https://huggingface.co./weblab-GENIAC/Tanuki-8B-dpo-v1.0", - "project_name": "Tanuki-8B-dpo-v1.0", - "downloads": 7643, - "source": "Hugging Face", - "score": -0.011741365627058655, - "first_commit": "2024-08-12 12:47:52", - "latest_commit": "2024-09-02 23:47:02", - "languages": [], - "model_or_dataset": "model", - "model_size": 7.51, - "model_architectures": "LlamaForCausalLM" - }, { "description": "Swallow-MX-8x7b-NVE-v0.1 Our Swallow-MX-8x7b-NVE-v0.1 model has undergone continuous pre-training from the Mixtral-8x7B-Instruct-v0.1, primarily with the addition of Japanese language data.", "url": "https://huggingface.co./tokyotech-llm/Swallow-MX-8x7b-NVE-v0.1", "project_name": "Swallow-MX-8x7b-NVE-v0.1", - "downloads": 7440, + "downloads": 8531, "source": "Hugging Face", - "score": -0.013894170585934429, + "score": -0.0128602193144189, "first_commit": "2024-02-22 04:44:42", "latest_commit": "2024-05-03 18:51:12", "languages": [], @@ -2497,7 +2483,7 @@ "project_name": "JLM", "stargazers_count": 109, "source": "GitHub", - "score": -0.014055919817765127, + 
"score": -0.013097915066113418, "first_commit": "2018-01-10 14:12:41", "latest_commit": "2019-06-04 21:35:33", "languages": [ @@ -2511,7 +2497,7 @@ "project_name": "t5-japanese", "stargazers_count": 109, "source": "GitHub", - "score": -0.014055919817765127, + "score": -0.013097915066113418, "first_commit": "2021-03-28 22:12:19", "latest_commit": "2023-07-20 21:36:10", "languages": [ @@ -2525,7 +2511,7 @@ "project_name": "tdmelodic", "stargazers_count": 109, "source": "GitHub", - "score": -0.014055919817765127, + "score": -0.013097915066113418, "first_commit": "2020-09-14 18:12:46", "latest_commit": "2024-03-22 01:44:10", "languages": [ @@ -2534,18 +2520,18 @@ "model_or_dataset": "dataset" }, { - "description": "CyberAgentLM2-7B (CALM2-7B)", - "url": "https://huggingface.co./cyberagent/calm2-7b", - "project_name": "calm2-7b", - "downloads": 7424, + "description": "luke-japanese-large luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-large", + "project_name": "luke-japanese-large", + "downloads": 8438, "source": "Hugging Face", - "score": -0.01406384979451577, - "first_commit": "2023-11-01 07:24:59", - "latest_commit": "2023-11-02 05:46:18", + "score": -0.013763996336826144, + "first_commit": "2022-11-07 14:25:53", + "latest_commit": "2022-11-09 11:18:56", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "LukeForMaskedLM" }, { "description": "Yet Another Japanese Dependency Structure Analyzer", @@ -2553,7 +2539,7 @@ "project_name": "cabocha", "stargazers_count": 108, "source": "GitHub", - "score": -0.017002284317323747, + "score": -0.01605596754575684, "first_commit": "2011-07-29 04:08:14", "latest_commit": "2020-07-24 11:31:47", "languages": [ @@ -2572,7 +2558,7 @@ "project_name": "ja.text8", "stargazers_count": 108, "source": "GitHub", - "score": -0.017002284317323747, + "score": -0.01605596754575684, "first_commit": "2017-10-04 13:15:25", "latest_commit": "2017-10-04 13:38:23", "languages": [ @@ -2581,18 +2567,32 @@ "model_or_dataset": "dataset" }, { - "description": "Model Card for Japanese character-level DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese-char-wwm", - "project_name": "deberta-v2-large-japanese-char-wwm", - "downloads": 6760, + "description": "BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-v2", + "project_name": "bert-large-japanese-v2", + "downloads": 7754, "source": "Hugging Face", - "score": -0.021105536950641452, - "first_commit": "2023-03-09 10:13:05", - "latest_commit": "2023-09-15 03:48:28", + "score": -0.020411130566143937, + "first_commit": "2023-05-19 00:40:35", + "latest_commit": "2023-05-19 00:47:40", "languages": [], "model_or_dataset": "model", - "model_size": 0.33, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": null, + "model_architectures": "BertForPreTraining" + }, + { + "description": "Japanese Kana Kanji conversion input method library", + "url": "https://github.com/ueno/libkkc", + "project_name": "libkkc", + "stargazers_count": 106, + "source": "GitHub", 
+ "score": -0.021972072505043686, + "first_commit": "2012-08-07 17:45:52", + "latest_commit": "2024-09-02 12:08:48", + "languages": [ + "Python" + ], + "model_or_dataset": "dataset" }, { "description": "JaQuAD: Japanese Question Answering Dataset for Machine Reading Comprehension (2022, Skelter Labs)", @@ -2600,7 +2600,7 @@ "project_name": "JaQuAD", "stargazers_count": 106, "source": "GitHub", - "score": -0.02289501331644099, + "score": -0.021972072505043686, "first_commit": "2022-01-11 16:58:07", "latest_commit": "2022-02-04 11:42:51", "languages": [ @@ -2608,27 +2608,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "japanese-sentiment-analysis This model was trained from scratch on the chABSA dataset.", - "url": "https://huggingface.co./jarvisx17/japanese-sentiment-analysis", - "project_name": "japanese-sentiment-analysis", - "downloads": 6563, - "source": "Hugging Face", - "score": -0.02319471220629922, - "first_commit": "2022-11-15 06:28:39", - "latest_commit": "2024-01-20 14:45:14", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" - }, { "description": "Yet another mecab wrapper for nodejs", "url": "https://github.com/golbin/node-mecab-ya", "project_name": "node-mecab-ya", "stargazers_count": 105, "source": "GitHub", - "score": -0.025841377815999614, + "score": -0.02493012498468711, "first_commit": "2016-04-09 23:34:34", "latest_commit": "2021-06-18 18:30:43", "languages": [ @@ -2637,46 +2623,32 @@ "model_or_dataset": null }, { - "description": "BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-v2", - "project_name": "bert-large-japanese-v2", - "downloads": 6186, - "source": "Hugging Face", - "score": -0.027192778558497083, - "first_commit": "2023-05-19 00:40:35", - "latest_commit": "2023-05-19 00:47:40", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForPreTraining" - }, - { - "description": "japanese-roberta-base This repository provides a base-sized Japanese RoBERTa model.", - "url": "https://huggingface.co./rinna/japanese-roberta-base", - "project_name": "japanese-roberta-base", - "downloads": 5861, + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-v0.1", + "project_name": "Llama-3-Swallow-8B-v0.1", + "downloads": 6842, "source": "Hugging Face", - "score": -0.030639387482805588, - "first_commit": "2021-06-11 02:56:39", - "latest_commit": "2024-07-20 07:44:40", + "score": -0.029273976205234326, + "first_commit": "2024-05-20 06:36:00", + "latest_commit": "2024-07-01 06:24:48", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "RobertaForMaskedLM" + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "transformers-ud-japanese-electra-ginza-510 (sudachitra-wordpiece, mC4 Japanese)", - "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-510", - "project_name": "transformers-ud-japanese-electra-base-ginza-510", - "downloads": 5810, + "description": "CyberAgentLM2-7B (CALM2-7B)", + "url": "https://huggingface.co./cyberagent/calm2-7b", + "project_name": "calm2-7b", + "downloads": 6597, "source": "Hugging Face", - "score": -0.031180239960158614, - "first_commit": "2021-12-05 11:31:57", - "latest_commit": "2021-12-05 21:12:12", + "score": -0.03165489416748997, + "first_commit": 
"2023-11-01 07:24:59", + "latest_commit": "2023-11-02 05:46:18", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraModel" + "model_architectures": "LlamaForCausalLM" }, { "description": "An example usage of JParaCrawl pre-trained Neural Machine Translation (NMT) models.", @@ -2684,7 +2656,7 @@ "project_name": "jparacrawl-finetune", "stargazers_count": 102, "source": "GitHub", - "score": -0.03468047131467548, + "score": -0.033804282423617375, "first_commit": "2019-11-17 14:46:58", "latest_commit": "2021-04-29 14:27:00", "languages": [ @@ -2698,7 +2670,7 @@ "project_name": "Kamite", "stargazers_count": 102, "source": "GitHub", - "score": -0.03468047131467548, + "score": -0.033804282423617375, "first_commit": "2022-07-08 19:26:15", "latest_commit": "2024-02-22 23:41:52", "languages": [ @@ -2717,7 +2689,7 @@ "project_name": "house-of-councillors", "stargazers_count": 102, "source": "GitHub", - "score": -0.03468047131467548, + "score": -0.033804282423617375, "first_commit": "2022-06-16 16:06:39", "latest_commit": "2024-08-15 14:22:06", "languages": [ @@ -2727,46 +2699,32 @@ "model_or_dataset": "dataset" }, { - "description": "【告知】chilled_remix及びreversemixは2023年5月21日にVersion変更を行い、v2へ移行いたしました。", - "url": "https://huggingface.co./sazyou-roukaku/chilled_remix", - "project_name": "chilled_remix", - "downloads": 5470, - "source": "Hugging Face", - "score": -0.034785923142512123, - "first_commit": "2023-04-18 12:48:48", - "latest_commit": "2023-06-09 23:08:31", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": null - }, - { - "description": "OpenCALM-7B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-7b", - "project_name": "open-calm-7b", - "downloads": 5355, + "description": "japanese-sentiment-analysis This model was trained from scratch on the chABSA dataset.", + "url": "https://huggingface.co./jarvisx17/japanese-sentiment-analysis", + "project_name": "japanese-sentiment-analysis", + "downloads": 6303, "source": "Hugging Face", - "score": -0.03600549245419052, - "first_commit": "2023-05-15 07:53:34", - "latest_commit": "2023-05-18 01:12:08", + "score": -0.03451199572219674, + "first_commit": "2022-11-15 06:28:39", + "latest_commit": "2024-01-20 14:45:14", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" }, { - "description": "japanese-gpt2-medium This repository provides a medium-sized Japanese GPT-2 model.", - "url": "https://huggingface.co./rinna/japanese-gpt2-medium", - "project_name": "japanese-gpt2-medium", - "downloads": 5237, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-hf", + "project_name": "Swallow-7b-hf", + "downloads": 6168, "source": "Hugging Face", - "score": -0.03725687661747791, - "first_commit": "2021-04-05 02:01:26", - "latest_commit": "2024-07-20 07:50:47", + "score": -0.03582393010956209, + "first_commit": "2023-11-25 10:09:49", + "latest_commit": "2024-06-29 08:56:17", "languages": [], "model_or_dataset": "model", - "model_size": 0.361, - "model_architectures": "GPT2LMHeadModel" + "model_size": 6.83, + "model_architectures": "LlamaForCausalLM" 
}, { "description": "CJK computer science terms comparison / 中日韓電腦科學術語對照 / 日中韓のコンピュータ科学の用語対照 / 한·중·일 전산학 용어 대조", @@ -2774,7 +2732,7 @@ "project_name": "cjk-compsci-terms", "stargazers_count": 101, "source": "GitHub", - "score": -0.0376268358142341, + "score": -0.0367623349032608, "first_commit": "2020-12-19 06:11:14", "latest_commit": "2022-09-16 01:27:59", "languages": [ @@ -2789,7 +2747,7 @@ "project_name": "JapaneseWordSimilarityDataset", "stargazers_count": 101, "source": "GitHub", - "score": -0.0376268358142341, + "score": -0.0367623349032608, "first_commit": "2016-01-08 16:25:16", "latest_commit": "2021-12-07 12:22:07", "languages": [ @@ -2803,81 +2761,39 @@ "project_name": "japanese-pitch-accent-resources", "stargazers_count": 101, "source": "GitHub", - "score": -0.0376268358142341, + "score": -0.0367623349032608, "first_commit": "2018-03-03 15:51:09", "latest_commit": "2024-02-11 00:55:27", "languages": [], "model_or_dataset": null }, { - "description": "bert-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-upos", - "project_name": "bert-large-japanese-upos", - "downloads": 4786, - "source": "Hugging Face", - "score": -0.04203970930936448, - "first_commit": "2021-08-19 10:39:38", - "latest_commit": "2022-09-18 19:43:53", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForTokenClassification" - }, - { - "description": "DeBERTa V2 small Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", - "url": "https://huggingface.co./izumi-lab/deberta-v2-small-japanese", - "project_name": "deberta-v2-small-japanese", - "downloads": 4729, + "description": "japanese-gpt2-medium This repository provides a medium-sized Japanese GPT-2 model.", + "url": "https://huggingface.co./rinna/japanese-gpt2-medium", + "project_name": "japanese-gpt2-medium", + "downloads": 5711, "source": "Hugging Face", - "score": -0.042644191489935514, - "first_commit": "2023-10-21 13:24:28", - "latest_commit": "2024-07-19 03:08:14", + "score": -0.04026507096160629, + "first_commit": "2021-04-05 02:01:26", + "latest_commit": "2024-07-20 07:50:47", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": 0.361, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "Llama-3.1-70B-Japanese-Instruct-2407-gguf cyberagentさんが公開しているLlama-3.1-70B-Japanese-Instruct-2407のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Japanese-Instruct-2407-gguf", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-gguf", - "downloads": 4668, + "description": "japanese-roberta-base This repository provides a base-sized Japanese RoBERTa model.", + "url": "https://huggingface.co./rinna/japanese-roberta-base", + "project_name": "japanese-roberta-base", + "downloads": 5203, "source": "Hugging Face", - "score": -0.04329109347265188, - "first_commit": "2024-07-26 09:05:34", - "latest_commit": "2024-07-27 05:59:10", + "score": -0.04520183147109962, + "first_commit": "2021-06-11 02:56:39", + "latest_commit": "2024-07-20 07:44:40", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": null - }, - { - "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF Original Model elyza/ELYZA-japanese-Llama-2-13b-fast-instruct Run with LlamaEdge LlamaEdge version: v0.2.8 and above Prompt template Prompt type: llama-2-chat Prompt string <s>[INST] <<SYS>> {{ system_prompt }} <</SYS>> {{ 
user_msg_1 }}", - "url": "https://huggingface.co./second-state/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "downloads": 4594, - "source": "Hugging Face", - "score": -0.04407585981234058, - "first_commit": "2024-01-06 03:33:53", - "latest_commit": "2024-03-20 07:21:25", - "languages": [], - "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-hf", - "project_name": "Swallow-13b-instruct-hf", - "downloads": 4403, - "source": "Hugging Face", - "score": -0.04610140536478035, - "first_commit": "2023-12-07 03:10:55", - "latest_commit": "2024-06-29 08:56:29", - "languages": [], - "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.111, + "model_architectures": "RobertaForMaskedLM" }, { "description": "Asynchronous japanese morphological analyser using MeCab.", @@ -2885,7 +2801,7 @@ "project_name": "node-mecab-async", "stargazers_count": 98, "source": "GitHub", - "score": -0.046465929312909965, + "score": -0.04563649234219107, "first_commit": "2012-10-29 09:32:49", "latest_commit": "2017-10-29 14:56:11", "languages": [ @@ -2899,7 +2815,7 @@ "project_name": "jiten", "stargazers_count": 98, "source": "GitHub", - "score": -0.046465929312909965, + "score": -0.04563649234219107, "first_commit": "2020-06-15 02:09:21", "latest_commit": "2023-12-16 23:43:06", "languages": [ @@ -2910,54 +2826,26 @@ "model_or_dataset": "dataset" }, { - "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b", - "project_name": "ELYZA-japanese-Llama-2-7b", - "downloads": 4303, - "source": "Hugging Face", - "score": -0.047161900418413734, - "first_commit": "2023-08-28 12:38:34", - "latest_commit": "2023-08-29 03:45:51", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-hf", - "project_name": "Swallow-7b-hf", - "downloads": 4298, - "source": "Hugging Face", - "score": -0.04721492517109541, - "first_commit": "2023-11-25 10:09:49", - "latest_commit": "2024-06-29 08:56:17", - "languages": [], - "model_or_dataset": "model", - "model_size": 6.83, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-instruct", - "project_name": "ELYZA-japanese-Llama-2-13b-instruct", - "downloads": 4186, + "description": "DeBERTa V2 small Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", + "url": "https://huggingface.co./izumi-lab/deberta-v2-small-japanese", + "project_name": "deberta-v2-small-japanese", + "downloads": 4838, "source": "Hugging Face", - "score": -0.0484026796311648, - "first_commit": "2023-12-25 16:10:32", - "latest_commit": "2023-12-27 01:41:15", + "score": 
-0.048748913333235576, + "first_commit": "2023-10-21 13:24:28", + "latest_commit": "2024-07-19 03:08:14", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "DebertaV2ForMaskedLM" }, { "description": "Llama 3 Youko 8B (rinna/llama-3-youko-8b)", "url": "https://huggingface.co./rinna/llama-3-youko-8b", "project_name": "llama-3-youko-8b", - "downloads": 4173, + "downloads": 4591, "source": "Hugging Face", - "score": -0.04854054398813713, + "score": -0.05114926736048922, "first_commit": "2024-05-01 07:53:46", "latest_commit": "2024-07-25 05:14:42", "languages": [], @@ -2969,9 +2857,9 @@ "description": "RetrievaBERT Model The RetrievaBERT is the pre-trained Transformer Encoder using Megatron-LM.", "url": "https://huggingface.co./retrieva-jp/bert-1.3b", "project_name": "bert-1.3b", - "downloads": 4112, + "downloads": 4524, "source": "Hugging Face", - "score": -0.0491874459708535, + "score": -0.0518003755379224, "first_commit": "2024-06-25 06:18:24", "latest_commit": "2024-07-09 05:36:08", "languages": [], @@ -2980,124 +2868,68 @@ "model_architectures": "RetrievaBertForMaskedLM" }, { - "description": "japanese-gpt-neox-3.6b Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b", - "project_name": "japanese-gpt-neox-3.6b", - "downloads": 4080, - "source": "Hugging Face", - "score": -0.04952680438801618, - "first_commit": "2023-05-17 02:16:45", - "latest_commit": "2024-07-20 07:55:19", - "languages": [], - "model_or_dataset": "model", - "model_size": 3.76, - "model_architectures": "GPTNeoXForCausalLM" - }, - { - "description": "Llama-3-ELYZA-JP-8B-GGUF Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-GGUF", - "project_name": "Llama-3-ELYZA-JP-8B-GGUF", - "downloads": 3987, - "source": "Hugging Face", - "score": -0.05051306478789523, - "first_commit": "2024-06-25 07:29:22", - "latest_commit": "2024-06-26 02:56:52", - "languages": [], - "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null - }, - { - "description": "このモデルはLuke-japanese-large-liteをファインチューニングしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-large-sentiment-analysis-wrime", - "project_name": "luke-japanese-large-sentiment-analysis-wrime", - "downloads": 3906, - "source": "Hugging Face", - "score": -0.05137206578133827, - "first_commit": "2023-03-13 12:40:08", - "latest_commit": "2023-05-15 12:58:08", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.41400000000000003, - "model_architectures": "LukeForSequenceClassification" - }, - { - "description": "japanese-gpt2-xsmall", - "url": "https://huggingface.co./rinna/japanese-gpt2-xsmall", - "project_name": "japanese-gpt2-xsmall", - "downloads": 3856, + "description": "Kotoba-Whisper-v1.1 Kotoba-Whisper-v1.1 is a Japanese ASR model based on kotoba-tech/kotoba-whisper-v1.0, with additional postprocessing stacks integrated as pipeline.", + "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1", + "project_name": "kotoba-whisper-v1.1", + "downloads": 4304, "source": "Hugging Face", - "score": -0.051902313308154965, - "first_commit": "2021-07-26 02:52:54", - "latest_commit": "2024-07-20 07:48:11", + "score": -0.05393834268770298, + "first_commit": "2024-04-29 14:53:48", + "latest_commit": "2024-05-08 15:34:40", "languages": [], 
"model_or_dataset": "model", - "model_size": 0.0437, - "model_architectures": "GPT2LMHeadModel" + "model_size": 0.756, + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "rinna/youri-7b Overview We conduct continual pre-training of llama2-7b on 40B tokens from a mixture of Japanese and English datasets.", - "url": "https://huggingface.co./rinna/youri-7b", - "project_name": "youri-7b", - "downloads": 3837, + "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-base-lite", + "project_name": "luke-japanese-base-lite", + "downloads": 4188, "source": "Hugging Face", - "score": -0.05210380736834531, - "first_commit": "2023-10-30 15:12:17", - "latest_commit": "2024-07-22 08:01:22", + "score": -0.055065634457587276, + "first_commit": "2022-10-25 09:27:16", + "latest_commit": "2022-11-09 15:22:22", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "LukeForMaskedLM" }, { - "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with LlamaEdge LlamaEdge version: v0.10.1 and above Prompt template Prompt type: llama-3-chat Prompt string <|begin_of_text|><|start_header_id|>system<|end_header_id|> {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> {{ model_answer_1 }}<|eot_id|><|start_header", - "url": "https://huggingface.co./second-state/Llama-3-8B-Japanese-Instruct-GGUF", - "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", - "downloads": 3474, + "description": "Fugaku-LLM-13B-instruct-gguf Fugaku-LLMさんが公開しているFugaku-LLM-13B-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Fugaku-LLM-13B-instruct-gguf", + "project_name": "Fugaku-LLM-13B-instruct-gguf", + "downloads": 4160, "source": "Hugging Face", - "score": -0.0559534044130345, - "first_commit": "2024-05-14 05:37:53", - "latest_commit": "2024-05-14 06:42:38", + "score": -0.055337739367559355, + "first_commit": "2024-05-10 16:43:49", + "latest_commit": "2024-05-12 06:06:51", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 13.4, "model_architectures": null }, { - "description": "OpenCALM-Small Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-small", - "project_name": "open-calm-small", - "downloads": 3330, + "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-base", + "project_name": "luke-japanese-base", + "downloads": 4156, "source": "Hugging Face", - "score": -0.057480517290266576, - "first_commit": "2023-05-15 06:40:15", - "latest_commit": "2023-05-18 01:10:33", + "score": -0.055376611497555364, + "first_commit": "2022-10-25 06:30:23", + "latest_commit": "2022-11-09 15:23:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" - }, - { - "description": "Kotoba-Whisper-v1.1 
Kotoba-Whisper-v1.1 is a Japanese ASR model based on kotoba-tech/kotoba-whisper-v1.0, with additional postprocessing stacks integrated as pipeline.", - "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1", - "project_name": "kotoba-whisper-v1.1", - "downloads": 3286, - "source": "Hugging Face", - "score": -0.05794713511386527, - "first_commit": "2024-04-29 14:53:48", - "latest_commit": "2024-05-08 15:34:40", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.756, - "model_architectures": "WhisperForConditionalGeneration" + "model_architectures": "LukeForMaskedLM" }, { "description": "sbert-jsnli-luke-japanese-base-lite This is a sentence-transformers model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.", "url": "https://huggingface.co./oshizo/sbert-jsnli-luke-japanese-base-lite", "project_name": "sbert-jsnli-luke-japanese-base-lite", - "downloads": 3275, + "downloads": 4066, "source": "Hugging Face", - "score": -0.05806378956976494, + "score": -0.0562512344224656, "first_commit": "2023-01-10 11:53:15", "latest_commit": "2023-01-10 12:36:12", "languages": [], @@ -3105,13 +2937,27 @@ "model_size": null, "model_architectures": "LukeModel" }, + { + "description": "bert-base-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-upos", + "project_name": "bert-base-japanese-upos", + "downloads": 3958, + "source": "Hugging Face", + "score": -0.05730078193235788, + "first_commit": "2021-08-26 23:02:50", + "latest_commit": "2022-09-18 19:43:26", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForTokenClassification" + }, { "description": "Japanese Company Lexicon (JCLdic)", "url": "https://github.com/chakki-works/Japanese-Company-Lexicon", "project_name": "Japanese-Company-Lexicon", "stargazers_count": 94, "source": "GitHub", - "score": -0.058251387311144445, + "score": -0.05746870226076476, "first_commit": "2020-01-16 15:25:09", "latest_commit": "2023-01-21 14:50:18", "languages": [ @@ -3125,7 +2971,7 @@ "project_name": "genshin-dict", "stargazers_count": 94, "source": "GitHub", - "score": -0.058251387311144445, + "score": -0.05746870226076476, "first_commit": "2021-05-05 00:02:08", "latest_commit": "2024-07-15 16:47:41", "languages": [ @@ -3134,96 +2980,82 @@ "model_or_dataset": "dataset" }, { - "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoさんが公開しているjapanese-novel-gpt-j-6bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/aibuncho-japanese-novel-gpt-j-6b-gguf", - "project_name": "aibuncho-japanese-novel-gpt-j-6b-gguf", - "downloads": 3243, + "description": "このモデルはLuke-japanese-large-liteをファインチューニングしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-large-sentiment-analysis-wrime", + "project_name": "luke-japanese-large-sentiment-analysis-wrime", + "downloads": 3915, "source": "Hugging Face", - "score": -0.05840314798692762, - "first_commit": "2023-09-03 17:32:44", - "latest_commit": "2023-09-11 01:10:36", + "score": -0.057718657329814994, + "first_commit": "2023-03-13 12:40:08", + "latest_commit": "2023-05-15 12:58:08", "languages": [], "model_or_dataset": "model", - "model_size": 6.05, - "model_architectures": null + "model_size": 0.41400000000000003, + "model_architectures": "LukeForSequenceClassification" }, { - "description": "Model Card for Model ID 実験モデルです /", - "url": "https://huggingface.co./mmnga/Llama-3-70B-japanese-suzume-vector-v0.1", 
- "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1", - "downloads": 3191, + "description": "clip-japanese-base This is a Japanese CLIP (Contrastive Language-Image Pre-training) model developed by LY Corporation.", + "url": "https://huggingface.co./line-corporation/clip-japanese-base", + "project_name": "clip-japanese-base", + "downloads": 3818, "source": "Hugging Face", - "score": -0.05895460541481698, - "first_commit": "2024-04-28 04:11:49", - "latest_commit": "2024-04-28 07:46:32", + "score": -0.05866130648221825, + "first_commit": "2024-04-24 01:36:22", + "latest_commit": "2024-05-10 03:07:04", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.197, + "model_architectures": "CLYPModel" }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-gamma-7b", - "project_name": "japanese-stablelm-instruct-gamma-7b", - "downloads": 3134, + "description": "OpenCALM-7B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-7b", + "project_name": "open-calm-7b", + "downloads": 3719, "source": "Hugging Face", - "score": -0.05955908759538801, - "first_commit": "2023-10-16 08:55:06", - "latest_commit": "2024-01-24 05:54:38", + "score": -0.059623391699619506, + "first_commit": "2023-05-15 07:53:34", + "latest_commit": "2023-05-18 01:12:08", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-Instruct-v0.1", - "project_name": "Llama-3-Swallow-70B-Instruct-v0.1", - "downloads": 3080, + "description": "japanese-gpt-neox-3.6b Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b", + "project_name": "japanese-gpt-neox-3.6b", + "downloads": 3707, "source": "Hugging Face", - "score": -0.06013175492435004, - "first_commit": "2024-06-28 16:17:32", - "latest_commit": "2024-07-19 08:08:59", + "score": -0.05974000808960754, + "first_commit": "2023-05-17 02:16:45", + "latest_commit": "2024-07-20 07:55:19", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": "LlamaForCausalLM" + "model_size": 3.76, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Japanese-StableLM-Base-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-7b", - "project_name": "japanese-stablelm-base-beta-7b", - "downloads": 3064, + "description": "rinna/youri-7b Overview We conduct continual pre-training of llama2-7b on 40B tokens from a mixture of Japanese and English datasets.", + "url": "https://huggingface.co./rinna/youri-7b", + "project_name": "youri-7b", + "downloads": 3706, "source": "Hugging Face", - "score": -0.06030143413293138, - "first_commit": "2023-10-30 07:43:36", - 
"latest_commit": "2023-12-19 06:43:01", + "score": -0.05974972612210654, + "first_commit": "2023-10-30 15:12:17", + "latest_commit": "2024-07-22 08:01:22", "languages": [], "model_or_dataset": "model", "model_size": 6.74, "model_architectures": "LlamaForCausalLM" }, - { - "description": "japanese-gpt-neox-3.6b-instruction-ppo Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-ppo", - "project_name": "japanese-gpt-neox-3.6b-instruction-ppo", - "downloads": 3002, - "source": "Hugging Face", - "score": -0.06095894106618408, - "first_commit": "2023-05-30 01:50:48", - "latest_commit": "2024-07-20 07:58:49", - "languages": [], - "model_or_dataset": "model", - "model_size": 3.76, - "model_architectures": "GPTNeoXForCausalLM" - }, { "description": "Japanese Morphological Analyzer written in Rust", "url": "https://github.com/togatoga/kanpyo", "project_name": "kanpyo", "stargazers_count": 93, "source": "GitHub", - "score": -0.06119775181070307, + "score": -0.060426754740408185, "first_commit": "2023-10-12 08:02:23", "latest_commit": "2024-08-20 08:56:37", "languages": [ @@ -3232,60 +3064,102 @@ "model_or_dataset": null }, { - "description": "stockmark/stockmark-13b Stockmark-13b is a 13 billion parameter LLM pretrained from scratch based on Japanese corpus of about 220B tokens.", - "url": "https://huggingface.co./stockmark/stockmark-13b", - "project_name": "stockmark-13b", - "downloads": 2863, + "description": "Llama-3.1-70B-Japanese-Instruct-2407-gguf cyberagentさんが公開しているLlama-3.1-70B-Japanese-Instruct-2407のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Japanese-Instruct-2407-gguf", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-gguf", + "downloads": 3629, "source": "Hugging Face", - "score": -0.062433029190734485, - "first_commit": "2023-10-21 06:53:06", - "latest_commit": "2024-05-17 06:15:56", + "score": -0.06049801462452974, + "first_commit": "2024-07-26 09:05:34", + "latest_commit": "2024-07-27 05:59:10", "languages": [], "model_or_dataset": "model", - "model_size": 13.2, + "model_size": 70.6, + "model_architectures": null + }, + { + "description": "Japanese-StableLM-Base-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-7b", + "project_name": "japanese-stablelm-base-beta-7b", + "downloads": 3507, + "source": "Hugging Face", + "score": -0.06168361458940806, + "first_commit": "2023-10-30 07:43:36", + "latest_commit": "2023-12-19 06:43:01", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.74, "model_architectures": "LlamaForCausalLM" }, { - "description": "Japanese SimCSE (BERT-base)", - "url": "https://huggingface.co./pkshatech/simcse-ja-bert-base-clcmlp", - "project_name": "simcse-ja-bert-base-clcmlp", - "downloads": 2850, + "description": "OpenCALM-Small Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-small", + "project_name": "open-calm-small", + "downloads": 3481, "source": "Hugging Face", - "score": 
-0.06257089354770683, - "first_commit": "2022-12-26 02:52:03", - "latest_commit": "2023-01-27 06:44:23", + "score": -0.06193628343438213, + "first_commit": "2023-05-15 06:40:15", + "latest_commit": "2023-05-18 01:10:33", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertModel" + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-v0.1", - "project_name": "Llama-3-Swallow-70B-v0.1", - "downloads": 2729, + "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", + "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-chat-v0.1", + "project_name": "karakuri-lm-70b-chat-v0.1", + "downloads": 3473, "source": "Hugging Face", - "score": -0.06385409256260323, - "first_commit": "2024-06-14 05:56:33", - "latest_commit": "2024-07-01 06:24:32", + "score": -0.062014027694374155, + "first_commit": "2024-01-26 09:08:09", + "latest_commit": "2024-05-07 09:00:17", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, + "model_size": 69.2, "model_architectures": "LlamaForCausalLM" }, { - "description": "clip-japanese-base This is a Japanese CLIP (Contrastive Language-Image Pre-training) model developed by LY Corporation.", - "url": "https://huggingface.co./line-corporation/clip-japanese-base", - "project_name": "clip-japanese-base", - "downloads": 2728, + "description": "【告知】chilled_remix及びreversemixは2023年5月21日にVersion変更を行い、v2へ移行いたしました。", + "url": "https://huggingface.co./sazyou-roukaku/chilled_remix", + "project_name": "chilled_remix", + "downloads": 3448, "source": "Hugging Face", - "score": -0.06386469751313956, - "first_commit": "2024-04-24 01:36:22", - "latest_commit": "2024-05-10 03:07:04", + "score": -0.06225697850684922, + "first_commit": "2023-04-18 12:48:48", + "latest_commit": "2023-06-09 23:08:31", "languages": [], "model_or_dataset": "model", - "model_size": 0.197, - "model_architectures": "CLYPModel" + "model_size": null, + "model_architectures": null + }, + { + "description": "rinna/japanese-wav2vec2-base Overview This is a Japanese wav2vec 2.0 Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-wav2vec2-base", + "project_name": "japanese-wav2vec2-base", + "downloads": 3412, + "source": "Hugging Face", + "score": -0.06260682767681332, + "first_commit": "2024-03-06 01:07:56", + "latest_commit": "2024-07-22 08:11:46", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.095, + "model_architectures": "Wav2Vec2ForPreTraining" + }, + { + "description": "japanese-gpt2-xsmall", + "url": "https://huggingface.co./rinna/japanese-gpt2-xsmall", + "project_name": "japanese-gpt2-xsmall", + "downloads": 3397, + "source": "Hugging Face", + "score": -0.06275259816429835, + "first_commit": "2021-07-26 02:52:54", + "latest_commit": "2024-07-20 07:48:11", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.0437, + "model_architectures": "GPT2LMHeadModel" }, { "description": "50k English-Japanese Parallel Corpus for Machine Translation Benchmark.", @@ -3293,94 +3167,94 @@ "project_name": "small_parallel_enja", "stargazers_count": 92, "source": "GitHub", - "score": -0.06414411631026169, + "score": -0.0633848072200516, "first_commit": "2016-10-27 12:27:03", "latest_commit": "2019-09-11 14:00:17", "languages": [], "model_or_dataset": "dataset" }, { - "description": "Llama-3.1-70B-Japanese-Instruct-2407 Model Description This is a Japanese continually 
pre-trained model based on meta-llama/Meta-Llama-3.1-70B-Instruct.", - "url": "https://huggingface.co./cyberagent/Llama-3.1-70B-Japanese-Instruct-2407", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407", - "downloads": 2661, + "description": "hotchpotch/japanese-reranker-cross-encoder-small-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-small-v1", + "project_name": "japanese-reranker-cross-encoder-small-v1", + "downloads": 3288, "source": "Hugging Face", - "score": -0.06457522919907392, - "first_commit": "2024-07-26 01:30:21", - "latest_commit": "2024-07-26 02:30:17", + "score": -0.06381186370668963, + "first_commit": "2024-03-28 04:31:45", + "latest_commit": "2024-04-01 02:39:19", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.11800000000000001, + "model_architectures": "XLMRobertaForSequenceClassification" }, { - "description": "rinna/japanese-wav2vec2-base Overview This is a Japanese wav2vec 2.0 Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-wav2vec2-base", - "project_name": "japanese-wav2vec2-base", - "downloads": 2633, + "description": "stockmark/stockmark-13b Stockmark-13b is a 13 billion parameter LLM pretrained from scratch based on Japanese corpus of about 220B tokens.", + "url": "https://huggingface.co./stockmark/stockmark-13b", + "project_name": "stockmark-13b", + "downloads": 3253, "source": "Hugging Face", - "score": -0.06487216781409127, - "first_commit": "2024-03-06 01:07:56", - "latest_commit": "2024-07-22 08:11:46", + "score": -0.06415199484415474, + "first_commit": "2023-10-21 06:53:06", + "latest_commit": "2024-05-17 06:15:56", "languages": [], "model_or_dataset": "model", - "model_size": 0.095, - "model_architectures": "Wav2Vec2ForPreTraining" + "model_size": 13.2, + "model_architectures": "LlamaForCausalLM" }, { - "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct", - "downloads": 2586, + "description": "japanese-gpt-neox-3.6b-instruction-ppo Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-ppo", + "project_name": "japanese-gpt-neox-3.6b-instruction-ppo", + "downloads": 3213, "source": "Hugging Face", - "score": -0.06537060048929896, - "first_commit": "2023-08-28 13:36:19", - "latest_commit": "2023-08-29 03:47:09", + "score": -0.06454071614411483, + "first_commit": "2023-05-30 01:50:48", + "latest_commit": "2024-07-20 07:58:49", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_size": 3.76, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "LLaVA-JP Model Card Model detail Model type: LLaVA-JP is a vision-language model that can converse about input images.", - "url": "https://huggingface.co./toshi456/llava-jp-1.3b-v1.0", - "project_name": "llava-jp-1.3b-v1.0", - "downloads": 2544, + "description": "bilingual-gpt-neox-4b Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", + "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b", + "project_name": "bilingual-gpt-neox-4b", + "downloads": 
3207, "source": "Hugging Face", - "score": -0.06581600841182499, - "first_commit": "2023-12-04 13:13:03", - "latest_commit": "2023-12-18 10:21:11", + "score": -0.06459902433910884, + "first_commit": "2023-07-31 02:34:03", + "latest_commit": "2024-07-20 08:02:07", "languages": [], "model_or_dataset": "model", - "model_size": 1.73, - "model_architectures": "LlavaGpt2ForCausalLM" + "model_size": 3.95, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Llama-3.1-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-8B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-8B-Instruct-gguf", - "project_name": "Llama-3.1-8B-Instruct-gguf", - "downloads": 2504, + "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b", + "project_name": "ELYZA-japanese-Llama-2-7b", + "downloads": 3133, "source": "Hugging Face", - "score": -0.06624020643327834, - "first_commit": "2024-07-23 16:33:06", - "latest_commit": "2024-07-24 21:04:40", + "score": -0.06531815874403504, + "first_commit": "2023-08-28 12:38:34", + "latest_commit": "2023-08-29 03:45:51", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": null, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-hf", - "project_name": "Swallow-70b-instruct-hf", - "downloads": 2429, + "description": "[Llama-3-EZO model card]", + "url": "https://huggingface.co./AXCXEPT/Llama-3-EZO-8b-Common-it", + "project_name": "Llama-3-EZO-8b-Common-it", + "downloads": 3106, "source": "Hugging Face", - "score": -0.06703557772350338, - "first_commit": "2023-12-11 07:23:47", - "latest_commit": "2024-06-29 08:56:31", + "score": -0.06558054562150811, + "first_commit": "2024-07-13 06:42:31", + "latest_commit": "2024-08-23 10:52:05", "languages": [], "model_or_dataset": "model", - "model_size": 69.2, + "model_size": 8.03, "model_architectures": "LlamaForCausalLM" }, { @@ -3389,7 +3263,7 @@ "project_name": "natto-py", "stargazers_count": 91, "source": "GitHub", - "score": -0.06709048080982032, + "score": -0.06634285969969503, "first_commit": "2014-10-24 20:56:40", "latest_commit": "2023-11-04 21:25:21", "languages": [ @@ -3405,7 +3279,7 @@ "project_name": "depccg", "stargazers_count": 91, "source": "GitHub", - "score": -0.06709048080982032, + "score": -0.06634285969969503, "first_commit": "2016-10-06 14:39:12", "latest_commit": "2023-08-26 16:03:23", "languages": [ @@ -3420,7 +3294,7 @@ "project_name": "oseti", "stargazers_count": 91, "source": "GitHub", - "score": -0.06709048080982032, + "score": -0.06634285969969503, "first_commit": "2019-02-12 02:03:26", "latest_commit": "2024-01-12 07:14:53", "languages": [ @@ -3429,82 +3303,82 @@ "model_or_dataset": null }, { - "description": "PLaMo-13B Model Description PLaMo-13B is a LLaMA-based 13B model pre-trained on English and Japanese open datasets, developed by Preferred Networks, Inc. 
", - "url": "https://huggingface.co./pfnet/plamo-13b", - "project_name": "plamo-13b", - "downloads": 2335, + "description": "Model Card for Model ID 実験モデルです /", + "url": "https://huggingface.co./mmnga/Llama-3-70B-japanese-suzume-vector-v0.1", + "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1", + "downloads": 2985, "source": "Hugging Face", - "score": -0.06803244307391876, - "first_commit": "2023-09-25 12:47:05", - "latest_commit": "2023-10-10 15:24:54", + "score": -0.06675642755388743, + "first_commit": "2024-04-28 04:11:49", + "latest_commit": "2024-04-28 07:46:32", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "PlamoForCausalLM" + "model_size": 70.6, + "model_architectures": "LlamaForCausalLM" }, { - "description": "rinna/japanese-hubert-base Overview This is a Japanese HuBERT Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-hubert-base", - "project_name": "japanese-hubert-base", - "downloads": 2293, + "description": "Japanese Stable LM Instruct Gamma 7B Model Description", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-gamma-7b", + "project_name": "japanese-stablelm-instruct-gamma-7b", + "downloads": 2935, "source": "Hugging Face", - "score": -0.06847785099644478, - "first_commit": "2023-04-28 07:39:44", - "latest_commit": "2024-07-20 08:55:38", + "score": -0.06724232917883756, + "first_commit": "2023-10-16 08:55:06", + "latest_commit": "2024-01-24 05:54:38", "languages": [], "model_or_dataset": "model", - "model_size": 0.09440000000000001, - "model_architectures": "HubertModel" + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "bert-base-japanese-v3-marc_ja 「大規模言語モデル入門」の第5章で紹介している(感情分析)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-marc_ja", - "project_name": "bert-base-japanese-v3-marc_ja", - "downloads": 2290, + "description": "wav2vec2-base-asr", + "url": "https://huggingface.co./TKU410410103/wav2vec2-base-japanese-asr", + "project_name": "wav2vec2-base-japanese-asr", + "downloads": 2922, "source": "Hugging Face", - "score": -0.06850966584805379, - "first_commit": "2023-06-01 14:29:06", - "latest_commit": "2023-07-24 06:49:13", + "score": -0.0673686636013246, + "first_commit": "2024-04-14 10:22:21", + "latest_commit": "2024-04-14 14:00:30", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_size": 0.0945, + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "Converted from clu-ling/whisper-large-v2-japanese-5k-steps using CTranslate2.", - "url": "https://huggingface.co./zh-plus/faster-whisper-large-v2-japanese-5k-steps", - "project_name": "faster-whisper-large-v2-japanese-5k-steps", - "downloads": 2261, + "description": "LLaVA-JP Model Card Model detail Model type: LLaVA-JP is a vision-language model that can converse about input images.", + "url": "https://huggingface.co./toshi456/llava-jp-1.3b-v1.0", + "project_name": "llava-jp-1.3b-v1.0", + "downloads": 2907, "source": "Hugging Face", - "score": -0.06881720941360747, - "first_commit": "2023-07-03 08:29:37", - "latest_commit": "2023-07-03 18:42:31", + "score": -0.06751443408880964, + "first_commit": "2023-12-04 13:13:03", + "latest_commit": "2023-12-18 10:21:11", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 1.73, + "model_architectures": "LlavaGpt2ForCausalLM" }, { - "description": 
"japanese-large-lm-3.6b", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b", - "project_name": "japanese-large-lm-3.6b", - "downloads": 2188, + "description": "hubert-base-asr", + "url": "https://huggingface.co./TKU410410103/hubert-base-japanese-asr", + "project_name": "hubert-base-japanese-asr", + "downloads": 2873, "source": "Hugging Face", - "score": -0.06959137080275984, - "first_commit": "2023-07-21 00:48:05", - "latest_commit": "2023-08-17 01:06:17", + "score": -0.06784484719377573, + "first_commit": "2024-04-09 06:01:43", + "latest_commit": "2024-04-14 13:20:43", "languages": [], "model_or_dataset": "model", - "model_size": 3.68, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 0.0945, + "model_architectures": "HubertForCTC" }, { "description": "BERT base Japanese (character tokenization, whole word masking enabled)", "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-whole-word-masking", "project_name": "bert-base-japanese-char-whole-word-masking", - "downloads": 2175, + "downloads": 2834, "source": "Hugging Face", - "score": -0.06972923515973217, + "score": -0.06822385046123683, "first_commit": "2020-04-28 21:34:13", "latest_commit": "2024-02-22 00:58:18", "languages": [], @@ -3513,17 +3387,17 @@ "model_architectures": "BertForMaskedLM" }, { - "description": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fast-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", - "downloads": 2172, + "description": "Llama-3-ELYZA-JP-8B-GGUF Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-GGUF", + "project_name": "Llama-3-ELYZA-JP-8B-GGUF", + "downloads": 2799, "source": "Hugging Face", - "score": -0.06976105001134118, - "first_commit": "2023-08-29 15:31:01", - "latest_commit": "2023-11-16 14:27:48", + "score": -0.06856398159870192, + "first_commit": "2024-06-25 07:29:22", + "latest_commit": "2024-06-26 02:56:52", "languages": [], "model_or_dataset": "model", - "model_size": 6.85, + "model_size": 8.03, "model_architectures": null }, { @@ -3532,7 +3406,7 @@ "project_name": "shiba", "stargazers_count": 90, "source": "GitHub", - "score": -0.07003684530937894, + "score": -0.06930091217933845, "first_commit": "2021-06-24 20:17:27", "latest_commit": "2023-11-03 10:01:53", "languages": [ @@ -3546,7 +3420,7 @@ "project_name": "open2ch-dialogue-corpus", "stargazers_count": 90, "source": "GitHub", - "score": -0.07003684530937894, + "score": -0.06930091217933845, "first_commit": "2019-09-13 11:21:53", "latest_commit": "2021-06-07 00:06:23", "languages": [ @@ -3555,40 +3429,40 @@ "model_or_dataset": "dataset" }, { - "description": "Leia-Swallow-7B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", - "url": "https://huggingface.co./leia-llm/Leia-Swallow-7b", - "project_name": "Leia-Swallow-7b", - "downloads": 2098, + "description": "BERT large Japanese (unidic-lite with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese", + "project_name": "bert-large-japanese", + "downloads": 2696, "source": "Hugging Face", - "score": -0.07054581635102988, - "first_commit": "2024-04-17 07:12:28", - 
"latest_commit": "2024-04-17 10:29:56", + "score": -0.06956493894609919, + "first_commit": "2021-03-05 06:17:13", + "latest_commit": "2021-09-23 15:45:41", "languages": [], "model_or_dataset": "model", - "model_size": 6.83, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-instruct-1_6b", - "project_name": "japanese-stablelm-2-instruct-1_6b", - "downloads": 2093, + "description": "rinna/japanese-hubert-base Overview This is a Japanese HuBERT Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-hubert-base", + "project_name": "japanese-hubert-base", + "downloads": 2679, "source": "Hugging Face", - "score": -0.07059884110371155, - "first_commit": null, - "latest_commit": null, + "score": -0.06973014549858224, + "first_commit": "2023-04-28 07:39:44", + "latest_commit": "2024-07-20 08:55:38", "languages": [], "model_or_dataset": "model", - "model_size": 1.64, - "model_architectures": null + "model_size": 0.09440000000000001, + "model_architectures": "HubertModel" }, { "description": "This dataset was created by automatically translating \"databricks-dolly-15k\" into Japanese.", "url": "https://huggingface.co./datasets/kunishou/databricks-dolly-15k-ja", "project_name": "databricks-dolly-15k-ja", - "downloads": 2079, + "downloads": 2577, "source": "Hugging Face", - "score": -0.07074731041122023, + "score": -0.0707213848134805, "first_commit": "2023-04-13 08:31:08", "latest_commit": "2024-04-01 17:26:37", "languages": [], @@ -3597,250 +3471,234 @@ "model_architectures": null }, { - "description": "Gemma-Mling: Multilingual Gemma Update @ 2024.04.15: First release of Gemma-Mling 7B model Original Gemma Model Page:", - "url": "https://huggingface.co./beomi/gemma-mling-7b", - "project_name": "gemma-mling-7b", - "downloads": 2070, + "description": "Japanese SimCSE (BERT-base)", + "url": "https://huggingface.co./pkshatech/simcse-ja-bert-base-clcmlp", + "project_name": "simcse-ja-bert-base-clcmlp", + "downloads": 2555, "source": "Hugging Face", - "score": -0.07084275496604724, - "first_commit": "2024-04-15 05:37:05", - "latest_commit": "2024-04-18 14:28:20", + "score": -0.07093518152845855, + "first_commit": "2022-12-26 02:52:03", + "latest_commit": "2023-01-27 06:44:23", "languages": [], "model_or_dataset": "model", - "model_size": 8.54, - "model_architectures": "GemmaForCausalLM" + "model_size": null, + "model_architectures": "BertModel" }, { - "description": "Leia-Swallow-13B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", - "url": "https://huggingface.co./leia-llm/Leia-Swallow-13b", - "project_name": "Leia-Swallow-13b", - "downloads": 2058, + "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct", + "downloads": 2524, "source": "Hugging Face", - "score": -0.07097001437248324, - "first_commit": "2024-04-17 07:32:11", - "latest_commit": "2024-04-18 05:21:10", + "score": -0.07123644053592763, + "first_commit": "2023-08-28 13:36:19", + 
"latest_commit": "2023-08-29 03:47:09", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, + "model_size": null, "model_architectures": "LlamaForCausalLM" }, { - "description": "bilingual-gpt-neox-4b Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", - "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b", - "project_name": "bilingual-gpt-neox-4b", - "downloads": 2048, + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-Instruct-v0.1", + "project_name": "Llama-3-Swallow-70B-Instruct-v0.1", + "downloads": 2520, "source": "Hugging Face", - "score": -0.07107606387784658, - "first_commit": "2023-07-31 02:34:03", - "latest_commit": "2024-07-20 08:02:07", + "score": -0.07127531266592364, + "first_commit": "2024-06-28 16:17:32", + "latest_commit": "2024-07-19 08:08:59", "languages": [], "model_or_dataset": "model", - "model_size": 3.95, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 70.6, + "model_architectures": "LlamaForCausalLM" }, { - "description": "wav2vec2-base-asr", - "url": "https://huggingface.co./TKU410410103/wav2vec2-base-japanese-asr", - "project_name": "wav2vec2-base-japanese-asr", - "downloads": 2032, + "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoさんが公開しているjapanese-novel-gpt-j-6bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/aibuncho-japanese-novel-gpt-j-6b-gguf", + "project_name": "aibuncho-japanese-novel-gpt-j-6b-gguf", + "downloads": 2486, "source": "Hugging Face", - "score": -0.07124574308642792, - "first_commit": "2024-04-14 10:22:21", - "latest_commit": "2024-04-14 14:00:30", + "score": -0.07160572577088974, + "first_commit": "2023-09-03 17:32:44", + "latest_commit": "2023-09-11 01:10:36", "languages": [], "model_or_dataset": "model", - "model_size": 0.0945, - "model_architectures": "Wav2Vec2ForCTC" + "model_size": 6.05, + "model_architectures": null }, { - "description": "FINGU-AI/FinguAI-Chat-v1 Overview The FINGU-AI/FinguAI-Chat-v1 model offers a specialized curriculum tailored to English, Korean, and Japanese speakers interested in finance, investment, and legal frameworks.", - "url": "https://huggingface.co./FINGU-AI/FinguAI-Chat-v1", - "project_name": "FinguAI-Chat-v1", - "downloads": 2007, + "description": "Ruri: Japanese General Text Embeddings Usage First install the Sentence Transformers library: pip install -U sentence-transformers Then you can load this model and run inference.", + "url": "https://huggingface.co./cl-nagoya/ruri-large", + "project_name": "ruri-large", + "downloads": 2444, "source": "Hugging Face", - "score": -0.07151086684983626, - "first_commit": "2024-03-21 07:08:05", - "latest_commit": "2024-03-22 09:36:44", + "score": -0.07201388313584785, + "first_commit": "2024-08-28 17:11:42", + "latest_commit": "2024-09-04 08:49:10", "languages": [], "model_or_dataset": "model", - "model_size": 0.464, - "model_architectures": "Qwen2ForCausalLM" + "model_size": 0.337, + "model_architectures": "BertModel" }, { - "description": "Llama-3-Swallow-70B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-70B-Instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3-Swallow-70B-Instruct-v0.1-gguf", - "project_name": "Llama-3-Swallow-70B-Instruct-v0.1-gguf", - "downloads": 2001, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": 
"https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-hf", + "project_name": "Swallow-70b-instruct-hf", + "downloads": 2418, "source": "Hugging Face", - "score": -0.07157449655305427, - "first_commit": "2024-07-01 14:21:29", - "latest_commit": "2024-07-07 05:04:16", + "score": -0.07226655198082192, + "first_commit": "2023-12-11 07:23:47", + "latest_commit": "2024-06-29 08:56:31", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": null + "model_size": 69.2, + "model_architectures": "LlamaForCausalLM" }, { - "description": "hubert-base-asr", - "url": "https://huggingface.co./TKU410410103/hubert-base-japanese-asr", - "project_name": "hubert-base-japanese-asr", - "downloads": 1984, + "description": "Llama-3.1-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-8B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-8B-Instruct-gguf", + "project_name": "Llama-3.1-8B-Instruct-gguf", + "downloads": 2398, "source": "Hugging Face", - "score": -0.07175478071217195, - "first_commit": "2024-04-09 06:01:43", - "latest_commit": "2024-04-14 13:20:43", + "score": -0.07246091263080197, + "first_commit": "2024-07-23 16:33:06", + "latest_commit": "2024-07-24 21:04:40", "languages": [], "model_or_dataset": "model", - "model_size": 0.0945, - "model_architectures": "HubertForCTC" + "model_size": 8.03, + "model_architectures": null }, { - "description": "Stockmark-13b-instruct Stockmark-13b-instruct is an instruction-tuned version of Stockmark-13b, a 13 billion parameter Japanese LLM.", - "url": "https://huggingface.co./stockmark/stockmark-13b-instruct", - "project_name": "stockmark-13b-instruct", - "downloads": 1976, + "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-instruct", + "project_name": "ELYZA-japanese-Llama-2-13b-instruct", + "downloads": 2200, "source": "Hugging Face", - "score": -0.07183962031646261, - "first_commit": "2023-11-08 16:56:34", - "latest_commit": "2023-11-08 17:02:17", + "score": -0.07438508306560448, + "first_commit": "2023-12-25 16:10:32", + "latest_commit": "2023-12-27 01:41:15", "languages": [], "model_or_dataset": "model", - "model_size": 13.2, + "model_size": null, "model_architectures": "LlamaForCausalLM" }, { - "description": "Kotoba-Whisper Kotoba-Whisper is a collection of distilled Whisper models for Japanese ASR, developed through the collaboration bewteen Asahi Ushio and Kotoba Technologies.", - "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.0", - "project_name": "kotoba-whisper-v1.0", - "downloads": 1960, - "source": "Hugging Face", - "score": -0.07200929952504395, - "first_commit": "2024-04-14 08:53:48", - "latest_commit": "2024-05-08 12:40:53", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.756, - "model_architectures": "WhisperForConditionalGeneration" + "description": "A Python Module for JUMAN++/KNP", + "url": "https://github.com/ku-nlp/pyknp", + "project_name": "pyknp", + "stargazers_count": 88, + "source": "GitHub", + "score": -0.0752170171386253, + "first_commit": "2015-04-08 15:25:47", + "latest_commit": "2024-07-06 15:16:48", + "languages": [ + "Python" + ], + "model_or_dataset": null }, { - "description": "calm3-22b-RP-GGUF 概要 Aratako/calm3-22b-RPの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/calm3-22b-RP-GGUF", - "project_name": "calm3-22b-RP-GGUF", - "downloads": 1936, - "source": "Hugging 
Face", - "score": -0.07226381833791597, - "first_commit": "2024-08-21 01:13:32", - "latest_commit": "2024-08-21 13:26:35", + "description": "STAIR captions: large-scale Japanese image caption dataset", + "url": "https://github.com/STAIR-Lab-CIT/STAIR-captions", + "project_name": "STAIR-captions", + "stargazers_count": 88, + "source": "GitHub", + "score": -0.0752170171386253, + "first_commit": "2017-02-21 16:49:14", + "latest_commit": "2018-07-04 18:24:35", "languages": [], - "model_or_dataset": "model", - "model_size": 22.5, - "model_architectures": null + "model_or_dataset": "dataset" }, { - "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", - "url": "https://huggingface.co./fishaudio/fish-speech-1.2-sft", - "project_name": "fish-speech-1.2-sft", - "downloads": 1903, - "source": "Hugging Face", - "score": -0.07261378170561499, - "first_commit": "2024-07-18 08:00:29", - "latest_commit": "2024-08-02 08:13:06", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "description": "NMeCab: About Japanese morphological analyzer on .NET", + "url": "https://github.com/komutan/NMeCab", + "project_name": "NMeCab", + "stargazers_count": 88, + "source": "GitHub", + "score": -0.0752170171386253, + "first_commit": "2014-04-24 17:34:29", + "latest_commit": "2024-03-31 03:51:55", + "languages": [ + "C#" + ], + "model_or_dataset": "dataset" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-hf", - "project_name": "Swallow-13b-hf", - "downloads": 1800, + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-v0.1", + "project_name": "Llama-3-Swallow-70B-v0.1", + "downloads": 2089, "source": "Hugging Face", - "score": -0.07370609161085738, - "first_commit": "2023-11-16 15:40:49", - "latest_commit": "2024-06-29 08:56:21", + "score": -0.07546378467299378, + "first_commit": "2024-06-14 05:56:33", + "latest_commit": "2024-07-01 06:24:32", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 70.6, "model_architectures": "LlamaForCausalLM" }, { - "description": "recruit-jp/japanese-clip-vit-b-32-roberta-base Overview Developed by: Recruit Co.", - "url": "https://huggingface.co./recruit-jp/japanese-clip-vit-b-32-roberta-base", - "project_name": "japanese-clip-vit-b-32-roberta-base", - "downloads": 1795, + "description": "Llama-3.1-70B-Japanese-Instruct-2407 Model Description This is a Japanese continually pre-trained model based on meta-llama/Meta-Llama-3.1-70B-Instruct.", + "url": "https://huggingface.co./cyberagent/Llama-3.1-70B-Japanese-Instruct-2407", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407", + "downloads": 2082, "source": "Hugging Face", - "score": -0.07375911636353905, - "first_commit": "2023-12-20 06:06:12", - "latest_commit": "2024-01-22 07:41:59", + "score": -0.0755318109004868, + "first_commit": "2024-07-26 01:30:21", + "latest_commit": "2024-07-26 02:30:17", "languages": [], "model_or_dataset": "model", - "model_size": 0.198, - "model_architectures": "JapaneseCLIPModel" + "model_size": 70.6, + "model_architectures": "LlamaForCausalLM" }, { - "description": "OpenCALM-Large Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, 
developed by", - "url": "https://huggingface.co./cyberagent/open-calm-large", - "project_name": "open-calm-large", - "downloads": 1779, + "description": "nlp-waseda/roberta-large-japanese-seq512 Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100 with the maximum sequence length of 512.", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512", + "project_name": "roberta-large-japanese-seq512", + "downloads": 2033, "source": "Hugging Face", - "score": -0.07392879557212038, - "first_commit": "2023-05-15 06:50:24", - "latest_commit": "2023-05-18 01:11:13", + "score": -0.07600799449293792, + "first_commit": "2022-06-13 09:46:45", + "latest_commit": "2022-10-21 14:49:40", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" - }, - { - "description": "alabnii/jmedroberta-base-sentencepiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece", - "project_name": "jmedroberta-base-sentencepiece", - "downloads": 1776, - "source": "Hugging Face", - "score": -0.07396061042372938, - "first_commit": "2022-12-22 17:20:33", - "latest_commit": "2023-03-21 23:57:37", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.109, - "model_architectures": "BertForMaskedLM" + "model_architectures": "RobertaForMaskedLM" }, { - "description": "自動生成のマルチターンデータセット オープンなデータソースから、Calm3-22bを使ってQ&Aを自動生成したものです。 一部の計算には東京工業大学のスーパーコンピュータTSUBAME4.0を利用しました。 データソース はじめの質問(q1)を、種々のデータソースから収集しました。その後のやりとりはすべて、Calmが生成しました。質問文については、元データのライセンスに準拠します。 oasst2-33k-ja apache 2.0 databricks-dolly-15k-ja cc-by-sa-3.0 minnade CC0 cyberagent/chatbot-arena-ja-calm2-7b-chat-experimental cc-by-4.0", - "url": "https://huggingface.co./datasets/kanhatakeyama/AutoMultiTurnByCalm3-22B", - "project_name": "AutoMultiTurnByCalm3-22B", - "downloads": 1776, + "description": "This dataset contains a diverse set of natural Japanese speech, collected from terrestrial television streams.", + "url": "https://huggingface.co./datasets/reazon-research/reazonspeech", + "project_name": "reazonspeech", + "downloads": 1977, "source": "Hugging Face", - "score": -0.07396061042372938, - "first_commit": "2024-07-17 09:53:20", - "latest_commit": "2024-07-17 10:03:02", + "score": -0.07655220431288207, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jsts", - "project_name": "bert-base-japanese-v3-jsts", - "downloads": 1730, + "description": "BERTによる日本語固有表現抽出のモデル BertForTokenClassificationを用いて、日本語の文から固有表現を抽出します。 ", + "url": "https://huggingface.co./jurabi/bert-ner-japanese", + "project_name": "bert-ner-japanese", + "downloads": 1969, "source": "Hugging Face", - "score": -0.07444843814840074, - "first_commit": "2023-06-11 15:27:32", - "latest_commit": "2023-07-29 11:27:18", + "score": -0.0766299485728741, + "first_commit": "2022-09-26 07:46:38", + "latest_commit": "2022-09-26 12:13:44", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_architectures": "BertForTokenClassification" }, { 
"description": "upskyy/gte-korean-base This model is korsts and kornli finetuning model from Alibaba-NLP/gte-multilingual-base.", "url": "https://huggingface.co./upskyy/gte-base-korean", "project_name": "gte-base-korean", - "downloads": 1726, + "downloads": 1957, "source": "Hugging Face", - "score": -0.07449085795054608, + "score": -0.07674656496286213, "first_commit": "2024-08-08 14:34:44", "latest_commit": "2024-08-08 15:29:27", "languages": [], @@ -3849,26 +3707,26 @@ "model_architectures": "NewModel" }, { - "description": "Model Card for Japanese character-level DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-char-wwm", - "project_name": "deberta-v2-base-japanese-char-wwm", - "downloads": 1700, + "description": "japanese-large-lm-3.6b", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b", + "project_name": "japanese-large-lm-3.6b", + "downloads": 1950, "source": "Hugging Face", - "score": -0.07476658666449076, - "first_commit": "2023-01-18 13:55:30", - "latest_commit": "2023-03-26 03:32:27", + "score": -0.07681459119035515, + "first_commit": "2023-07-21 00:48:05", + "latest_commit": "2023-08-17 01:06:17", "languages": [], "model_or_dataset": "model", - "model_size": 0.122, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": 3.68, + "model_architectures": "GPTNeoXForCausalLM" }, { "description": "uniTKU-hubert-japanese-asr", "url": "https://huggingface.co./TKU410410103/uniTKU-hubert-japanese-asr", "project_name": "uniTKU-hubert-japanese-asr", - "downloads": 1694, + "downloads": 1914, "source": "Hugging Face", - "score": -0.07483021636770876, + "score": -0.07716444036031923, "first_commit": "2024-04-20 14:59:52", "latest_commit": "2024-04-22 18:37:33", "languages": [], @@ -3877,12 +3735,40 @@ "model_architectures": "HubertForCTC" }, { - "description": "hubert-large-asr", - "url": "https://huggingface.co./TKU410410103/hubert-large-japanese-asr", - "project_name": "hubert-large-japanese-asr", - "downloads": 1687, + "description": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fast-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", + "downloads": 1910, "source": "Hugging Face", - "score": -0.0749044510214631, + "score": -0.07720331249031524, + "first_commit": "2023-08-29 15:31:01", + "latest_commit": "2023-11-16 14:27:48", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.85, + "model_architectures": null + }, + { + "description": "japanese-gpt2-small This repository provides a small-sized Japanese GPT-2 model.", + "url": "https://huggingface.co./rinna/japanese-gpt2-small", + "project_name": "japanese-gpt2-small", + "downloads": 1883, + "source": "Hugging Face", + "score": -0.07746569936778831, + "first_commit": "2021-06-15 06:32:27", + "latest_commit": "2024-07-20 07:49:31", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.123, + "model_architectures": "GPT2LMHeadModel" + }, + { + "description": "hubert-large-asr", + "url": "https://huggingface.co./TKU410410103/hubert-large-japanese-asr", + "project_name": "hubert-large-japanese-asr", + "downloads": 1876, + "source": "Hugging Face", + "score": -0.07753372559528134, "first_commit": "2024-04-09 03:01:08", 
"latest_commit": "2024-04-14 13:21:01", "languages": [], @@ -3891,82 +3777,122 @@ "model_architectures": "HubertForCTC" }, { - "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b", - "project_name": "ELYZA-japanese-Llama-2-13b", - "downloads": 1677, + "description": "FINGU-AI/FinguAI-Chat-v1 Overview The FINGU-AI/FinguAI-Chat-v1 model offers a specialized curriculum tailored to English, Korean, and Japanese speakers interested in finance, investment, and legal frameworks.", + "url": "https://huggingface.co./FINGU-AI/FinguAI-Chat-v1", + "project_name": "FinguAI-Chat-v1", + "downloads": 1866, "source": "Hugging Face", - "score": -0.07501050052682644, - "first_commit": "2023-12-25 16:38:08", - "latest_commit": "2023-12-27 01:40:43", + "score": -0.07763090592027136, + "first_commit": "2024-03-21 07:08:05", + "latest_commit": "2024-03-22 09:36:44", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.464, + "model_architectures": "Qwen2ForCausalLM" }, { - "description": "BERTによる日本語固有表現抽出のモデル BertForTokenClassificationを用いて、日本語の文から固有表現を抽出します。 ", - "url": "https://huggingface.co./jurabi/bert-ner-japanese", - "project_name": "bert-ner-japanese", - "downloads": 1667, + "description": "recruit-jp/japanese-clip-vit-b-32-roberta-base Overview Developed by: Recruit Co.", + "url": "https://huggingface.co./recruit-jp/japanese-clip-vit-b-32-roberta-base", + "project_name": "japanese-clip-vit-b-32-roberta-base", + "downloads": 1850, "source": "Hugging Face", - "score": -0.07511655003218978, - "first_commit": "2022-09-26 07:46:38", - "latest_commit": "2022-09-26 12:13:44", + "score": -0.07778639444025541, + "first_commit": "2023-12-20 06:06:12", + "latest_commit": "2024-01-22 07:41:59", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.198, + "model_architectures": "JapaneseCLIPModel" + }, + { + "description": "Gemma-Mling: Multilingual Gemma Update @ 2024.04.15: First release of Gemma-Mling 7B model Original Gemma Model Page:", + "url": "https://huggingface.co./beomi/gemma-mling-7b", + "project_name": "gemma-mling-7b", + "downloads": 1849, + "source": "Hugging Face", + "score": -0.0777961124727544, + "first_commit": "2024-04-15 05:37:05", + "latest_commit": "2024-04-18 14:28:20", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.54, + "model_architectures": "GemmaForCausalLM" + }, + { + "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jsts", + "project_name": "bert-base-japanese-v3-jsts", + "downloads": 1829, + "source": "Hugging Face", + "score": -0.07799047312273447, + "first_commit": "2023-06-11 15:27:32", + "latest_commit": "2023-07-29 11:27:18", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForTokenClassification" + "model_architectures": "BertForSequenceClassification" }, { - "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", - "url": "https://huggingface.co./rinna/japanese-gpt-1b", - "project_name": "japanese-gpt-1b", - "downloads": 1666, + "description": "calm3-22b-RP-GGUF 概要 Aratako/calm3-22b-RPの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/calm3-22b-RP-GGUF", + "project_name": "calm3-22b-RP-GGUF", + "downloads": 1821, "source": 
"Hugging Face", - "score": -0.07512715498272611, - "first_commit": "2022-01-20 02:30:19", - "latest_commit": "2024-07-20 07:52:31", + "score": -0.07806821738272648, + "first_commit": "2024-08-21 01:13:32", + "latest_commit": "2024-08-21 13:26:35", "languages": [], "model_or_dataset": "model", - "model_size": 1.33, - "model_architectures": "GPT2LMHeadModel" + "model_size": 22.5, + "model_architectures": null }, { - "description": "Japanese-StableLM-Base-Alpha-7B \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-base-alpha-7b is a 7B-parameter decoder-only language model pre-trained on a diverse collection of Japanese and English datasets which focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-alpha-7b", - "project_name": "japanese-stablelm-base-alpha-7b", - "downloads": 1647, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-hf", + "project_name": "Swallow-7b-NVE-hf", + "downloads": 1815, "source": "Hugging Face", - "score": -0.07532864904291646, - "first_commit": "2023-08-09 14:30:09", - "latest_commit": "2023-08-22 09:36:29", + "score": -0.0781265255777205, + "first_commit": "2023-11-30 09:02:26", + "latest_commit": "2024-06-29 08:56:18", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "JapaneseStableLMAlphaForCausalLM" + "model_architectures": "LlamaForCausalLM" }, { - "description": "OpenCALM-1B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-1b", - "project_name": "open-calm-1b", - "downloads": 1641, + "description": "alpacaデータセットを日本語化したものです", + "url": "https://github.com/shi3z/alpaca_ja", + "project_name": "alpaca_ja", + "stargazers_count": 87, + "source": "GitHub", + "score": -0.07817506961826873, + "first_commit": "2023-04-01 07:03:33", + "latest_commit": "2023-05-17 15:43:50", + "languages": [], + "model_or_dataset": "dataset" + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-hf", + "project_name": "Swallow-13b-hf", + "downloads": 1790, "source": "Hugging Face", - "score": -0.07539227874613445, - "first_commit": "2023-05-15 07:00:18", - "latest_commit": "2023-05-18 01:11:30", + "score": -0.07836947639019556, + "first_commit": "2023-11-16 15:40:49", + "latest_commit": "2024-06-29 08:56:21", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" + "model_architectures": "LlamaForCausalLM" }, { "description": "Llama-3.1-8B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-8B-EZO-1.1-itのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Llama-3.1-8B-EZO-1.1-it-gguf", "project_name": "Llama-3.1-8B-EZO-1.1-it-gguf", - "downloads": 1627, + "downloads": 1784, "source": "Hugging Face", - "score": -0.07554074805364314, + "score": -0.07842778458518958, "first_commit": "2024-07-31 11:06:36", "latest_commit": "2024-07-31 12:47:45", "languages": [], @@ -3975,136 +3901,306 @@ "model_architectures": null }, { - "description": 
"luke-japanese-large-lite luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-large-lite", - "project_name": "luke-japanese-large-lite", - "downloads": 1621, + "description": "日本語向け Llama 3 8B はじめに このリポジトリはLlama 3を日本語化しようとしたモデルのリポジトリです。", + "url": "https://huggingface.co./alfredplpl/Llama-3-8B-Instruct-Ja", + "project_name": "Llama-3-8B-Instruct-Ja", + "downloads": 1750, "source": "Hugging Face", - "score": -0.07560437775686113, - "first_commit": "2022-11-07 14:26:40", - "latest_commit": "2022-11-09 11:19:36", + "score": -0.07875819769015567, + "first_commit": "2024-04-22 05:14:33", + "latest_commit": "2024-05-01 19:16:01", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LukeForMaskedLM" + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "ELYZA-tasks-100: 日本語instructionモデル評価データセット Data Description 本データセットはinstruction-tuningを行ったモデルの評価用データセットです。", - "url": "https://huggingface.co./datasets/elyza/ELYZA-tasks-100", - "project_name": "ELYZA-tasks-100", - "downloads": 1616, + "description": "OpenCALM-Large Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-large", + "project_name": "open-calm-large", + "downloads": 1725, "source": "Hugging Face", - "score": -0.07565740250954281, - "first_commit": "2023-08-28 09:01:44", - "latest_commit": "2023-12-27 18:17:36", + "score": -0.07900114850263074, + "first_commit": "2023-05-15 06:50:24", + "latest_commit": "2023-05-18 01:11:13", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "A Python Module for JUMAN++/KNP", - "url": "https://github.com/ku-nlp/pyknp", - "project_name": "pyknp", - "stargazers_count": 88, - "source": "GitHub", - "score": -0.07592957430849617, - "first_commit": "2015-04-08 15:25:47", - "latest_commit": "2024-07-06 15:16:48", - "languages": [ - "Python" - ], - "model_or_dataset": null + "description": "GLuCoSE v2", + "url": "https://huggingface.co./pkshatech/GLuCoSE-base-ja-v2", + "project_name": "GLuCoSE-base-ja-v2", + "downloads": 1717, + "source": "Hugging Face", + "score": -0.07907889276262275, + "first_commit": "2024-08-22 03:16:48", + "latest_commit": "2024-09-18 09:21:54", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.133, + "model_architectures": "LukeModel" }, { - "description": "STAIR captions: large-scale Japanese image caption dataset", - "url": "https://github.com/STAIR-Lab-CIT/STAIR-captions", - "project_name": "STAIR-captions", - "stargazers_count": 88, - "source": "GitHub", - "score": -0.07592957430849617, - "first_commit": "2017-02-21 16:49:14", - "latest_commit": "2018-07-04 18:24:35", + "description": "alabnii/jmedroberta-base-sentencepiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece", + "project_name": "jmedroberta-base-sentencepiece", + "downloads": 1693, + "source": "Hugging Face", + "score": -0.07931212554259881, + "first_commit": "2022-12-22 
17:20:33", + "latest_commit": "2023-03-21 23:57:37", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.109, + "model_architectures": "BertForMaskedLM" }, { - "description": "NMeCab: About Japanese morphological analyzer on .NET", - "url": "https://github.com/komutan/NMeCab", - "project_name": "NMeCab", - "stargazers_count": 88, + "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF Original Model elyza/ELYZA-japanese-Llama-2-13b-fast-instruct Run with LlamaEdge LlamaEdge version: v0.2.8 and above Prompt template Prompt type: llama-2-chat Prompt string <s>[INST] <<SYS>> {{ system_prompt }} <</SYS>> {{ user_msg_1 }}", + "url": "https://huggingface.co./second-state/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "downloads": 1665, + "source": "Hugging Face", + "score": -0.07958423045257089, + "first_commit": "2024-01-06 03:33:53", + "latest_commit": "2024-03-20 07:21:25", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.1, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Model Card for Japanese character-level DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-char-wwm", + "project_name": "deberta-v2-base-japanese-char-wwm", + "downloads": 1638, + "source": "Hugging Face", + "score": -0.07984661733004396, + "first_commit": "2023-01-18 13:55:30", + "latest_commit": "2023-03-26 03:32:27", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.122, + "model_architectures": "DebertaV2ForMaskedLM" + }, + { + "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b", + "project_name": "ELYZA-japanese-Llama-2-13b", + "downloads": 1607, + "source": "Hugging Face", + "score": -0.08014787633751304, + "first_commit": "2023-12-25 16:38:08", + "latest_commit": "2023-12-27 01:40:43", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-hf", + "project_name": "Swallow-13b-instruct-hf", + "downloads": 1522, + "source": "Hugging Face", + "score": -0.08097390909992827, + "first_commit": "2023-12-07 03:10:55", + "latest_commit": "2024-06-29 08:56:29", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.1, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "GUIで動作する文書校正ツール GUI tool for textlinting.", + "url": "https://github.com/gecko655/proofreading-tool", + "project_name": "proofreading-tool", + "stargazers_count": 86, "source": "GitHub", - "score": -0.07592957430849617, - "first_commit": "2014-04-24 17:34:29", - "latest_commit": "2024-03-31 03:51:55", + "score": -0.08113312209791214, + "first_commit": "2021-04-08 12:10:36", + "latest_commit": "2024-06-22 20:18:09", "languages": [ - "C#" + "JavaScript" ], - "model_or_dataset": "dataset" + "model_or_dataset": null }, { - "description": "HODACHI-EZO-Common-9B-gemma-2-it-gguf 
HODACHIさんが公開しているEZO-Common-9B-gemma-2-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-9B-gemma-2-it-gguf", - "project_name": "HODACHI-EZO-Common-9B-gemma-2-it-gguf", - "downloads": 1582, + "description": "japanese-large-lm-3.6b-instruction-sft", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft", + "project_name": "japanese-large-lm-3.6b-instruction-sft", + "downloads": 1456, "source": "Hugging Face", - "score": -0.07601797082777816, - "first_commit": "2024-07-15 15:42:39", - "latest_commit": "2024-07-15 16:20:33", + "score": -0.08161529924486244, + "first_commit": "2023-08-14 17:18:09", + "latest_commit": "2023-08-24 10:08:28", "languages": [], "model_or_dataset": "model", - "model_size": 9.24, - "model_architectures": null + "model_size": 3.68, + "model_architectures": "GPTNeoXForCausalLM" }, { "description": "ELYZA-japanese-Llama-2-13b-fast-instruct Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-fast-instruct", "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct", - "downloads": 1577, + "downloads": 1451, + "source": "Hugging Face", + "score": -0.08166388940735746, + "first_commit": "2023-12-25 18:14:10", + "latest_commit": "2023-12-27 01:41:51", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", + "url": "https://huggingface.co./fishaudio/fish-speech-1.2-sft", + "project_name": "fish-speech-1.2-sft", + "downloads": 1422, + "source": "Hugging Face", + "score": -0.08194571234982853, + "first_commit": "2024-07-18 08:00:29", + "latest_commit": "2024-08-02 08:13:06", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": null + }, + { + "description": "Tanuki-8x8B-dpo-v1.0 モデルについて Tanuki-8x8Bは、フルスクラッチで約1.7Tトークン事前学習を行った8x8Bパラメータ(総パラメータ約47B、アクティブパラメータ約13B)の大規模言語モデルです。", + "url": "https://huggingface.co./weblab-GENIAC/Tanuki-8x8B-dpo-v1.0", + "project_name": "Tanuki-8x8B-dpo-v1.0", + "downloads": 1402, + "source": "Hugging Face", + "score": -0.08214007299980858, + "first_commit": "2024-08-12 12:47:11", + "latest_commit": "2024-09-02 23:47:09", + "languages": [], + "model_or_dataset": "model", + "model_size": 47.0, + "model_architectures": "TanukiForCausalLM" + }, + { + "description": "Japanese-LLaMA-3-8B Japanese-LLaMA-3-8Bは基盤モデル、フルモデルです。 ", + "url": "https://huggingface.co./owner203/japanese-llama-3-8b", + "project_name": "japanese-llama-3-8b", + "downloads": 1358, + "source": "Hugging Face", + "score": -0.0825676664297647, + "first_commit": "2024-06-05 02:19:05", + "latest_commit": "2024-06-21 06:35:41", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "bert-large-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-upos", + "project_name": "bert-large-japanese-upos", + "downloads": 1358, + "source": "Hugging Face", + "score": -0.0825676664297647, + "first_commit": "2021-08-19 10:39:38", + "latest_commit": "2022-09-18 19:43:53", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForTokenClassification" + }, + { + "description": 
"Ninja-v1-NSFW-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFW-128kのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-128k-gguf", + "project_name": "Ninja-v1-NSFW-128k-gguf", + "downloads": 1345, + "source": "Hugging Face", + "score": -0.08269400085225173, + "first_commit": "2024-05-01 17:45:52", + "latest_commit": "2024-05-04 13:25:47", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24, + "model_architectures": null + }, + { + "description": "OpenCALM-1B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-1b", + "project_name": "open-calm-1b", + "downloads": 1329, + "source": "Hugging Face", + "score": -0.08284948937223577, + "first_commit": "2023-05-15 07:00:18", + "latest_commit": "2023-05-18 01:11:30", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "GPTNeoXForCausalLM" + }, + { + "description": "ELYZA-tasks-100: 日本語instructionモデル評価データセット Data Description 本データセットはinstruction-tuningを行ったモデルの評価用データセットです。", + "url": "https://huggingface.co./datasets/elyza/ELYZA-tasks-100", + "project_name": "ELYZA-tasks-100", + "downloads": 1327, + "source": "Hugging Face", + "score": -0.08286892543723377, + "first_commit": "2023-08-28 09:01:44", + "latest_commit": "2023-12-27 18:17:36", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "roberta-small-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-luw-upos", + "project_name": "roberta-small-japanese-luw-upos", + "downloads": 1318, "source": "Hugging Face", - "score": -0.07607099558045982, - "first_commit": "2023-12-25 18:14:10", - "latest_commit": "2023-12-27 01:41:51", + "score": -0.0829563877297248, + "first_commit": "2021-11-03 05:51:58", + "latest_commit": "2022-09-18 19:45:09", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "RobertaForTokenClassification" }, { - "description": "bert-base-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-upos", - "project_name": "bert-base-japanese-upos", - "downloads": 1553, + "description": "Mistral-Nemo-Japanese-Instruct-2408 Model Description", + "url": "https://huggingface.co./cyberagent/Mistral-Nemo-Japanese-Instruct-2408", + "project_name": "Mistral-Nemo-Japanese-Instruct-2408", + "downloads": 1317, "source": "Hugging Face", - "score": -0.07632551439333184, - "first_commit": "2021-08-26 23:02:50", - "latest_commit": "2022-09-18 19:43:26", + "score": -0.0829661057622238, + "first_commit": "2024-08-30 03:57:43", + "latest_commit": "2024-08-30 04:03:41", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForTokenClassification" + "model_size": 12.2, + "model_architectures": "MistralForCausalLM" }, { - "description": "This dataset contains a diverse set of natural Japanese speech, collected from terrestrial television streams.", - "url": "https://huggingface.co./datasets/reazon-research/reazonspeech", - "project_name": "reazonspeech", - "downloads": 1539, + "description": "Kotoba-Whisper (v2.0)", + "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v2.0", + "project_name": "kotoba-whisper-v2.0", + "downloads": 1307, "source": "Hugging Face", - "score": 
-0.0764739837008405, - "first_commit": null, - "latest_commit": null, + "score": -0.08306328608721383, + "first_commit": "2024-09-17 12:49:47", + "latest_commit": "2024-09-20 01:56:01", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 0.756, + "model_architectures": "WhisperForConditionalGeneration" }, { "description": "Model Description llava-calm2-siglip is an experimental Vision Language Model that can answer questions in Japanese about images.", "url": "https://huggingface.co./cyberagent/llava-calm2-siglip", "project_name": "llava-calm2-siglip", - "downloads": 1512, + "downloads": 1289, "source": "Hugging Face", - "score": -0.07676031736532153, + "score": -0.08323821067219588, "first_commit": "2024-06-12 19:35:20", "latest_commit": "2024-06-12 19:40:39", "languages": [], @@ -4113,96 +4209,96 @@ "model_architectures": "LlavaForConditionalGeneration" }, { - "description": "OpenCALM-3B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-3b", - "project_name": "open-calm-3b", - "downloads": 1482, + "description": "llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", + "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", + "project_name": "llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", + "downloads": 1284, "source": "Hugging Face", - "score": -0.07707846588141154, - "first_commit": "2023-05-15 07:14:36", - "latest_commit": "2023-05-18 01:11:50", + "score": -0.0832868008346909, + "first_commit": "2023-10-18 19:01:48", + "latest_commit": "2023-10-20 08:41:17", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" + "model_architectures": null }, { - "description": "roberta-small-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-luw-upos", - "project_name": "roberta-small-japanese-luw-upos", - "downloads": 1435, + "description": "Places in japan.", + "url": "https://huggingface.co./datasets/JapanDegitalMaterial/Places_in_Japan", + "project_name": "Places_in_Japan", + "downloads": 1283, "source": "Hugging Face", - "score": -0.07757689855661923, - "first_commit": "2021-11-03 05:51:58", - "latest_commit": "2022-09-18 19:45:09", + "score": -0.08329651886718989, + "first_commit": "2023-09-23 12:35:06", + "latest_commit": "2023-09-23 14:00:16", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "RobertaForTokenClassification" + "model_architectures": null }, { - "description": "japanese-gpt2-small This repository provides a small-sized Japanese GPT-2 model.", - "url": "https://huggingface.co./rinna/japanese-gpt2-small", - "project_name": "japanese-gpt2-small", - "downloads": 1432, + "description": "PLaMo-13B Model Description PLaMo-13B is a LLaMA-based 13B model pre-trained on English and Japanese open datasets, developed by Preferred Networks, Inc. 
", + "url": "https://huggingface.co./pfnet/plamo-13b", + "project_name": "plamo-13b", + "downloads": 1250, "source": "Hugging Face", - "score": -0.07760871340822824, - "first_commit": "2021-06-15 06:32:27", - "latest_commit": "2024-07-20 07:49:31", + "score": -0.08361721393965697, + "first_commit": "2023-09-25 12:47:05", + "latest_commit": "2023-10-10 15:24:54", "languages": [], "model_or_dataset": "model", - "model_size": 0.123, - "model_architectures": "GPT2LMHeadModel" + "model_size": 13.1, + "model_architectures": "PlamoForCausalLM" }, { - "description": "Ninja-v1-NSFW-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFW-128kのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-128k-gguf", - "project_name": "Ninja-v1-NSFW-128k-gguf", - "downloads": 1427, + "description": "In this study, we introduce a new dataset, WRIME, for emotional intensity estimation.", + "url": "https://huggingface.co./datasets/shunk031/wrime", + "project_name": "wrime", + "downloads": 1247, "source": "Hugging Face", - "score": -0.0776617381609099, - "first_commit": "2024-05-01 17:45:52", - "latest_commit": "2024-05-04 13:25:47", + "score": -0.08364636803715399, + "first_commit": "2023-01-12 10:43:54", + "latest_commit": "2023-01-15 12:39:01", "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "Phi-3-mini-128k-instruct-gguf microsoftさんが公開しているPhi-3-mini-128k-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Phi-3-mini-128k-instruct-gguf", - "project_name": "Phi-3-mini-128k-instruct-gguf", - "downloads": 1409, + "description": "Japanese-StableLM-Base-Alpha-7B \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-base-alpha-7b is a 7B-parameter decoder-only language model pre-trained on a diverse collection of Japanese and English datasets which focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-alpha-7b", + "project_name": "japanese-stablelm-base-alpha-7b", + "downloads": 1231, "source": "Hugging Face", - "score": -0.07785262727056391, - "first_commit": "2024-04-24 13:50:51", - "latest_commit": "2024-04-24 14:24:09", + "score": -0.08380185655713802, + "first_commit": "2023-08-09 14:30:09", + "latest_commit": "2023-08-22 09:36:29", "languages": [], "model_or_dataset": "model", - "model_size": 3.82, - "model_architectures": null + "model_size": null, + "model_architectures": "JapaneseStableLMAlphaForCausalLM" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-instruct-hf", - "project_name": "Swallow-7b-NVE-instruct-hf", - "downloads": 1375, + "description": "japanese-gpt-neox-3.6b-instruction-sft Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft", + "project_name": "japanese-gpt-neox-3.6b-instruction-sft", + "downloads": 1229, "source": "Hugging Face", - "score": -0.07821319558879926, - "first_commit": "2023-12-07 02:08:59", - "latest_commit": "2024-07-06 15:18:11", + "score": -0.08382129262213603, + "first_commit": "2023-05-17 02:16:28", + "latest_commit": "2024-07-20 07:56:34", 
"languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" + "model_size": 3.76, + "model_architectures": "GPTNeoXForCausalLM" }, { "description": "Vecteus-v1-gguf Local-Novel-LLM-projectさんが公開しているVecteus-v1のggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Vecteus-v1-gguf", "project_name": "Vecteus-v1-gguf", - "downloads": 1359, + "downloads": 1216, "source": "Hugging Face", - "score": -0.07838287479738061, + "score": -0.08394762704462307, "first_commit": "2024-05-01 17:49:42", "latest_commit": "2024-05-01 18:37:01", "languages": [], @@ -4210,27 +4306,13 @@ "model_size": 7.24, "model_architectures": null }, - { - "description": "nlp-waseda/roberta-large-japanese-seq512 Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100 with the maximum sequence length of 512.", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512", - "project_name": "roberta-large-japanese-seq512", - "downloads": 1349, - "source": "Hugging Face", - "score": -0.07848892430274394, - "first_commit": "2022-06-13 09:46:45", - "latest_commit": "2022-10-21 14:49:40", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "RobertaForMaskedLM" - }, { "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-fast", "project_name": "ELYZA-japanese-Llama-2-7b-fast", - "downloads": 1335, + "downloads": 1212, "source": "Hugging Face", - "score": -0.07863739361025261, + "score": -0.08398649917461908, "first_commit": "2023-08-28 13:17:58", "latest_commit": "2023-08-29 03:46:37", "languages": [], @@ -4239,94 +4321,110 @@ "model_architectures": "LlamaForCausalLM" }, { - "description": "alpacaデータセットを日本語化したものです", - "url": "https://github.com/shi3z/alpaca_ja", - "project_name": "alpaca_ja", - "stargazers_count": 87, - "source": "GitHub", - "score": -0.0788759388080548, - "first_commit": "2023-04-01 07:03:33", - "latest_commit": "2023-05-17 15:43:50", + "description": "gpt-neox-japanese-2.7b", + "url": "https://huggingface.co./abeja/gpt-neox-japanese-2.7b", + "project_name": "gpt-neox-japanese-2.7b", + "downloads": 1203, + "source": "Hugging Face", + "score": -0.0840739614671101, + "first_commit": "2022-08-29 02:15:44", + "latest_commit": "2023-04-10 05:12:30", "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "GPTNeoXJapaneseForCausalLM" + }, + { + "description": "Corpus of Annual Reports in Japan", + "url": "https://github.com/chakki-works/CoARiJ", + "project_name": "CoARiJ", + "stargazers_count": 85, + "source": "GitHub", + "score": -0.08409117457755556, + "first_commit": "2019-09-02 14:12:48", + "latest_commit": "2020-12-19 14:00:34", + "languages": [ + "Python" + ], "model_or_dataset": "dataset" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-hf", - "project_name": "Swallow-7b-NVE-hf", - "downloads": 1307, + "description": "stockmark/stockmark-100b Stockmark-100b is a 100 billion parameter LLM pretrained from scratch based on Japanese and English corpus of about 910 billion tokens.", + "url": "https://huggingface.co./stockmark/stockmark-100b", + "project_name": "stockmark-100b", + 
"downloads": 1201, "source": "Hugging Face", - "score": -0.07893433222526997, - "first_commit": "2023-11-30 09:02:26", - "latest_commit": "2024-06-29 08:56:18", + "score": -0.08409339753210811, + "first_commit": "2024-05-13 09:31:40", + "latest_commit": "2024-05-15 06:18:10", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 96.2, "model_architectures": "LlamaForCausalLM" }, { - "description": "Japanese-StableLM-Instruct-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-70b is a 70B-parameter decoder-only language model based on japanese-stablelm-base-beta-70b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-70b", - "project_name": "japanese-stablelm-instruct-beta-70b", - "downloads": 1304, + "description": "Japanese-StableLM-Base-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-70b is a 70B-parameter decoder-only language model based on Llama-2-70b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-70b", + "project_name": "japanese-stablelm-base-beta-70b", + "downloads": 1194, "source": "Hugging Face", - "score": -0.07896614707687896, - "first_commit": "2023-10-30 07:47:31", - "latest_commit": "2023-12-19 06:45:10", + "score": -0.08416142375960113, + "first_commit": "2023-10-30 07:46:28", + "latest_commit": "2023-12-19 06:44:53", "languages": [], "model_or_dataset": "model", "model_size": 69.0, "model_architectures": "LlamaForCausalLM" }, { - "description": "gpt-neox-japanese-2.7b", - "url": "https://huggingface.co./abeja/gpt-neox-japanese-2.7b", - "project_name": "gpt-neox-japanese-2.7b", - "downloads": 1300, + "description": "bert-base-japanese-v3-marc_ja 「大規模言語モデル入門」の第5章で紹介している(感情分析)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-marc_ja", + "project_name": "bert-base-japanese-v3-marc_ja", + "downloads": 1170, "source": "Hugging Face", - "score": -0.0790085668790243, - "first_commit": "2022-08-29 02:15:44", - "latest_commit": "2023-04-10 05:12:30", + "score": -0.08439465653957719, + "first_commit": "2023-06-01 14:29:06", + "latest_commit": "2023-07-24 06:49:13", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoXJapaneseForCausalLM" + "model_architectures": "BertForSequenceClassification" }, { - "description": "Japanese-StableLM-Base-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-70b is a 70B-parameter decoder-only language model based on Llama-2-70b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-70b", - "project_name": "japanese-stablelm-base-beta-70b", - "downloads": 1291, + "description": "Llama-3-Swallow-70B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-70B-Instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3-Swallow-70B-Instruct-v0.1-gguf", + "project_name": 
"Llama-3-Swallow-70B-Instruct-v0.1-gguf", + "downloads": 1163, "source": "Hugging Face", - "score": -0.0791040114338513, - "first_commit": "2023-10-30 07:46:28", - "latest_commit": "2023-12-19 06:44:53", + "score": -0.08446268276707021, + "first_commit": "2024-07-01 14:21:29", + "latest_commit": "2024-07-07 05:04:16", "languages": [], "model_or_dataset": "model", - "model_size": 69.0, - "model_architectures": "LlamaForCausalLM" + "model_size": 70.6, + "model_architectures": null }, { - "description": "BERT Base Japanese for Irony", - "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-irony", - "project_name": "bert-base-japanese-sentiment-irony", - "downloads": 1278, + "description": "Japanese-StableLM-Instruct-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-70b is a 70B-parameter decoder-only language model based on japanese-stablelm-base-beta-70b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-70b", + "project_name": "japanese-stablelm-instruct-beta-70b", + "downloads": 1154, "source": "Hugging Face", - "score": -0.07924187579082365, - "first_commit": "2022-11-07 06:29:21", - "latest_commit": "2022-11-08 04:23:27", + "score": -0.08455014505956122, + "first_commit": "2023-10-30 07:47:31", + "latest_commit": "2023-12-19 06:45:10", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_size": 69.0, + "model_architectures": "LlamaForCausalLM" }, { "description": "ELYZA-japanese-Llama-2-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf", "project_name": "ELYZA-japanese-Llama-2-7b-instruct-gguf", - "downloads": 1260, + "downloads": 1144, "source": "Hugging Face", - "score": -0.07943276490047765, + "score": -0.08464732538455126, "first_commit": "2023-08-29 05:33:45", "latest_commit": "2023-11-16 14:27:23", "languages": [], @@ -4335,70 +4433,112 @@ "model_architectures": null }, { - "description": "Dataset Summary RealPersonaChat は,話者本人のペルソナと性格特性を含む,約14,000件の日本語雑談対話からなるコーパスです.", - "url": "https://huggingface.co./datasets/nu-dialogue/real-persona-chat", - "project_name": "real-persona-chat", - "downloads": 1220, + "description": "Reflection-Llama-3.1-70B-gguf mattshumerさんが公開しているReflection-Llama-3.1-70Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Reflection-Llama-3.1-70B-gguf", + "project_name": "Reflection-Llama-3.1-70B-gguf", + "downloads": 1135, + "source": "Hugging Face", + "score": -0.08473478767704228, + "first_commit": "2024-09-06 17:18:27", + "latest_commit": "2024-09-07 04:00:27", + "languages": [], + "model_or_dataset": "model", + "model_size": 70.6, + "model_architectures": null + }, + { + "description": "hotchpotch/japanese-reranker-cross-encoder-base-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-base-v1", + "project_name": "japanese-reranker-cross-encoder-base-v1", + "downloads": 1107, + "source": "Hugging Face", + "score": -0.08500689258701435, + "first_commit": "2024-03-29 07:07:38", + "latest_commit": "2024-04-01 02:39:31", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" + }, + { + 
"description": "old? ", + "url": "https://huggingface.co./Lasorco/lametta_old", + "project_name": "lametta_old", + "downloads": 1088, + "source": "Hugging Face", + "score": -0.0851915352044954, + "first_commit": "2023-05-21 11:16:50", + "latest_commit": "2024-07-23 07:24:33", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": null + }, + { + "description": "This is a model for named entity recognition of Japanese medical documents.", + "url": "https://huggingface.co./sociocom/MedNER-CR-JA", + "project_name": "MedNER-CR-JA", + "downloads": 1064, "source": "Hugging Face", - "score": -0.07985696292193101, - "first_commit": "2024-03-09 22:52:22", - "latest_commit": "2024-03-13 10:26:42", + "score": -0.08542476798447146, + "first_commit": "2022-08-23 03:30:43", + "latest_commit": "2024-07-31 07:44:00", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 0.11, + "model_architectures": "BertForTokenClassification" }, { - "description": "japanese-large-lm-3.6b-instruction-sft", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft", - "project_name": "japanese-large-lm-3.6b-instruction-sft", - "downloads": 1205, + "description": "Llama-3-ELYZA-JP-8B-AWQ Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-AWQ", + "project_name": "Llama-3-ELYZA-JP-8B-AWQ", + "downloads": 1049, "source": "Hugging Face", - "score": -0.08001603717997602, - "first_commit": "2023-08-14 17:18:09", - "latest_commit": "2023-08-24 10:08:28", + "score": -0.0855705384719565, + "first_commit": "2024-06-25 04:31:31", + "latest_commit": "2024-06-26 02:56:39", "languages": [], "model_or_dataset": "model", - "model_size": 3.68, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 1.98, + "model_architectures": "LlamaForCausalLM" }, { - "description": "rinna/nekomata-7b Overview We conduct continual pre-training of qwen-7b on 30B tokens from a mixture of Japanese and English datasets.", - "url": "https://huggingface.co./rinna/nekomata-7b", - "project_name": "nekomata-7b", - "downloads": 1198, + "description": "ELYZA-japanese-Llama-2-13b-fast Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-fast", + "project_name": "ELYZA-japanese-Llama-2-13b-fast", + "downloads": 1049, "source": "Hugging Face", - "score": -0.08009027183373035, - "first_commit": "2023-12-19 06:58:44", - "latest_commit": "2024-07-20 08:35:21", + "score": -0.0855705384719565, + "first_commit": "2023-12-25 17:14:45", + "latest_commit": "2023-12-27 01:41:31", "languages": [], "model_or_dataset": "model", - "model_size": 7.72, - "model_architectures": "QWenLMHeadModel" + "model_size": null, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Places in japan.", - "url": "https://huggingface.co./datasets/JapanDegitalMaterial/Places_in_Japan", - "project_name": "Places_in_Japan", - "downloads": 1186, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-plus-hf", + "project_name": "Swallow-7b-plus-hf", + "downloads": 1044, "source": "Hugging Face", - "score": -0.08021753124016637, - "first_commit": 
"2023-09-23 12:35:06", - "latest_commit": "2023-09-23 14:00:16", + "score": -0.08561912863445152, + "first_commit": "2024-02-29 11:28:52", + "latest_commit": "2024-06-29 08:56:19", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "LlamaForCausalLM" }, { - "description": "In this study, we introduce a new dataset, WRIME, for emotional intensity estimation.", - "url": "https://huggingface.co./datasets/shunk031/wrime", - "project_name": "wrime", - "downloads": 1184, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-debug\", split=\"test\") 概要 大喜利生成の動作確認用データセットです。", + "url": "https://huggingface.co./datasets/YANS-official/ogiri-debug", + "project_name": "ogiri-debug", + "downloads": 1040, "source": "Hugging Face", - "score": -0.08023874114123904, - "first_commit": "2023-01-12 10:43:54", - "latest_commit": "2023-01-15 12:39:01", + "score": -0.08565800076444753, + "first_commit": "2024-08-30 04:18:35", + "latest_commit": "2024-08-30 14:52:03", "languages": [], "model_or_dataset": "dataset", "model_size": null, @@ -4408,9 +4548,9 @@ "description": "Please feel free to open an issue or pull request. ", "url": "https://huggingface.co./datasets/kumapo/JAQKET", "project_name": "JAQKET", - "downloads": 1154, + "downloads": 1024, "source": "Hugging Face", - "score": -0.08055688965732904, + "score": -0.08581348928443157, "first_commit": "2023-06-21 13:04:38", "latest_commit": "2023-10-09 06:44:28", "languages": [], @@ -4419,68 +4559,54 @@ "model_architectures": null }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-large-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-large-v1", - "project_name": "japanese-reranker-cross-encoder-large-v1", - "downloads": 1120, - "source": "Hugging Face", - "score": -0.08091745797556439, - "first_commit": "2024-03-28 20:53:30", - "latest_commit": "2024-04-01 02:39:45", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.337, - "model_architectures": "BertForSequenceClassification" - }, - { - "description": "Japanese-LLaMA-3-8B Japanese-LLaMA-3-8Bは基盤モデル、フルモデルです。 ", - "url": "https://huggingface.co./owner203/japanese-llama-3-8b", - "project_name": "japanese-llama-3-8b", - "downloads": 1109, + "description": "Tanuki-8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGGUF量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GGUF", + "project_name": "Tanuki-8B-dpo-v1.0-GGUF", + "downloads": 1016, "source": "Hugging Face", - "score": -0.08103411243146406, - "first_commit": "2024-06-05 02:19:05", - "latest_commit": "2024-06-21 06:35:41", + "score": -0.08589123354442359, + "first_commit": "2024-08-14 15:05:50", + "latest_commit": "2024-08-27 18:00:44", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": 7.51, + "model_architectures": null }, { - "description": "Llama-3-ELYZA-JP-8B-gguf elyzaさんが公開しているLlama-3-ELYZA-JP-8Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3-ELYZA-JP-8B-gguf", - "project_name": "Llama-3-ELYZA-JP-8B-gguf", - "downloads": 1096, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base", + "project_name": "Orion-14B-Base", + "downloads": 983, "source": "Hugging Face", - "score": 
-0.0811719767884364, - "first_commit": "2024-06-26 16:36:04", - "latest_commit": "2024-06-26 17:55:35", + "score": -0.08621192861689067, + "first_commit": "2024-01-16 06:07:42", + "latest_commit": "2024-03-26 09:21:52", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": null, + "model_architectures": "OrionForCausalLM" }, { - "description": "Ninja-v1-NSFW-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFWのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-gguf", - "project_name": "Ninja-v1-NSFW-gguf", - "downloads": 1093, + "description": "luke-japanese-large-lite luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-large-lite", + "project_name": "luke-japanese-large-lite", + "downloads": 976, "source": "Hugging Face", - "score": -0.08120379164004542, - "first_commit": "2024-05-03 14:03:23", - "latest_commit": "2024-05-04 13:26:52", + "score": -0.0862799548443837, + "first_commit": "2022-11-07 14:26:40", + "latest_commit": "2022-11-09 11:19:36", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": null, + "model_architectures": "LukeForMaskedLM" }, { "description": "HODACHI-Borea-Phi-3.5-mini-Instruct-Jp-gguf HODACHIさんが公開しているBorea-Phi-3.5-mini-Instruct-Jpのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/HODACHI-Borea-Phi-3.5-mini-Instruct-Jp-gguf", "project_name": "HODACHI-Borea-Phi-3.5-mini-Instruct-Jp-gguf", - "downloads": 1083, + "downloads": 969, "source": "Hugging Face", - "score": -0.08130984114540875, + "score": -0.08634798107187672, "first_commit": "2024-08-21 09:58:41", "latest_commit": "2024-08-21 11:08:38", "languages": [], @@ -4489,140 +4615,154 @@ "model_architectures": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-v0.1", - "project_name": "Swallow-13b-instruct-v0.1", - "downloads": 1082, + "description": "Tanuki-8x8B-dpo-v1.0-AWQ 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のAWQ 4bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-AWQ", + "project_name": "Tanuki-8x8B-dpo-v1.0-AWQ", + "downloads": 936, "source": "Hugging Face", - "score": -0.08132044609594509, - "first_commit": "2024-03-04 11:30:28", - "latest_commit": "2024-06-29 09:00:15", + "score": -0.0866686761443438, + "first_commit": "2024-08-27 09:31:22", + "latest_commit": "2024-09-03 09:26:20", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "LlamaForCausalLM" + "model_size": 6.75, + "model_architectures": "TanukiForCausalLM" }, { - "description": "Wav2Vec2-Large-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice, JSUT, TEDxJP and some other data.", - "url": "https://huggingface.co./NTQAI/wav2vec2-large-japanese", - "project_name": "wav2vec2-large-japanese", - "downloads": 1075, + "description": "t5-base-japanese-web (with Byte-fallback, 32K) Description megagonlabs/t5-base-japanese-web is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", + "url": 
"https://huggingface.co./megagonlabs/t5-base-japanese-web", + "project_name": "t5-base-japanese-web", + "downloads": 935, "source": "Hugging Face", - "score": -0.08139468074969942, - "first_commit": "2021-07-05 02:44:40", - "latest_commit": "2023-02-17 13:07:47", + "score": -0.0866783941768428, + "first_commit": "2021-08-24 04:41:45", + "latest_commit": "2021-09-06 19:32:21", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "Wav2Vec2ForCTC" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "ELYZA-japanese-Llama-2-13b-fast Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-fast", - "project_name": "ELYZA-japanese-Llama-2-13b-fast", - "downloads": 1073, + "description": "rinna/nekomata-7b Overview We conduct continual pre-training of qwen-7b on 30B tokens from a mixture of Japanese and English datasets.", + "url": "https://huggingface.co./rinna/nekomata-7b", + "project_name": "nekomata-7b", + "downloads": 931, "source": "Hugging Face", - "score": -0.0814158906507721, - "first_commit": "2023-12-25 17:14:45", - "latest_commit": "2023-12-27 01:41:31", + "score": -0.08671726630683882, + "first_commit": "2023-12-19 06:58:44", + "latest_commit": "2024-07-20 08:35:21", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_size": 7.72, + "model_architectures": "QWenLMHeadModel" }, { - "description": "Llama-3.1-70B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-70B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Instruct-gguf", - "project_name": "Llama-3.1-70B-Instruct-gguf", - "downloads": 1044, + "description": "Tanuki-8x8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGGUF量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GGUF", + "project_name": "Tanuki-8x8B-dpo-v1.0-GGUF", + "downloads": 929, "source": "Hugging Face", - "score": -0.08172343421632577, - "first_commit": "2024-07-23 17:25:23", - "latest_commit": "2024-07-24 21:04:27", + "score": -0.08673670237183682, + "first_commit": "2024-08-14 18:48:45", + "latest_commit": "2024-08-29 17:42:37", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, + "model_size": 47.0, "model_architectures": null }, { - "description": "GitHub リポジトリ ids-cv/wrime で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/wrime-sentiment", - "project_name": "wrime-sentiment", - "downloads": 1039, + "description": "nlp-waseda/roberta-base-japanese Model description This is a Japanese RoBERTa base model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese", + "project_name": "roberta-base-japanese", + "downloads": 919, "source": "Hugging Face", - "score": -0.08177645896900744, - "first_commit": "2023-07-29 06:38:26", - "latest_commit": "2023-10-06 00:56:38", + "score": -0.08683388269682685, + "first_commit": "2021-12-20 05:12:06", + "latest_commit": "2022-10-21 14:46:36", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, + "model_architectures": "RobertaForMaskedLM" + }, + { + "description": "Phi-3-mini-128k-instruct-gguf microsoftさんが公開しているPhi-3-mini-128k-instructのggufフォーマット変換版です。 ", + "url": 
"https://huggingface.co./mmnga/Phi-3-mini-128k-instruct-gguf", + "project_name": "Phi-3-mini-128k-instruct-gguf", + "downloads": 916, + "source": "Hugging Face", + "score": -0.08686303679432385, + "first_commit": "2024-04-24 13:50:51", + "latest_commit": "2024-04-24 14:24:09", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.82, "model_architectures": null }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-debug\", split=\"test\") 概要 大喜利生成の動作確認用データセットです。", - "url": "https://huggingface.co./datasets/YANS-official/ogiri-debug", - "project_name": "ogiri-debug", - "downloads": 1038, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-hf", + "project_name": "Swallow-70b-hf", + "downloads": 907, "source": "Hugging Face", - "score": -0.08178706391954377, - "first_commit": "2024-08-30 04:18:35", - "latest_commit": "2024-08-30 14:52:03", + "score": -0.08695049908681488, + "first_commit": "2023-11-25 02:13:04", + "latest_commit": "2024-06-29 08:56:23", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "LlamaForCausalLM" }, { - "description": "GUIで動作する文書校正ツール GUI tool for textlinting.", - "url": "https://github.com/gecko655/proofreading-tool", - "project_name": "proofreading-tool", - "stargazers_count": 86, + "description": "このツールは、複数のデータセットを横断して日本語の大規模言語モデルを自動評価するものです.", + "url": "https://github.com/llm-jp/llm-jp-eval", + "project_name": "llm-jp-eval", + "stargazers_count": 84, "source": "GitHub", - "score": -0.08182230330761342, - "first_commit": "2021-04-08 12:10:36", - "latest_commit": "2024-06-22 20:18:09", + "score": -0.08704922705719899, + "first_commit": "2023-10-19 19:36:10", + "latest_commit": "2024-07-18 21:31:53", "languages": [ - "JavaScript" + "Python" ], "model_or_dataset": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-plus-hf", - "project_name": "Swallow-7b-plus-hf", - "downloads": 1022, + "description": "BERT Base Japanese for Irony", + "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-irony", + "project_name": "bert-base-japanese-sentiment-irony", + "downloads": 847, "source": "Hugging Face", - "score": -0.08195674312812512, - "first_commit": "2024-02-29 11:28:52", - "latest_commit": "2024-06-29 08:56:19", + "score": -0.08753358103675503, + "first_commit": "2022-11-07 06:29:21", + "latest_commit": "2022-11-08 04:23:27", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "BertForSequenceClassification" }, { - "description": "japanese-gpt-neox-3.6b-instruction-sft Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft", - "project_name": "japanese-gpt-neox-3.6b-instruction-sft", - "downloads": 1017, + "description": "Japanese-LLaMA-3-8B-Instruct-v2 Japanese-LLaMA-3-8B-Instruct-v2は指示実行モデル、フルモデルです。 ", + "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2", + "project_name": "japanese-llama-3-8b-instruct-v2", + "downloads": 
841, "source": "Hugging Face", - "score": -0.08200976788080679, - "first_commit": "2023-05-17 02:16:28", - "latest_commit": "2024-07-20 07:56:34", + "score": -0.08759188923174904, + "first_commit": "2024-06-10 10:10:19", + "latest_commit": "2024-06-21 06:35:31", "languages": [], - "model_or_dataset": "model", - "model_size": 3.76, - "model_architectures": "GPTNeoXForCausalLM" + "model_or_dataset": "model", + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-debug\", split=\"test\") 概要 大喜利生成の動作確認用データセットです。", - "url": "https://huggingface.co./datasets/YANS-official/senryu-debug", - "project_name": "senryu-debug", - "downloads": 1017, + "description": "Japanese Laws This dataset comprises 8.75K law records retrieved from the official Japanese government website e-Gov. ", + "url": "https://huggingface.co./datasets/y2lan/japan-law", + "project_name": "japan-law", + "downloads": 840, "source": "Hugging Face", - "score": -0.08200976788080679, - "first_commit": "2024-08-30 05:47:58", - "latest_commit": "2024-09-04 10:49:15", + "score": -0.08760160726424805, + "first_commit": "2023-07-20 06:26:25", + "latest_commit": "2023-07-20 06:45:14", "languages": [], "model_or_dataset": "dataset", "model_size": null, @@ -4632,9 +4772,9 @@ "description": "PLaMo-13B-Instruct Model Description PLaMo-13B-Instruct is an instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", "url": "https://huggingface.co./pfnet/plamo-13b-instruct", "project_name": "plamo-13b-instruct", - "downloads": 987, + "downloads": 837, "source": "Hugging Face", - "score": -0.0823279163968968, + "score": -0.08763076136174507, "first_commit": "2023-10-26 02:11:24", "latest_commit": "2024-01-25 07:46:09", "languages": [], @@ -4643,54 +4783,40 @@ "model_architectures": "PlamoForCausalLM" }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-base-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-base-v1", - "project_name": "japanese-reranker-cross-encoder-base-v1", - "downloads": 980, - "source": "Hugging Face", - "score": -0.08240215105065114, - "first_commit": "2024-03-29 07:07:38", - "latest_commit": "2024-04-01 02:39:31", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" - }, - { - "description": "Tanuki-8x8B-dpo-v1.0 モデルについて Tanuki-8x8Bは、フルスクラッチで約1.7Tトークン事前学習を行った8x8Bパラメータ(総パラメータ約47B、アクティブパラメータ約13B)の大規模言語モデルです。", - "url": "https://huggingface.co./weblab-GENIAC/Tanuki-8x8B-dpo-v1.0", - "project_name": "Tanuki-8x8B-dpo-v1.0", - "downloads": 973, + "description": "(简体中文|English|日本語) Introduction github repo : https://github.com/FunAudioLLM/SenseVoice SenseVoice is a speech foundation model with multiple speech understanding capabilities, including automatic speech recognition (ASR), spoken language identification (LID), speech emotion recognition (SER), and audio event detection (AED).", + "url": "https://huggingface.co./FunAudioLLM/SenseVoiceSmall", + "project_name": "SenseVoiceSmall", + "downloads": 836, "source": "Hugging Face", - "score": -0.08247638570440548, - "first_commit": "2024-08-12 12:47:11", - "latest_commit": "2024-09-02 23:47:09", + "score": -0.08764047939424406, + "first_commit": "2024-07-03 03:56:49", + "latest_commit": "2024-07-31 05:47:48", "languages": [], "model_or_dataset": "model", - 
"model_size": 47.0, - "model_architectures": "TanukiForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Model Card for Japanese BART base Model description This is a Japanese BART base model pre-trained on Japanese Wikipedia.", - "url": "https://huggingface.co./ku-nlp/bart-base-japanese", - "project_name": "bart-base-japanese", - "downloads": 973, + "description": "Leia-Swallow-13B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", + "url": "https://huggingface.co./leia-llm/Leia-Swallow-13b", + "project_name": "Leia-Swallow-13b", + "downloads": 822, "source": "Hugging Face", - "score": -0.08247638570440548, - "first_commit": "2023-05-09 07:00:51", - "latest_commit": "2023-05-12 11:03:20", + "score": -0.0877765318492301, + "first_commit": "2024-04-17 07:32:11", + "latest_commit": "2024-04-18 05:21:10", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MBartForConditionalGeneration" + "model_size": 13.1, + "model_architectures": "LlamaForCausalLM" }, { "description": "OpenCALM-Medium Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", "url": "https://huggingface.co./cyberagent/open-calm-medium", "project_name": "open-calm-medium", - "downloads": 969, + "downloads": 821, "source": "Hugging Face", - "score": -0.08251880550655082, + "score": -0.0877862498817291, "first_commit": "2023-05-15 06:44:47", "latest_commit": "2023-05-18 01:10:54", "languages": [], @@ -4698,13 +4824,27 @@ "model_size": null, "model_architectures": "GPTNeoXForCausalLM" }, + { + "description": "Leia-Swallow-7B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", + "url": "https://huggingface.co./leia-llm/Leia-Swallow-7b", + "project_name": "Leia-Swallow-7b", + "downloads": 820, + "source": "Hugging Face", + "score": -0.0877959679142281, + "first_commit": "2024-04-17 07:12:28", + "latest_commit": "2024-04-17 10:29:56", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.83, + "model_architectures": "LlamaForCausalLM" + }, { "description": "japanese-large-lm-1.7b This repository provides a 1.7B parameters Japanese language model, trained by LINE Corporation.", "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b", "project_name": "japanese-large-lm-1.7b", - "downloads": 961, + "downloads": 814, "source": "Hugging Face", - "score": -0.08260364511084148, + "score": -0.08785427610922213, "first_commit": "2023-07-21 00:46:33", "latest_commit": "2023-08-17 01:06:37", "languages": [], @@ -4713,40 +4853,54 @@ "model_architectures": "GPT2LMHeadModel" }, { - "description": "nlp-waseda/roberta-base-japanese Model description This is a Japanese RoBERTa base model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese", - "project_name": "roberta-base-japanese", - "downloads": 956, + "description": "Model Card for Japanese BART base Model description This is a Japanese BART base model pre-trained on Japanese Wikipedia.", + "url": "https://huggingface.co./ku-nlp/bart-base-japanese", + "project_name": "bart-base-japanese", + "downloads": 805, "source": "Hugging 
Face", - "score": -0.08265666986352314, - "first_commit": "2021-12-20 05:12:06", - "latest_commit": "2022-10-21 14:46:36", + "score": -0.08794173840171314, + "first_commit": "2023-05-09 07:00:51", + "latest_commit": "2023-05-12 11:03:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": "MBartForConditionalGeneration" }, { - "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fast-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", - "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", - "downloads": 933, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-instruct-hf", + "project_name": "Swallow-7b-NVE-instruct-hf", + "downloads": 805, "source": "Hugging Face", - "score": -0.08290058372585883, - "first_commit": "2023-12-27 09:46:04", - "latest_commit": "2023-12-27 11:39:18", + "score": -0.08794173840171314, + "first_commit": "2023-12-07 02:08:59", + "latest_commit": "2024-07-06 15:18:11", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": null + "model_size": 6.74, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "DeBERTa V2 base Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", + "url": "https://huggingface.co./izumi-lab/deberta-v2-base-japanese", + "project_name": "deberta-v2-base-japanese", + "downloads": 789, + "source": "Hugging Face", + "score": -0.08809722692169719, + "first_commit": "2023-10-21 13:24:11", + "latest_commit": "2024-07-19 03:07:57", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DebertaV2ForMaskedLM" }, { "description": "Japanese-StableLM-Instruct-Alpha-7B-v2 \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-instruct-alpha-7b-v2 is a 7B parameter decoder-only language models pre-trained built on top of the Japanese-StableLM-Base-Alpha-7B model and further fine-tuned on various instruction-following datasets.", "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b-v2", "project_name": "japanese-stablelm-instruct-alpha-7b-v2", - "downloads": 903, + "downloads": 786, "source": "Hugging Face", - "score": -0.08321873224194884, + "score": -0.08812638101919419, "first_commit": "2023-10-06 08:40:24", "latest_commit": "2023-10-06 08:40:24", "languages": [], @@ -4755,292 +4909,222 @@ "model_architectures": "JapaneseStableLMAlphaForCausalLM" }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base", - "project_name": "Orion-14B-Base", - "downloads": 898, - "source": "Hugging Face", - "score": -0.08327175699463052, - "first_commit": "2024-01-16 06:07:42", - "latest_commit": "2024-03-26 09:21:52", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "OrionForCausalLM" - }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-hf", - "project_name": "Swallow-70b-hf", - "downloads": 895, - "source": "Hugging Face", - 
"score": -0.08330357184623952, - "first_commit": "2023-11-25 02:13:04", - "latest_commit": "2024-06-29 08:56:23", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf HODACHIさんが公開しているBorea-Phi-3.5-mini-Instruct-Commonのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf", - "project_name": "HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf", - "downloads": 892, + "description": "weblab-10b-instruction-sft-GPTQ Original model weblab-10b-instruction-sft which is a Japanese-centric multilingual GPT-NeoX model of 10 billion parameters created by matsuo-lab Takeshi Kojima.", + "url": "https://huggingface.co./dahara1/weblab-10b-instruction-sft-GPTQ", + "project_name": "weblab-10b-instruction-sft-GPTQ", + "downloads": 774, "source": "Hugging Face", - "score": -0.08333538669784851, - "first_commit": "2024-08-21 10:33:58", - "latest_commit": "2024-08-21 11:42:56", + "score": -0.08824299740918222, + "first_commit": "2023-08-21 05:45:35", + "latest_commit": "2023-11-14 00:24:22", "languages": [], "model_or_dataset": "model", - "model_size": 3.82, - "model_architectures": null + "model_size": 1.86, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "🎈 FlexDreamHK FlexDreamHKはリークされたNovelAIモデルの入っていない、あるいはそのリスクを可能な限り低くしたモデルを目指して作���しました。 ", - "url": "https://huggingface.co./den2nova/FlexDreamHK", - "project_name": "FlexDreamHK", - "downloads": 889, + "description": "This is the filtered Japanese subset of XL-Sum followed by PaLM 2 filters 15-gram overlap * code: https://gist.github.com/mkshing/d6371cbfdd50d4f352cee247fd4dd86a number of examples train: 4215 (before: 7113) validation: 758 (before: 889) test: 766 (before: 889)", + "url": "https://huggingface.co./datasets/mkshing/xlsum_ja", + "project_name": "xlsum_ja", + "downloads": 762, "source": "Hugging Face", - "score": -0.08336720154945752, - "first_commit": "2023-07-06 10:11:45", - "latest_commit": "2023-07-29 04:21:29", + "score": -0.08835961379917026, + "first_commit": "2023-06-16 04:15:41", + "latest_commit": "2023-06-20 23:28:48", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Ruri: Japanese General Text Embeddings Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-large", - "project_name": "ruri-large", - "downloads": 888, - "source": "Hugging Face", - "score": -0.08337780649999385, - "first_commit": "2024-08-28 17:11:42", - "latest_commit": "2024-09-04 08:49:10", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.337, - "model_architectures": "BertModel" - }, - { - "description": "Llama-3-ELYZA-JP-8B-AWQ Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-AWQ", - "project_name": "Llama-3-ELYZA-JP-8B-AWQ", - "downloads": 858, + "description": "Tanuki-8B-dpo-v1.0-AWQ 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のAWQ 4bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-AWQ", + "project_name": "Tanuki-8B-dpo-v1.0-AWQ", + "downloads": 761, "source": "Hugging Face", - "score": -0.08369595501608387, - "first_commit": "2024-06-25 04:31:31", - "latest_commit": "2024-06-26 02:56:39", + "score": -0.08836933183166926, + 
"first_commit": "2024-08-27 04:50:35", + "latest_commit": "2024-09-03 09:29:23", "languages": [], "model_or_dataset": "model", - "model_size": 1.98, + "model_size": 1.47, "model_architectures": "LlamaForCausalLM" }, { - "description": "Japanese StableLM-3B-4E1T Instruct Model Description", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-instruct", - "project_name": "japanese-stablelm-3b-4e1t-instruct", - "downloads": 844, - "source": "Hugging Face", - "score": -0.08384442432359254, - "first_commit": "2023-10-16 07:50:31", - "latest_commit": "2024-04-26 03:20:42", - "languages": [], - "model_or_dataset": "model", - "model_size": 2.8, - "model_architectures": "StableLMEpochForCausalLM" - }, - { - "description": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "project_name": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "downloads": 835, + "description": "hotchpotch/japanese-reranker-cross-encoder-large-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-large-v1", + "project_name": "japanese-reranker-cross-encoder-large-v1", + "downloads": 759, "source": "Hugging Face", - "score": -0.08393986887841955, - "first_commit": "2024-01-29 12:52:31", - "latest_commit": "2024-02-07 19:49:25", + "score": -0.08838876789666726, + "first_commit": "2024-03-28 20:53:30", + "latest_commit": "2024-04-01 02:39:45", "languages": [], "model_or_dataset": "model", - "model_size": 12.9, - "model_architectures": "GPT2LMHeadModel" + "model_size": 0.337, + "model_architectures": "BertForSequenceClassification" }, { - "description": "llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", - "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", - "project_name": "llm-jp-13b-instruct-lora-jaster-dolly-oasst-v1.0", - "downloads": 820, + "description": "GitHub リポジトリ ids-cv/wrime で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/wrime-sentiment", + "project_name": "wrime-sentiment", + "downloads": 759, "source": "Hugging Face", - "score": -0.08409894313646456, - "first_commit": "2023-10-18 19:01:48", - "latest_commit": "2023-10-20 08:41:17", + "score": -0.08838876789666726, + "first_commit": "2023-07-29 06:38:26", + "latest_commit": "2023-10-06 00:56:38", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "t5-base-japanese-web (with Byte-fallback, 32K) Description megagonlabs/t5-base-japanese-web is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", - "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web", - "project_name": "t5-base-japanese-web", - "downloads": 814, + "description": "Ninja-v1-NSFW-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFWのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-gguf", + "project_name": "Ninja-v1-NSFW-gguf", + "downloads": 756, "source": "Hugging Face", - "score": -0.08416257283968255, - "first_commit": "2021-08-24 04:41:45", - "latest_commit": "2021-09-06 19:32:21", + "score": -0.08841792199416428, + "first_commit": "2024-05-03 14:03:23", + "latest_commit": "2024-05-04 13:26:52", "languages": [], "model_or_dataset": "model", - "model_size": null, - 
"model_architectures": "T5ForConditionalGeneration" + "model_size": 7.24, + "model_architectures": null }, { - "description": "This is the filtered Japanese subset of XL-Sum followed by PaLM 2 filters 15-gram overlap * code: https://gist.github.com/mkshing/d6371cbfdd50d4f352cee247fd4dd86a number of examples train: 4215 (before: 7113) validation: 758 (before: 889) test: 766 (before: 889)", - "url": "https://huggingface.co./datasets/mkshing/xlsum_ja", - "project_name": "xlsum_ja", - "downloads": 798, + "description": "Llama-3-ELYZA-JP-8B-gguf elyzaさんが公開しているLlama-3-ELYZA-JP-8Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3-ELYZA-JP-8B-gguf", + "project_name": "Llama-3-ELYZA-JP-8B-gguf", + "downloads": 756, "source": "Hugging Face", - "score": -0.0843322520482639, - "first_commit": "2023-06-16 04:15:41", - "latest_commit": "2023-06-20 23:28:48", + "score": -0.08841792199416428, + "first_commit": "2024-06-26 16:36:04", + "latest_commit": "2024-06-26 17:55:35", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 8.03, "model_architectures": null }, { - "description": "Corpus of Annual Reports in Japan", - "url": "https://github.com/chakki-works/CoARiJ", - "project_name": "CoARiJ", - "stargazers_count": 85, - "source": "GitHub", - "score": -0.08476866780717204, - "first_commit": "2019-09-02 14:12:48", - "latest_commit": "2020-12-19 14:00:34", - "languages": [ - "Python" - ], - "model_or_dataset": "dataset" - }, - { - "description": "shisa-base-7b-v1 shisa-base-7b-v1 takes Mistral 7B and adds an additional 8B tokens of primarily Japanese pre-training.", - "url": "https://huggingface.co./augmxnt/shisa-base-7b-v1", - "project_name": "shisa-base-7b-v1", - "downloads": 755, + "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fast-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", + "downloads": 752, "source": "Hugging Face", - "score": -0.08478826492132625, - "first_commit": "2023-11-19 09:44:36", - "latest_commit": "2023-12-09 10:34:29", + "score": -0.08845679412416028, + "first_commit": "2023-12-27 09:46:04", + "latest_commit": "2023-12-27 11:39:18", "languages": [], "model_or_dataset": "model", - "model_size": 7.96, - "model_architectures": "MistralForCausalLM" + "model_size": 13.1, + "model_architectures": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./augmxnt/shisa-7b-v1", - "project_name": "shisa-7b-v1", - "downloads": 754, + "description": "japanese-gpt-neox-3.6b-instruction-sft-v2 Overview", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft-v2", + "project_name": "japanese-gpt-neox-3.6b-instruction-sft-v2", + "downloads": 740, "source": "Hugging Face", - "score": -0.0847988698718626, - "first_commit": "2023-11-27 17:55:31", - "latest_commit": "2023-12-20 18:11:13", + "score": -0.08857341051414831, + "first_commit": "2023-05-30 01:50:25", + "latest_commit": "2024-07-20 07:57:35", "languages": [], "model_or_dataset": "model", - "model_size": 7.96, - "model_architectures": "MistralForCausalLM" + "model_size": 3.76, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "haqishen-Llama-3-8B-Japanese-Instruct-gguf haqishenさんが公開しているLlama-3-8B-Japanese-Instructのggufフォーマット変換版です。 ", - "url": 
"https://huggingface.co./mmnga/haqishen-Llama-3-8B-Japanese-Instruct-gguf", - "project_name": "haqishen-Llama-3-8B-Japanese-Instruct-gguf", - "downloads": 753, + "description": "Llama-3.1-70B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-70B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Instruct-gguf", + "project_name": "Llama-3.1-70B-Instruct-gguf", + "downloads": 734, "source": "Hugging Face", - "score": -0.08480947482239892, - "first_commit": "2024-04-23 13:55:17", - "latest_commit": "2024-04-23 14:54:23", + "score": -0.08863171870914233, + "first_commit": "2024-07-23 17:25:23", + "latest_commit": "2024-07-24 21:04:27", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": null, "model_architectures": null }, { - "description": "Model Card for Model ID Original model elyza/ELYZA-japanese-Llama-2-7b-fast-instruct which is based on Meta's \"Llama 2\" and has undergone additional pre-training in Japanese, and thier original post-training and speed up tuning.", - "url": "https://huggingface.co./dahara1/ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", - "downloads": 734, + "description": "このモデルは何? ", + "url": "https://huggingface.co./Lasorco/lametta", + "project_name": "lametta", + "downloads": 727, "source": "Hugging Face", - "score": -0.08501096888258927, - "first_commit": "2023-08-30 09:18:50", - "latest_commit": "2023-11-14 00:10:58", + "score": -0.08869974493663535, + "first_commit": "2023-03-28 14:29:55", + "latest_commit": "2023-11-08 07:37:12", "languages": [], "model_or_dataset": "model", - "model_size": 1.24, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Tanuki-8x8B-dpo-v1.0-AWQ 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のAWQ 4bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-AWQ", - "project_name": "Tanuki-8x8B-dpo-v1.0-AWQ", - "downloads": 732, + "description": "DataPilot-ArrowPro-7B-KUJIRA-gguf DataPilotさんが公開しているArrowPro-7B-KUJIRAのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-KUJIRA-gguf", + "project_name": "DataPilot-ArrowPro-7B-KUJIRA-gguf", + "downloads": 717, "source": "Hugging Face", - "score": -0.08503217878366193, - "first_commit": "2024-08-27 09:31:22", - "latest_commit": "2024-09-03 09:26:20", + "score": -0.08879692526162537, + "first_commit": "2024-05-09 13:21:27", + "latest_commit": "2024-05-11 07:24:16", "languages": [], "model_or_dataset": "model", - "model_size": 6.75, - "model_architectures": "TanukiForCausalLM" + "model_size": 7.24, + "model_architectures": null }, { - "description": "weblab-10b-instruction-sft-GPTQ Original model weblab-10b-instruction-sft which is a Japanese-centric multilingual GPT-NeoX model of 10 billion parameters created by matsuo-lab Takeshi Kojima.", - "url": "https://huggingface.co./dahara1/weblab-10b-instruction-sft-GPTQ", - "project_name": "weblab-10b-instruction-sft-GPTQ", - "downloads": 723, + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./augmxnt/shisa-7b-v1", + "project_name": "shisa-7b-v1", + "downloads": 716, "source": "Hugging Face", - "score": -0.08512762333848894, - "first_commit": "2023-08-21 05:45:35", - "latest_commit": "2023-11-14 00:24:22", + "score": -0.08880664329412438, + "first_commit": "2023-11-27 17:55:31", + "latest_commit": "2023-12-20 18:11:13", "languages": [], 
"model_or_dataset": "model", - "model_size": 1.86, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 7.96, + "model_architectures": "MistralForCausalLM" }, { - "description": "Tanuki-8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGGUF量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GGUF", - "project_name": "Tanuki-8B-dpo-v1.0-GGUF", - "downloads": 720, + "description": "albert-base-japanese-v1 日本語事前学習済みALBERTモデルです", + "url": "https://huggingface.co./ken11/albert-base-japanese-v1", + "project_name": "albert-base-japanese-v1", + "downloads": 710, "source": "Hugging Face", - "score": -0.08515943819009794, - "first_commit": "2024-08-14 15:05:50", - "latest_commit": "2024-08-27 18:00:44", + "score": -0.0888649514891184, + "first_commit": "2021-12-19 17:07:14", + "latest_commit": "2021-12-22 03:04:30", "languages": [], "model_or_dataset": "model", - "model_size": 7.51, - "model_architectures": null + "model_size": null, + "model_architectures": "AlbertForMaskedLM" }, { - "description": "japanese-stablelm-2-instruct-1_6b-gguf stabilityaiさんが公開しているjapanese-stablelm-2-instruct-1_6bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/japanese-stablelm-2-instruct-1_6b-gguf", - "project_name": "japanese-stablelm-2-instruct-1_6b-gguf", - "downloads": 708, + "description": "Wav2Vec2-Large-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice, JSUT, TEDxJP and some other data.", + "url": "https://huggingface.co./NTQAI/wav2vec2-large-japanese", + "project_name": "wav2vec2-large-japanese", + "downloads": 691, "source": "Hugging Face", - "score": -0.08528669759653394, - "first_commit": "2024-05-11 07:26:43", - "latest_commit": "2024-05-11 09:56:19", + "score": -0.08904959410659945, + "first_commit": "2021-07-05 02:44:40", + "latest_commit": "2023-02-17 13:07:47", "languages": [], "model_or_dataset": "model", - "model_size": 1.64, - "model_architectures": null + "model_size": null, + "model_architectures": "Wav2Vec2ForCTC" }, { "description": "Llama-3-Swallow-8B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-8B-Instruct-v0.1のggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Llama-3-Swallow-8B-Instruct-v0.1-gguf", "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-gguf", - "downloads": 705, + "downloads": 681, "source": "Hugging Face", - "score": -0.08531851244814295, + "score": -0.08914677443158947, "first_commit": "2024-07-01 16:42:54", "latest_commit": "2024-07-02 10:43:55", "languages": [], @@ -5049,26 +5133,40 @@ "model_architectures": null }, { - "description": "Japanese-LLaMA-3-8B-Instruct-v2 Japanese-LLaMA-3-8B-Instruct-v2は指示実行モデル、フルモデルです。 ", - "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2", - "project_name": "japanese-llama-3-8b-instruct-v2", - "downloads": 705, + "description": "Japanese StableLM-3B-4E1T Instruct Model Description", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-instruct", + "project_name": "japanese-stablelm-3b-4e1t-instruct", + "downloads": 673, "source": "Hugging Face", - "score": -0.08531851244814295, - "first_commit": "2024-06-10 10:10:19", - "latest_commit": "2024-06-21 06:35:31", + "score": -0.08922451869158149, + "first_commit": "2023-10-16 07:50:31", + "latest_commit": "2024-04-26 03:20:42", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 2.8, + "model_architectures": "StableLMEpochForCausalLM" + }, + { + "description": "Model Card for Model 
ID Original model elyza/ELYZA-japanese-Llama-2-7b-fast-instruct which is based on Meta's \"Llama 2\" and has undergone additional pre-training in Japanese, and their original post-training and speed up tuning.", + "url": "https://huggingface.co./dahara1/ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", + "downloads": 673, + "source": "Hugging Face", + "score": -0.08922451869158149, + "first_commit": "2023-08-30 09:18:50", + "latest_commit": "2023-11-14 00:10:58", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.24, "model_architectures": "LlamaForCausalLM" }, { "description": "ODEX is an Open-Domain EXecution-based NL-to-Code generation data benchmark.", "url": "https://huggingface.co./datasets/neulab/odex", "project_name": "odex", - "downloads": 694, + "downloads": 673, "source": "Hugging Face", - "score": -0.08543516690404263, + "score": -0.08922451869158149, "first_commit": "2023-01-06 14:30:00", "latest_commit": "2023-02-10 18:01:34", "languages": [], @@ -5077,40 +5175,82 @@ "model_architectures": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-v0.1", - "project_name": "Swallow-70b-instruct-v0.1", - "downloads": 693, + "description": "オリジナルのサイトと同じものを使用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/llm-jp-eval", + "project_name": "llm-jp-eval", + "downloads": 665, "source": "Hugging Face", - "score": -0.08544577185457895, - "first_commit": "2024-03-06 14:39:34", - "latest_commit": "2024-06-29 09:00:17", + "score": -0.08930226295157351, + "first_commit": "2024-06-19 10:31:57", + "latest_commit": "2024-08-31 12:40:31", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "🎈 FlexDreamHK FlexDreamHKはリークされたNovelAIモデルの入っていない、あるいはそのリスクを可能な限り低くしたモデルを目指して作成しました。 ", + "url": "https://huggingface.co./den2nova/FlexDreamHK", + "project_name": "FlexDreamHK", + "downloads": 661, + "source": "Hugging Face", + "score": -0.08934113508156952, + "first_commit": "2023-07-06 10:11:45", + "latest_commit": "2023-07-29 04:21:29", "languages": [], "model_or_dataset": "model", - "model_size": 69.2, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Japanese-StableLM-Instruct-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-7b is a 7B-parameter decoder-only language model based on", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-7b", - "project_name": "japanese-stablelm-instruct-beta-7b", - "downloads": 667, + "description": "shisa-base-7b-v1 shisa-base-7b-v1 takes Mistral 7B and adds an additional 8B tokens of primarily Japanese pre-training.", + "url": "https://huggingface.co./augmxnt/shisa-base-7b-v1", + "project_name": "shisa-base-7b-v1", + "downloads": 658, "source": "Hugging Face", - "score": -0.08572150056852364, - "first_commit": "2023-10-30 07:47:09", - "latest_commit": "2023-12-19 06:43:49", + "score": -0.08937028917906653, + "first_commit": "2023-11-19 09:44:36", + "latest_commit": "2023-12-09 10:34:29", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" + "model_size": 7.96, + 
"model_architectures": "MistralForCausalLM" + }, + { + "description": "hotchpotch/japanese-bge-reranker-v2-m3-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-bge-reranker-v2-m3-v1", + "project_name": "japanese-bge-reranker-v2-m3-v1", + "downloads": 653, + "source": "Hugging Face", + "score": -0.08941887934156154, + "first_commit": "2024-03-28 20:45:16", + "latest_commit": "2024-04-01 02:40:22", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.5680000000000001, + "model_architectures": "XLMRobertaForSequenceClassification" + }, + { + "description": "bert-base-japanese-v3-unsup-simcse-jawiki 「大規模言語モデル入門」の第8章で紹介している教師なしSimCSEのモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-unsup-simcse-jawiki", + "project_name": "bert-base-japanese-v3-unsup-simcse-jawiki", + "downloads": 644, + "source": "Hugging Face", + "score": -0.08950634163405256, + "first_commit": "2023-06-21 10:52:27", + "latest_commit": "2023-07-24 07:07:44", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertModel" }, { "description": "gpt2-large-japanese This repository provides a large sized Japanese GPT-2 model.", "url": "https://huggingface.co./abeja/gpt2-large-japanese", "project_name": "gpt2-large-japanese", - "downloads": 667, + "downloads": 635, "source": "Hugging Face", - "score": -0.08572150056852364, + "score": -0.08959380392654359, "first_commit": "2022-08-29 05:17:36", "latest_commit": "2022-08-29 16:10:11", "languages": [], @@ -5118,13 +5258,27 @@ "model_size": null, "model_architectures": "GPT2LMHeadModel" }, + { + "description": "Polyglot-math-4x7b-24b Polyglot-4x7b is a Mixture of Experts approach to a multilingual model.", + "url": "https://huggingface.co./macadeliccc/polyglot-math-4x7b", + "project_name": "polyglot-math-4x7b", + "downloads": 616, + "source": "Hugging Face", + "score": -0.08977844654402464, + "first_commit": "2024-01-13 03:05:44", + "latest_commit": "2024-03-04 19:25:12", + "languages": [], + "model_or_dataset": "model", + "model_size": 24.2, + "model_architectures": "MixtralForCausalLM" + }, { "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-instruct-hf", "project_name": "Swallow-70b-NVE-instruct-hf", - "downloads": 667, + "downloads": 607, "source": "Hugging Face", - "score": -0.08572150056852364, + "score": -0.08986590883651566, "first_commit": "2023-12-13 03:56:30", "latest_commit": "2024-07-06 15:18:24", "languages": [], @@ -5133,40 +5287,82 @@ "model_architectures": "LlamaForCausalLM" }, { - "description": "Polyglot-math-4x7b-24b Polyglot-4x7b is a Mixture of Experts approach to a multilingual model.", - "url": "https://huggingface.co./macadeliccc/polyglot-math-4x7b", - "project_name": "polyglot-math-4x7b", - "downloads": 657, + "description": "bilingual-gpt-neox-4b-instruction-ppo Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", + "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-instruction-ppo", + "project_name": "bilingual-gpt-neox-4b-instruction-ppo", + "downloads": 604, + "source": "Hugging Face", + "score": -0.08989506293401267, + "first_commit": "2023-08-02 05:56:07", + "latest_commit": "2024-07-20 08:05:14", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.95, + 
"model_architectures": "GPTNeoXForCausalLM" + }, + { + "description": "Dataset Summary RealPersonaChat は,話者本人のペルソナと性格特性を含む,約14,000件の日本語雑談対話からなるコーパスです.", + "url": "https://huggingface.co./datasets/nu-dialogue/real-persona-chat", + "project_name": "real-persona-chat", + "downloads": 602, + "source": "Hugging Face", + "score": -0.08991449899901068, + "first_commit": "2024-03-09 22:52:22", + "latest_commit": "2024-03-13 10:26:42", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-large-medium", + "project_name": "t5-large-medium", + "downloads": 601, + "source": "Hugging Face", + "score": -0.08992421703150968, + "first_commit": "2023-04-26 08:31:45", + "latest_commit": "2023-05-10 10:00:45", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" + }, + { + "description": "alabnii/jmedroberta-base-manbyo-wordpiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece", + "project_name": "jmedroberta-base-manbyo-wordpiece", + "downloads": 600, "source": "Hugging Face", - "score": -0.08582755007388697, - "first_commit": "2024-01-13 03:05:44", - "latest_commit": "2024-03-04 19:25:12", + "score": -0.08993393506400868, + "first_commit": "2022-12-22 17:17:03", + "latest_commit": "2023-03-08 01:44:36", "languages": [], "model_or_dataset": "model", - "model_size": 24.2, - "model_architectures": "MixtralForCausalLM" + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "Ninja-v1-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-gguf", - "project_name": "Ninja-v1-gguf", - "downloads": 648, + "description": "HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf HODACHIさんが公開しているBorea-Phi-3.5-mini-Instruct-Commonのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf", + "project_name": "HODACHI-Borea-Phi-3.5-mini-Instruct-Common-gguf", + "downloads": 600, "source": "Hugging Face", - "score": -0.08592299462871397, - "first_commit": "2024-05-03 14:03:22", - "latest_commit": "2024-05-04 13:26:22", + "score": -0.08993393506400868, + "first_commit": "2024-08-21 10:33:58", + "latest_commit": "2024-08-21 11:42:56", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 3.82, "model_architectures": null }, { "description": "rinna/nekomata-14b Overview We conduct continual pre-training of qwen-14b on 66B tokens from a mixture of Japanese and English datasets.", "url": "https://huggingface.co./rinna/nekomata-14b", "project_name": "nekomata-14b", - "downloads": 646, + "downloads": 597, "source": "Hugging Face", - "score": -0.08594420452978664, + "score": -0.0899630891615057, "first_commit": "2023-12-19 08:09:53", "latest_commit": "2024-07-22 07:58:40", "languages": [], @@ -5175,154 +5371,182 @@ "model_architectures": "QWenLMHeadModel" }, { - "description": "stockmark/stockmark-100b Stockmark-100b is a 100 billion parameter LLM pretrained from scratch based on Japanese and English corpus of about 910 billion tokens.", - "url": "https://huggingface.co./stockmark/stockmark-100b", - "project_name": "stockmark-100b", - "downloads": 645, + 
"description": "llm-lora-classification", + "url": "https://github.com/hppRC/llm-lora-classification", + "project_name": "llm-lora-classification", + "stargazers_count": 83, + "source": "GitHub", + "score": -0.09000727953684241, + "first_commit": "2023-07-17 12:42:57", + "latest_commit": "2023-07-22 19:46:45", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, + { + "description": "Word2vec (word to vectors) approach for Japanese language using Gensim and Mecab.", + "url": "https://github.com/philipperemy/japanese-words-to-vectors", + "project_name": "japanese-words-to-vectors", + "stargazers_count": 83, + "source": "GitHub", + "score": -0.09000727953684241, + "first_commit": "2016-09-04 09:43:00", + "latest_commit": "2020-08-09 19:48:23", + "languages": [ + "Python" + ], + "model_or_dataset": "model" + }, + { + "description": "bert-base-japanese-v3-jnli 「大規模言語モデル入門」の第5章で紹介している(自然言語推論)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jnli", + "project_name": "bert-base-japanese-v3-jnli", + "downloads": 592, "source": "Hugging Face", - "score": -0.08595480948032298, - "first_commit": "2024-05-13 09:31:40", - "latest_commit": "2024-05-15 06:18:10", + "score": -0.0900116793240007, + "first_commit": "2023-06-12 14:15:16", + "latest_commit": "2023-07-24 06:49:14", "languages": [], "model_or_dataset": "model", - "model_size": 96.2, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "BertForSequenceClassification" }, { - "description": "Tanuki-8x8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGGUF量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GGUF", - "project_name": "Tanuki-8x8B-dpo-v1.0-GGUF", - "downloads": 627, + "description": "nlp-waseda/roberta-large-japanese-seq512-with-auto-jumanpp Model description", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512-with-auto-jumanpp", + "project_name": "roberta-large-japanese-seq512-with-auto-jumanpp", + "downloads": 591, "source": "Hugging Face", - "score": -0.08614569858997699, - "first_commit": "2024-08-14 18:48:45", - "latest_commit": "2024-08-29 17:42:37", + "score": -0.09002139735649971, + "first_commit": "2022-10-15 06:04:06", + "latest_commit": "2022-10-21 15:56:38", "languages": [], "model_or_dataset": "model", - "model_size": 47.0, - "model_architectures": null + "model_size": null, + "model_architectures": "RobertaForMaskedLM" }, { - "description": "このモデルは何? 
", - "url": "https://huggingface.co./Lasorco/lametta", - "project_name": "lametta", - "downloads": 619, + "description": "Converted from clu-ling/whisper-large-v2-japanese-5k-steps using CTranslate2.", + "url": "https://huggingface.co./zh-plus/faster-whisper-large-v2-japanese-5k-steps", + "project_name": "faster-whisper-large-v2-japanese-5k-steps", + "downloads": 587, "source": "Hugging Face", - "score": -0.08623053819426765, - "first_commit": "2023-03-28 14:29:55", - "latest_commit": "2023-11-08 07:37:12", + "score": -0.09006026948649572, + "first_commit": "2023-07-03 08:29:37", + "latest_commit": "2023-07-03 18:42:31", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "japanese-gpt-neox-3.6b-instruction-sft-v2 Overview", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft-v2", - "project_name": "japanese-gpt-neox-3.6b-instruction-sft-v2", - "downloads": 616, + "description": "Sarashina1-7B This repository provides Japanese language models trained by SB Intuitions.", + "url": "https://huggingface.co./sbintuitions/sarashina1-7b", + "project_name": "sarashina1-7b", + "downloads": 585, "source": "Hugging Face", - "score": -0.08626235304587666, - "first_commit": "2023-05-30 01:50:25", - "latest_commit": "2024-07-20 07:57:35", + "score": -0.09007970555149372, + "first_commit": "2024-06-07 10:13:21", + "latest_commit": "2024-06-27 06:55:38", "languages": [], "model_or_dataset": "model", - "model_size": 3.76, + "model_size": null, "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "rinna-llama-3-youko-8b-gguf rinnaさんが公開しているllama-3-youko-8bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-8b-gguf", - "project_name": "rinna-llama-3-youko-8b-gguf", - "downloads": 601, + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GGUF", + "project_name": "japanese-stablelm-instruct-beta-70B-GGUF", + "downloads": 573, "source": "Hugging Face", - "score": -0.08642142730392167, - "first_commit": "2024-05-01 14:17:53", - "latest_commit": "2024-05-01 15:11:21", + "score": -0.09019632194148175, + "first_commit": "2023-11-02 15:45:24", + "latest_commit": "2023-11-02 18:22:05", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 69.0, "model_architectures": null }, { - "description": "albert-base-japanese-v1 日本語事前学習済みALBERTモデルです", - "url": "https://huggingface.co./ken11/albert-base-japanese-v1", - "project_name": "albert-base-japanese-v1", - "downloads": 601, + "description": "Kotoba-Whisper-v2.1 Kotoba-Whisper-v2.1 is a Japanese ASR model based on kotoba-tech/kotoba-whisper-v2.0, with additional postprocessing stacks integrated as pipeline.", + "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v2.1", + "project_name": "kotoba-whisper-v2.1", + "downloads": 571, "source": "Hugging Face", - "score": -0.08642142730392167, - "first_commit": "2021-12-19 17:07:14", - "latest_commit": "2021-12-22 03:04:30", + "score": -0.09021575800647975, + "first_commit": "2024-09-17 14:19:45", + "latest_commit": "2024-09-20 01:55:12", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "AlbertForMaskedLM" + "model_size": 0.756, + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "PLaMo-13B-Instruct-NC Model Description PLaMo-13B-Instruct-NC is a noncommercial instruct fine-tuned model 
built upon the 8192 context length version of PLaMo-13B text generation model.", - "url": "https://huggingface.co./pfnet/plamo-13b-instruct-nc", - "project_name": "plamo-13b-instruct-nc", - "downloads": 596, + "description": "Japanese CLIP ViT-H/14 (Wider) Table of Contents Overview Usage Model Details Evaluation Limitations and Biases Citation See Also Contact Information Overview Developed by:", + "url": "https://huggingface.co./hakuhodo-tech/japanese-clip-vit-h-14-bert-wider", + "project_name": "japanese-clip-vit-h-14-bert-wider", + "downloads": 570, "source": "Hugging Face", - "score": -0.08647445205660334, - "first_commit": "2023-10-26 05:36:26", - "latest_commit": "2024-01-25 07:46:45", + "score": -0.09022547603897876, + "first_commit": "2024-03-06 03:30:25", + "latest_commit": "2024-03-06 21:46:11", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "PlamoForCausalLM" + "model_size": 0.91, + "model_architectures": "CustomCLIPModel" }, { - "description": "※llama.cpp Releases b3428(7/21)", - "url": "https://huggingface.co./MCZK/EZO-Common-9B-gemma-2-it-GGUF", - "project_name": "EZO-Common-9B-gemma-2-it-GGUF", - "downloads": 586, + "description": "ELYZA-japanese-Llama-2-7b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fastのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-gguf", + "downloads": 568, "source": "Hugging Face", - "score": -0.08658050156196667, - "first_commit": "2024-07-10 11:12:59", - "latest_commit": "2024-07-21 11:26:08", + "score": -0.09024491210397677, + "first_commit": "2023-08-29 07:23:20", + "latest_commit": "2023-11-16 14:27:36", "languages": [], "model_or_dataset": "model", - "model_size": 9.24, + "model_size": 6.85, "model_architectures": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GGUF", - "project_name": "japanese-stablelm-instruct-beta-70B-GGUF", - "downloads": 586, + "description": "recruit-jp/japanese-typo-detector-roberta-base モデルの概要 日本語の文章を入力すると各文字ごとに誤字脱字である確率を出力します 各ラベルの意味は以下の通りです id label meaning 0 OK 誤字なし 1 deletion 1文字の抜け 2 insertion_a 余分な1文字の挿入 3 insertion_b 直前の文字列と一致する2文字以上の余分な文字の挿入 4 kanji-conversion_a 同一の読みを持つ漢字の入れ替え(誤変換) 5 kanji-conversion_b 近い読みを持つ漢字の入れ替え(誤変換) 6 substitution 1文字の入れ替え 7 transposition 隣接する2文字間の転置 8 others その他の入力誤り 誤り種類の詳細については学習データセットの元論文をご参照ください 日本語 Wikipedia の編集履歴に基づく 入力誤りデータセットと訂正システムの改良 その他、モデルの詳細については当社ブログ記事をご参照ください 誤字脱字検出モデルをHugging Face Hubに公開しました (Re", + "url": "https://huggingface.co./recruit-jp/japanese-typo-detector-roberta-base", + "project_name": "japanese-typo-detector-roberta-base", + "downloads": 567, "source": "Hugging Face", - "score": -0.08658050156196667, - "first_commit": "2023-11-02 15:45:24", - "latest_commit": "2023-11-02 18:22:05", + "score": -0.09025463013647576, + "first_commit": "2023-11-09 06:27:40", + "latest_commit": "2023-12-21 03:07:31", "languages": [], "model_or_dataset": "model", - "model_size": 69.0, - "model_architectures": null + "model_size": 0.0996, + "model_architectures": "RobertaForTokenClassification" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-hf", - "project_name": "Swallow-70b-NVE-hf", - "downloads": 583, + "description": 
"Japanese-StableLM-Instruct-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-7b is a 7B-parameter decoder-only language model based on", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-7b", + "project_name": "japanese-stablelm-instruct-beta-7b", + "downloads": 566, "source": "Hugging Face", - "score": -0.08661231641357568, - "first_commit": "2023-12-07 07:34:35", - "latest_commit": "2024-06-29 08:56:25", + "score": -0.09026434816897477, + "first_commit": "2023-10-30 07:47:09", + "latest_commit": "2023-12-19 06:43:49", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 6.74, "model_architectures": "LlamaForCausalLM" }, { - "description": "ichikara-instruction (Non Commercial) LLMのための日本語インストラクションデータ 公開ページ 公開ページより、 本データに関して、言語処理学会第30回年次大会において発表を行います。", - "url": "https://huggingface.co./datasets/p1atdev/ichikara-instruction", - "project_name": "ichikara-instruction", - "downloads": 580, + "description": "Overview This dataset provides a convenient and user-friendly format of data from Aozora Bunko (青空文庫), a website that compiles public-domain books in Japan, ideal for Machine Learning applications.", + "url": "https://huggingface.co./datasets/globis-university/aozorabunko-clean", + "project_name": "aozorabunko-clean", + "downloads": 566, "source": "Hugging Face", - "score": -0.08664413126518468, - "first_commit": "2024-03-12 07:09:56", - "latest_commit": "2024-03-12 08:36:40", + "score": -0.09026434816897477, + "first_commit": "2023-06-26 13:31:28", + "latest_commit": "2023-10-27 13:22:32", "languages": [], "model_or_dataset": "dataset", "model_size": null, @@ -5332,9 +5556,9 @@ "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-v0.1", "project_name": "karakuri-lm-70b-v0.1", - "downloads": 576, + "downloads": 562, "source": "Hugging Face", - "score": -0.08668655106733002, + "score": -0.09030322029897078, "first_commit": "2024-01-26 10:49:53", "latest_commit": "2024-05-07 09:00:06", "languages": [], @@ -5343,110 +5567,152 @@ "model_architectures": "LlamaForCausalLM" }, { - "description": "stockmark/gpt-neox-japanese-1.4b This repository provides a GPT-NeoX based model with 1.4B parameters pre-trained on Japanese corpus of about 20B tokens.", - "url": "https://huggingface.co./stockmark/gpt-neox-japanese-1.4b", - "project_name": "gpt-neox-japanese-1.4b", - "downloads": 569, + "description": "This is a Japanese translated version of HumanEval, an evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\".", + "url": "https://huggingface.co./datasets/kogi-jwu/jhumaneval", + "project_name": "jhumaneval", + "downloads": 562, "source": "Hugging Face", - "score": -0.08676078572108435, - "first_commit": "2023-08-06 07:37:38", - "latest_commit": "2023-09-07 03:44:19", + "score": -0.09030322029897078, + "first_commit": "2023-10-21 08:20:14", + "latest_commit": "2024-01-10 21:52:35", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "Model Card for Japanese character-level DeBERTa V2 tiny Model description This is a Japanese DeBERTa V2 tiny model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": 
"https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese-char-wwm", + "project_name": "deberta-v2-tiny-japanese-char-wwm", + "downloads": 552, + "source": "Hugging Face", + "score": -0.0904004006239608, + "first_commit": "2023-01-05 08:48:29", + "latest_commit": "2023-03-23 07:31:19", "languages": [], "model_or_dataset": "model", - "model_size": 1.44, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 0.0101, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "japanese-large-lm-1.7b-instruction-sft This repository provides a 1.7B parameters Japanese language model, fine-tuned and trained by LINE Corporation.", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft", - "project_name": "japanese-large-lm-1.7b-instruction-sft", - "downloads": 566, + "description": "ELYZA-japanese-CodeLlama-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-gguf", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-gguf", + "downloads": 551, "source": "Hugging Face", - "score": -0.08679260057269335, - "first_commit": "2023-08-14 17:19:11", - "latest_commit": "2023-08-14 17:19:11", + "score": -0.09041011865645981, + "first_commit": "2023-11-15 09:48:32", + "latest_commit": "2023-11-16 14:28:24", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 6.74, + "model_architectures": null + }, + { + "description": "Japanese StableLM-3B-4E1T Base Model Description This is a 3B-parameter decoder-only language model with a focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-base", + "project_name": "japanese-stablelm-3b-4e1t-base", + "downloads": 549, + "source": "Hugging Face", + "score": -0.09042955472145782, + "first_commit": "2023-10-16 06:04:58", + "latest_commit": "2024-04-26 03:20:34", + "languages": [], + "model_or_dataset": "model", + "model_size": 2.8, + "model_architectures": "StableLMEpochForCausalLM" + }, + { + "description": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "project_name": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "downloads": 547, + "source": "Hugging Face", + "score": -0.09044899078645582, + "first_commit": "2024-01-29 12:52:31", + "latest_commit": "2024-02-07 19:49:25", + "languages": [], + "model_or_dataset": "model", + "model_size": 12.9, "model_architectures": "GPT2LMHeadModel" }, { - "description": "[Llama-3-EZO model card]", - "url": "https://huggingface.co./AXCXEPT/Llama-3-EZO-8b-Common-it", - "project_name": "Llama-3-EZO-8b-Common-it", - "downloads": 564, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-hf", + "project_name": "Swallow-70b-NVE-hf", + "downloads": 546, "source": "Hugging Face", - "score": -0.08681381047376602, - "first_commit": "2024-07-13 06:42:31", - "latest_commit": "2024-08-23 10:52:05", + "score": -0.09045870881895482, + "first_commit": "2023-12-07 07:34:35", + "latest_commit": "2024-06-29 08:56:25", "languages": [], "model_or_dataset": "model", - 
"model_size": 8.03, + "model_size": null, "model_architectures": "LlamaForCausalLM" }, { - "description": "オリジナルのサイトと同じものを使用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/llm-jp-eval", - "project_name": "llm-jp-eval", - "downloads": 564, + "description": "Llama-3.1-70B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-70B-EZO-1.1-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-70B-EZO-1.1-it-gguf", + "project_name": "Llama-3.1-70B-EZO-1.1-it-gguf", + "downloads": 542, "source": "Hugging Face", - "score": -0.08681381047376602, - "first_commit": "2024-06-19 10:31:57", - "latest_commit": "2024-08-31 12:40:31", + "score": -0.09049758094895083, + "first_commit": "2024-07-31 12:12:13", + "latest_commit": "2024-07-31 21:47:25", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "bilingual-gpt-neox-4b-instruction-ppo Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", - "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-instruction-ppo", - "project_name": "bilingual-gpt-neox-4b-instruction-ppo", - "downloads": 563, + "description": "japanese-large-lm-1.7b-instruction-sft This repository provides a 1.7B parameters Japanese language model, fine-tuned and trained by LINE Corporation.", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft", + "project_name": "japanese-large-lm-1.7b-instruction-sft", + "downloads": 528, "source": "Hugging Face", - "score": -0.08682441542430236, - "first_commit": "2023-08-02 05:56:07", - "latest_commit": "2024-07-20 08:05:14", + "score": -0.09063363340393688, + "first_commit": "2023-08-14 17:19:11", + "latest_commit": "2023-08-14 17:19:11", "languages": [], "model_or_dataset": "model", - "model_size": 3.95, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": null, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "DataPilot-ArrowPro-7B-KUJIRA-gguf DataPilotさんが公開しているArrowPro-7B-KUJIRAのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-KUJIRA-gguf", - "project_name": "DataPilot-ArrowPro-7B-KUJIRA-gguf", - "downloads": 560, + "description": "[Llama-3.1-70B-EZO-1.1-it] Model Card モデル情報 / Model Information このモデルは、Meta AI の Llama 3.1 をベースに、日本語タスクでの性能を向上させるためにファインチューニングを行ったものです。", + "url": "https://huggingface.co./AXCXEPT/Llama-3.1-70B-EZO-1.1-it", + "project_name": "Llama-3.1-70B-EZO-1.1-it", + "downloads": 526, "source": "Hugging Face", - "score": -0.08685623027591136, - "first_commit": "2024-05-09 13:21:27", - "latest_commit": "2024-05-11 07:24:16", + "score": -0.09065306946893488, + "first_commit": "2024-07-29 01:35:35", + "latest_commit": "2024-08-23 10:52:31", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": 70.6, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Sarashina1-7B This repository provides Japanese language models trained by SB Intuitions.", - "url": "https://huggingface.co./sbintuitions/sarashina1-7b", - "project_name": "sarashina1-7b", - "downloads": 560, + "description": "stockmark/gpt-neox-japanese-1.4b This repository provides a GPT-NeoX based model with 1.4B parameters pre-trained on Japanese corpus of about 20B tokens.", + "url": "https://huggingface.co./stockmark/gpt-neox-japanese-1.4b", + "project_name": "gpt-neox-japanese-1.4b", + "downloads": 520, "source": "Hugging Face", - "score": 
-0.08685623027591136, - "first_commit": "2024-06-07 10:13:21", - "latest_commit": "2024-06-27 06:55:38", + "score": -0.0907113776639289, + "first_commit": "2023-08-06 07:37:38", + "latest_commit": "2023-09-07 03:44:19", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 1.44, "model_architectures": "GPTNeoXForCausalLM" }, { "description": "オリジナルのサイトと同じものを使用しています。 ", "url": "https://huggingface.co./datasets/llm-book/livedoor-news-corpus", "project_name": "livedoor-news-corpus", - "downloads": 559, + "downloads": 517, "source": "Hugging Face", - "score": -0.08686683522644768, + "score": -0.0907405317614259, "first_commit": "2023-06-21 07:16:52", "latest_commit": "2023-12-12 11:19:43", "languages": [], @@ -5455,82 +5721,40 @@ "model_architectures": null }, { - "description": "ELYZA-japanese-Llama-2-7b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fastのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-gguf", - "downloads": 557, - "source": "Hugging Face", - "score": -0.08688804512752035, - "first_commit": "2023-08-29 07:23:20", - "latest_commit": "2023-11-16 14:27:36", - "languages": [], - "model_or_dataset": "model", - "model_size": 6.85, - "model_architectures": null - }, - { - "description": "Japanese-StableLM-Base-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-ja_vocab-beta-7b", - "project_name": "japanese-stablelm-base-ja_vocab-beta-7b", - "downloads": 555, + "description": "PLaMo-13B-Instruct-NC Model Description PLaMo-13B-Instruct-NC is a noncommercial instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", + "url": "https://huggingface.co./pfnet/plamo-13b-instruct-nc", + "project_name": "plamo-13b-instruct-nc", + "downloads": 514, "source": "Hugging Face", - "score": -0.08690925502859302, - "first_commit": "2023-10-30 07:49:15", - "latest_commit": "2023-12-19 06:45:58", + "score": -0.09076968585892291, + "first_commit": "2023-10-26 05:36:26", + "latest_commit": "2024-01-25 07:46:45", "languages": [], "model_or_dataset": "model", - "model_size": 6.88, - "model_architectures": "LlamaForCausalLM" + "model_size": 13.1, + "model_architectures": "PlamoForCausalLM" }, { - "description": "Qwen1.5-110B-Chat-gguf Qwenさんが公開しているQwen1.5-110B-Chatのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Qwen1.5-110B-Chat-gguf", - "project_name": "Qwen1.5-110B-Chat-gguf", - "downloads": 554, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-shashin\", split=\"train\") 概要 株式会社東建コーポレーションが運営するホームメイト・リサーチによる『ホームメイト川柳大賞』のうち、お題が画像形式で提供される『写真川柳』に関するクロールデータです。", + "url": "https://huggingface.co./datasets/YANS-official/senryu-shashin", + "project_name": "senryu-shashin", + "downloads": 509, "source": "Hugging Face", - "score": -0.08691985997912936, - "first_commit": "2024-04-27 19:35:48", - "latest_commit": "2024-04-28 08:09:17", + "score": -0.09081827602141791, + "first_commit": "2024-08-28 18:50:08", + "latest_commit": "2024-08-31 03:47:50", "languages": [], - 
"model_or_dataset": "model", - "model_size": 111.0, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, - { - "description": "bilingual-gpt-neox-4b-8k Overview Notice: This model requires transformers>=4.31.0 to work properly.", - "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-8k", - "project_name": "bilingual-gpt-neox-4b-8k", - "downloads": 554, - "source": "Hugging Face", - "score": -0.08691985997912936, - "first_commit": "2023-07-31 02:34:21", - "latest_commit": "2024-07-20 08:03:16", - "languages": [], - "model_or_dataset": "model", - "model_size": 3.95, - "model_architectures": "GPTNeoXForCausalLM" - }, - { - "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", - "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-chat-v0.1", - "project_name": "karakuri-lm-70b-chat-v0.1", - "downloads": 550, - "source": "Hugging Face", - "score": -0.0869622797812747, - "first_commit": "2024-01-26 09:08:09", - "latest_commit": "2024-05-07 09:00:17", - "languages": [], - "model_or_dataset": "model", - "model_size": 69.2, - "model_architectures": "LlamaForCausalLM" - }, { "description": "Sarashina1-65B", "url": "https://huggingface.co./sbintuitions/sarashina1-65b", "project_name": "sarashina1-65b", - "downloads": 546, + "downloads": 507, "source": "Hugging Face", - "score": -0.08700469958342003, + "score": -0.09083771208641593, "first_commit": "2024-06-07 11:57:56", "latest_commit": "2024-06-27 06:56:36", "languages": [], @@ -5539,194 +5763,152 @@ "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "DeBERTa V2 base Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", - "url": "https://huggingface.co./izumi-lab/deberta-v2-base-japanese", - "project_name": "deberta-v2-base-japanese", - "downloads": 542, - "source": "Hugging Face", - "score": -0.08704711938556536, - "first_commit": "2023-10-21 13:24:11", - "latest_commit": "2024-07-19 03:07:57", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" - }, - { - "description": "Japanese CLIP ViT-H/14 (Wider) Table of Contents Overview Usage Model Details Evaluation Limitations and Biases Citation See Also Contact Information Overview Developed by:", - "url": "https://huggingface.co./hakuhodo-tech/japanese-clip-vit-h-14-bert-wider", - "project_name": "japanese-clip-vit-h-14-bert-wider", - "downloads": 540, + "description": "Japanese-StableLM-Base-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-ja_vocab-beta-7b", + "project_name": "japanese-stablelm-base-ja_vocab-beta-7b", + "downloads": 504, "source": "Hugging Face", - "score": -0.08706832928663803, - "first_commit": "2024-03-06 03:30:25", - "latest_commit": "2024-03-06 21:46:11", + "score": -0.09086686618391293, + "first_commit": "2023-10-30 07:49:15", + "latest_commit": "2023-12-19 06:45:58", "languages": [], "model_or_dataset": "model", - "model_size": 0.91, - "model_architectures": "CustomCLIPModel" + "model_size": 6.88, + "model_architectures": "LlamaForCausalLM" }, { - "description": "(简体中文|English|日本語) Introduction 
github repo : https://github.com/FunAudioLLM/SenseVoice SenseVoice is a speech foundation model with multiple speech understanding capabilities, including automatic speech recognition (ASR), spoken language identification (LID), speech emotion recognition (SER), and audio event detection (AED).", - "url": "https://huggingface.co./FunAudioLLM/SenseVoiceSmall", - "project_name": "SenseVoiceSmall", - "downloads": 539, + "description": "Model Card for Japanese character-level GPT-2 Small Model description This is a Japanese character-level GPT-2 Small (90M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/gpt2-small-japanese-char", + "project_name": "gpt2-small-japanese-char", + "downloads": 502, "source": "Hugging Face", - "score": -0.08707893423717437, - "first_commit": "2024-07-03 03:56:49", - "latest_commit": "2024-07-31 05:47:48", + "score": -0.09088630224891094, + "first_commit": "2023-04-18 08:24:55", + "latest_commit": "2023-05-08 10:08:13", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 0.10300000000000001, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf aixsatoshiさんが公開しているLlama-3-8b-Cosmopedia-japaneseのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", - "project_name": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", - "downloads": 536, + "description": "※llama.cpp Releases b3428(7/21)", + "url": "https://huggingface.co./MCZK/EZO-Common-9B-gemma-2-it-GGUF", + "project_name": "EZO-Common-9B-gemma-2-it-GGUF", + "downloads": 487, "source": "Hugging Face", - "score": -0.08711074908878337, - "first_commit": "2024-05-01 12:36:43", - "latest_commit": "2024-05-19 08:27:21", + "score": -0.09103207273639598, + "first_commit": "2024-07-10 11:12:59", + "latest_commit": "2024-07-21 11:26:08", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 9.24, "model_architectures": null }, { - "description": "Model Card for Japanese character-level DeBERTa V2 tiny Model description This is a Japanese DeBERTa V2 tiny model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese-char-wwm", - "project_name": "deberta-v2-tiny-japanese-char-wwm", - "downloads": 534, - "source": "Hugging Face", - "score": -0.08713195898985604, - "first_commit": "2023-01-05 08:48:29", - "latest_commit": "2023-03-23 07:31:19", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.0101, - "model_architectures": "DebertaV2ForMaskedLM" - }, - { - "description": "bert-base-japanese-v3-jnli 「大規模言語モデル入門」の第5章で紹介している(自然言語推論)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jnli", - "project_name": "bert-base-japanese-v3-jnli", - "downloads": 527, + "description": "Sarashina1-13B", + "url": "https://huggingface.co./sbintuitions/sarashina1-13b", + "project_name": "sarashina1-13b", + "downloads": 485, "source": "Hugging Face", - "score": -0.08720619364361037, - "first_commit": "2023-06-12 14:15:16", - "latest_commit": "2023-07-24 06:49:14", + "score": -0.09105150880139398, + "first_commit": "2024-06-07 11:56:53", + "latest_commit": "2024-06-27 06:56:06", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": 
"BertForSequenceClassification" + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "hotchpotch/japanese-bge-reranker-v2-m3-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-bge-reranker-v2-m3-v1", - "project_name": "japanese-bge-reranker-v2-m3-v1", - "downloads": 518, + "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with LlamaEdge LlamaEdge version: v0.10.1 and above Prompt template Prompt type: llama-3-chat Prompt string <|begin_of_text|><|start_header_id|>system<|end_header_id|> {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> {{ model_answer_1 }}<|eot_id|><|start_header", + "url": "https://huggingface.co./second-state/Llama-3-8B-Japanese-Instruct-GGUF", + "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", + "downloads": 482, "source": "Hugging Face", - "score": -0.08730163819843738, - "first_commit": "2024-03-28 20:45:16", - "latest_commit": "2024-04-01 02:40:22", + "score": -0.091080662898891, + "first_commit": "2024-05-14 05:37:53", + "latest_commit": "2024-05-14 06:42:38", "languages": [], "model_or_dataset": "model", - "model_size": 0.5680000000000001, - "model_architectures": "XLMRobertaForSequenceClassification" + "model_size": 8.03, + "model_architectures": null }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-shashin\", split=\"train\") 概要 株式会社東建コーポレーションが運営するホームメイト・リサーチによる『ホームメイト川柳大賞』のうち、お題が画像形式で提供される『写真川柳』に関するクロールデータです。", - "url": "https://huggingface.co./datasets/YANS-official/senryu-shashin", - "project_name": "senryu-shashin", - "downloads": 507, + "description": "Qwen1.5-110B-Chat-gguf Qwenさんが公開しているQwen1.5-110B-Chatのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Qwen1.5-110B-Chat-gguf", + "project_name": "Qwen1.5-110B-Chat-gguf", + "downloads": 472, "source": "Hugging Face", - "score": -0.08741829265433705, - "first_commit": "2024-08-28 18:50:08", - "latest_commit": "2024-08-31 03:47:50", + "score": -0.09117784322388102, + "first_commit": "2024-04-27 19:35:48", + "latest_commit": "2024-04-28 08:09:17", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 111.0, "model_architectures": null }, { - "description": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-T2-2B-gemma-2-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", - "project_name": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", - "downloads": 502, + "description": "bert-base-japanese-wikipedia-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-wikipedia-ud-head", + "project_name": "bert-base-japanese-wikipedia-ud-head", + "downloads": 472, "source": "Hugging Face", - "score": -0.08747131740701872, - "first_commit": "2024-08-01 17:32:31", - "latest_commit": "2024-08-01 18:38:31", + "score": -0.09117784322388102, + "first_commit": "2022-06-20 21:58:53", + "latest_commit": "2023-03-04 20:16:55", "languages": [], "model_or_dataset": "model", - "model_size": 2.61, - "model_architectures": null + "model_size": null, + "model_architectures": "BertForQuestionAnswering" }, { - "description": "ku-nlp/roberta-base-japanese-char-wwm Model description This is a Japanese RoBERTa base model pre-trained on Japanese Wikipedia and the Japanese portion of 
CC-100.", - "url": "https://huggingface.co./ku-nlp/roberta-base-japanese-char-wwm", - "project_name": "roberta-base-japanese-char-wwm", - "downloads": 501, + "description": "Stockmark-13b-instruct Stockmark-13b-instruct is an instruction-tuned version of Stockmark-13b, a 13 billion parameter Japanese LLM.", + "url": "https://huggingface.co./stockmark/stockmark-13b-instruct", + "project_name": "stockmark-13b-instruct", + "downloads": 470, "source": "Hugging Face", - "score": -0.08748192235755506, - "first_commit": "2022-09-20 05:07:34", - "latest_commit": "2023-03-20 08:05:45", + "score": -0.09119727928887902, + "first_commit": "2023-11-08 16:56:34", + "latest_commit": "2023-11-08 17:02:17", "languages": [], "model_or_dataset": "model", - "model_size": 0.1, - "model_architectures": "RobertaForMaskedLM" + "model_size": 13.2, + "model_architectures": "LlamaForCausalLM" }, { "description": "stockmark-gpt-neox-japanese-1.4b-gguf stockmarkさんが公開しているgpt-neox-japanese-1.4bのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/stockmark-gpt-neox-japanese-1.4b-gguf", "project_name": "stockmark-gpt-neox-japanese-1.4b-gguf", - "downloads": 496, + "downloads": 470, "source": "Hugging Face", - "score": -0.08753494711023672, + "score": -0.09119727928887902, "first_commit": "2023-08-22 12:45:18", "latest_commit": "2023-09-08 22:00:37", "languages": [], "model_or_dataset": "model", - "model_size": 1.41, + "model_size": null, "model_architectures": null }, { - "description": "ELYZA-japanese-Llama-2-7b-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-gguf", - "downloads": 495, + "description": "haqishen-Llama-3-8B-Japanese-Instruct-gguf haqishenさ���が公開しているLlama-3-8B-Japanese-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/haqishen-Llama-3-8B-Japanese-Instruct-gguf", + "project_name": "haqishen-Llama-3-8B-Japanese-Instruct-gguf", + "downloads": 469, "source": "Hugging Face", - "score": -0.08754555206077305, - "first_commit": "2023-08-29 06:32:01", - "latest_commit": "2023-11-16 14:27:12", + "score": -0.09120699732137802, + "first_commit": "2024-04-23 13:55:17", + "latest_commit": "2024-04-23 14:54:23", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, + "model_size": 8.03, "model_architectures": null }, - { - "description": "alabnii/jmedroberta-base-manbyo-wordpiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece", - "project_name": "jmedroberta-base-manbyo-wordpiece", - "downloads": 494, - "source": "Hugging Face", - "score": -0.0875561570113094, - "first_commit": "2022-12-22 17:17:03", - "latest_commit": "2023-03-08 01:44:36", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForMaskedLM" - }, { "description": "Japanese-StableLM-Instruct-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on japanese-stablelm-ja_vocab-beta-7b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-ja_vocab-beta-7b", "project_name": 
"japanese-stablelm-instruct-ja_vocab-beta-7b", - "downloads": 482, + "downloads": 460, "source": "Hugging Face", - "score": -0.0876834164177454, + "score": -0.09129445961386905, "first_commit": "2023-10-30 07:49:38", "latest_commit": "2023-12-19 06:46:01", "languages": [], @@ -5734,55 +5916,13 @@ "model_size": 6.88, "model_architectures": "LlamaForCausalLM" }, - { - "description": "このツールは、複数のデータセットを横断して日本語の大規模言語モデルを自動評価するものです.", - "url": "https://github.com/llm-jp/llm-jp-eval", - "project_name": "llm-jp-eval", - "stargazers_count": 84, - "source": "GitHub", - "score": -0.08771503230673067, - "first_commit": "2023-10-19 19:36:10", - "latest_commit": "2024-07-18 21:31:53", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, - { - "description": "To load a language pair which isn't part of the config, all you need to do is specify the language code as pairs.", - "url": "https://huggingface.co./datasets/Helsinki-NLP/tatoeba", - "project_name": "tatoeba", - "downloads": 479, - "source": "Hugging Face", - "score": -0.0877152312693544, - "first_commit": "2022-01-25 16:36:30", - "latest_commit": "2024-01-18 11:16:48", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, - { - "description": "Japanese StableLM-3B-4E1T Base Model Description This is a 3B-parameter decoder-only language model with a focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-base", - "project_name": "japanese-stablelm-3b-4e1t-base", - "downloads": 478, - "source": "Hugging Face", - "score": -0.08772583621989073, - "first_commit": "2023-10-16 06:04:58", - "latest_commit": "2024-04-26 03:20:34", - "languages": [], - "model_or_dataset": "model", - "model_size": 2.8, - "model_architectures": "StableLMEpochForCausalLM" - }, { "description": "This repository contains some GGUF quantizations of the merge of the VNTL LLaMA 3 8B qlora.", "url": "https://huggingface.co./lmg-anon/vntl-llama3-8b-gguf", "project_name": "vntl-llama3-8b-gguf", - "downloads": 477, + "downloads": 459, "source": "Hugging Face", - "score": -0.08773644117042706, + "score": -0.09130417764636806, "first_commit": "2024-06-13 17:17:30", "latest_commit": "2024-06-15 17:33:02", "languages": [], @@ -5790,27 +5930,13 @@ "model_size": 8.03, "model_architectures": null }, - { - "description": "japanese-gpt-neox-small This repository provides a small-sized Japanese GPT-NeoX model.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-small", - "project_name": "japanese-gpt-neox-small", - "downloads": 477, - "source": "Hugging Face", - "score": -0.08773644117042706, - "first_commit": "2022-08-31 05:58:25", - "latest_commit": "2024-07-20 07:53:40", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.20400000000000001, - "model_architectures": "GPTNeoXForCausalLM" - }, { "description": "This repository is publicly accessible, but you have to accept the conditions to access its files and content.", "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b", "project_name": "japanese-stablelm-instruct-alpha-7b", - "downloads": 473, + "downloads": 458, "source": "Hugging Face", - "score": -0.0877788609725724, + "score": -0.09131389567886705, "first_commit": null, "latest_commit": null, "languages": [], @@ -5819,656 +5945,586 @@ "model_architectures": null }, { - "description": "Japanese-Starling-ChatV-7B-GGUF GGUF conversion of 
\"Japanese-Starling-ChatV-7B\" \"Japanese-Starling-ChatV-7B\" is a Japanese chat model built on top of \"chatntq-ja-7b-v1.0\", originally based on Mistral-7B-v0.1.", - "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B-GGUF", - "project_name": "Japanese-Starling-ChatV-7B-GGUF", - "downloads": 473, - "source": "Hugging Face", - "score": -0.0877788609725724, - "first_commit": "2024-04-14 12:42:01", - "latest_commit": "2024-04-20 01:23:10", - "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null - }, - { - "description": "このモデルはluke-japanese-baseをファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-ner", - "project_name": "luke-japanese-base-finetuned-ner", - "downloads": 471, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-NVE-hf", + "project_name": "Swallow-13b-NVE-hf", + "downloads": 455, "source": "Hugging Face", - "score": -0.08780007087364507, - "first_commit": "2023-01-17 23:36:52", - "latest_commit": "2023-05-12 00:36:17", + "score": -0.09134304977636407, + "first_commit": "2024-01-30 11:39:05", + "latest_commit": "2024-06-29 08:56:22", "languages": [], "model_or_dataset": "model", - "model_size": 0.279, - "model_architectures": "LukeForTokenClassification" + "model_size": null, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Llama-3.1-70B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-70B-EZO-1.1-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-70B-EZO-1.1-it-gguf", - "project_name": "Llama-3.1-70B-EZO-1.1-it-gguf", - "downloads": 465, + "description": "ELYZA-japanese-Llama-2-7b-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-gguf", + "downloads": 451, "source": "Hugging Face", - "score": -0.08786370057686307, - "first_commit": "2024-07-31 12:12:13", - "latest_commit": "2024-07-31 21:47:25", + "score": -0.09138192190636008, + "first_commit": "2023-08-29 06:32:01", + "latest_commit": "2023-11-16 14:27:12", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, + "model_size": 6.74, "model_architectures": null }, { - "description": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf umiyukiさんが公開しているUmievo-itr012-Gleipnir-7Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf", - "project_name": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf", - "downloads": 461, + "description": "rinna-llama-3-youko-8b-gguf rinnaさんが公開しているllama-3-youko-8bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-8b-gguf", + "project_name": "rinna-llama-3-youko-8b-gguf", + "downloads": 446, "source": "Hugging Face", - "score": -0.08790612037900841, - "first_commit": "2024-05-29 15:05:32", - "latest_commit": "2024-05-29 15:53:40", + "score": -0.09143051206885508, + "first_commit": "2024-05-01 14:17:53", + "latest_commit": "2024-05-01 15:11:21", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 8.03, "model_architectures": null }, { - "description": "This is a model for named entity recognition of Japanese medical documents.", - "url": "https://huggingface.co./sociocom/MedNER-CR-JA", - "project_name": "MedNER-CR-JA", - "downloads": 460, - 
"source": "Hugging Face", - "score": -0.08791672532954474, - "first_commit": "2022-08-23 03:30:43", - "latest_commit": "2024-07-31 07:44:00", - "languages": [], - "model_or_dataset": "model", - "model_size": 0.11, - "model_architectures": "BertForTokenClassification" - }, - { - "description": "Sarashina1-13B", - "url": "https://huggingface.co./sbintuitions/sarashina1-13b", - "project_name": "sarashina1-13b", - "downloads": 460, - "source": "Hugging Face", - "score": -0.08791672532954474, - "first_commit": "2024-06-07 11:56:53", - "latest_commit": "2024-06-27 06:56:06", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPTNeoXForCausalLM" - }, - { - "description": "Overview This dataset provides a convenient and user-friendly format of data from Aozora Bunko (青空文庫), a website that compiles public-domain books in Japan, ideal for Machine Learning applications.", - "url": "https://huggingface.co./datasets/globis-university/aozorabunko-clean", - "project_name": "aozorabunko-clean", - "downloads": 458, + "description": "To load a language pair which isn't part of the config, all you need to do is specify the language code as pairs.", + "url": "https://huggingface.co./datasets/Helsinki-NLP/tatoeba", + "project_name": "tatoeba", + "downloads": 443, "source": "Hugging Face", - "score": -0.08793793523061741, - "first_commit": "2023-06-26 13:31:28", - "latest_commit": "2023-10-27 13:22:32", + "score": -0.0914596661663521, + "first_commit": "2022-01-25 16:36:30", + "latest_commit": "2024-01-18 11:16:48", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-base", - "project_name": "luke-japanese-base", - "downloads": 455, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-test\", split=\"test\") 概要 川柳投稿サイトの『写真川柳』と『川柳投稿まるせん』のクロールデータ、および YANS 委員が作成したデータを含みます。 ", + "url": "https://huggingface.co./datasets/YANS-official/senryu-test", + "project_name": "senryu-test", + "downloads": 442, "source": "Hugging Face", - "score": -0.0879697500822264, - "first_commit": "2022-10-25 06:30:23", - "latest_commit": "2022-11-09 15:23:20", + "score": -0.09146938419885109, + "first_commit": "2024-09-03 15:02:47", + "latest_commit": "2024-09-09 05:53:26", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "LukeForMaskedLM" + "model_architectures": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-NVE-hf", - "project_name": "Swallow-13b-NVE-hf", - "downloads": 453, + "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b-instruct", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct", + "downloads": 435, "source": "Hugging Face", - "score": -0.08799095998329907, - "first_commit": "2024-01-30 11:39:05", - "latest_commit": "2024-06-29 08:56:22", + "score": -0.09153741042634411, + 
"first_commit": "2023-11-07 12:04:07", + "latest_commit": "2023-11-17 05:01:00", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 6.74, "model_architectures": "LlamaForCausalLM" }, { - "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", - "url": "https://huggingface.co./fishaudio/fish-speech-1.2", - "project_name": "fish-speech-1.2", - "downloads": 449, + "description": "JA-VG-VQA-500 Dataset Description JA-VG-VQA-500 is a 500-sample subset of Japanese Visual Genome VQA dataset.", + "url": "https://huggingface.co./datasets/SakanaAI/JA-VG-VQA-500", + "project_name": "JA-VG-VQA-500", + "downloads": 434, "source": "Hugging Face", - "score": -0.08803337978544441, - "first_commit": "2024-07-02 04:24:09", - "latest_commit": "2024-07-02 04:31:26", + "score": -0.09154712845884312, + "first_commit": "2024-03-21 09:51:10", + "latest_commit": "2024-05-14 04:11:31", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "ELYZA-japanese-CodeLlama-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-gguf", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-gguf", - "downloads": 448, + "description": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-T2-2B-gemma-2-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", + "project_name": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", + "downloads": 433, "source": "Hugging Face", - "score": -0.08804398473598075, - "first_commit": "2023-11-15 09:48:32", - "latest_commit": "2023-11-16 14:28:24", + "score": -0.09155684649134212, + "first_commit": "2024-08-01 17:32:31", + "latest_commit": "2024-08-01 18:38:31", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, + "model_size": 2.61, "model_architectures": null }, { - "description": "bert-base-japanese-wikipedia-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-wikipedia-ud-head", - "project_name": "bert-base-japanese-wikipedia-ud-head", - "downloads": 447, + "description": "このモデルはluke-japanese-baseをファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-ner", + "project_name": "luke-japanese-base-finetuned-ner", + "downloads": 428, "source": "Hugging Face", - "score": -0.08805458968651708, - "first_commit": "2022-06-20 21:58:53", - "latest_commit": "2023-03-04 20:16:55", + "score": -0.09160543665383714, + "first_commit": "2023-01-17 23:36:52", + "latest_commit": "2023-05-12 00:36:17", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForQuestionAnswering" - }, - { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-test\", split=\"test\") 概要 川柳投稿サイトの『写真川柳』と『川柳投稿まるせん』のクロールデータ、および YANS 委員が作成したデータを含みます。 ", - "url": "https://huggingface.co./datasets/YANS-official/senryu-test", - "project_name": "senryu-test", - "downloads": 440, - "source": "Hugging Face", - "score": -0.08812882434027142, - "first_commit": "2024-09-03 15:02:47", - "latest_commit": "2024-09-09 05:53:26", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + 
"model_size": 0.279, + "model_architectures": "LukeForTokenClassification" }, { - "description": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf umiyukiさんが公開しているJapanese-Chat-Umievo-itr001-7bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", - "project_name": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", - "downloads": 426, + "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of June 25, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", + "url": "https://huggingface.co./sazyou-roukaku/LittleStepMix", + "project_name": "LittleStepMix", + "downloads": 413, "source": "Hugging Face", - "score": -0.08827729364778009, - "first_commit": "2024-04-27 09:55:39", - "latest_commit": "2024-04-27 10:52:17", + "score": -0.09175120714132218, + "first_commit": "2023-06-25 06:57:42", + "latest_commit": "2023-07-04 10:47:46", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": null, "model_architectures": null }, { - "description": "ELYZA-japanese-Llama-2-13b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fastのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-gguf", - "project_name": "ELYZA-japanese-Llama-2-13b-fast-gguf", - "downloads": 424, + "description": "Ninja-v1-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-gguf", + "project_name": "Ninja-v1-gguf", + "downloads": 403, "source": "Hugging Face", - "score": -0.08829850354885276, - "first_commit": "2023-12-27 10:40:52", - "latest_commit": "2023-12-27 13:18:46", + "score": -0.0918483874663122, + "first_commit": "2024-05-03 14:03:22", + "latest_commit": "2024-05-04 13:26:22", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, + "model_size": 7.24, "model_architectures": null }, { - "description": "JQaRA : Japanese Question Answering with Retrieval Augmentation - 検索拡張(RAG)評価のための日本語 Q&A データセット 高性能な LLM の台頭に伴い、LLM を用いた質疑応答のユースケースが増加しています。", - "url": "https://huggingface.co./datasets/hotchpotch/JQaRA", - "project_name": "JQaRA", - "downloads": 421, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-marusen\", split=\"train\") 概要 月に1万句以上の投稿がある国内最大級の川柳投稿サイト『川柳投稿まるせん』のクロールデータです。", + "url": "https://huggingface.co./datasets/YANS-official/senryu-marusen", + "project_name": "senryu-marusen", + "downloads": 400, "source": "Hugging Face", - "score": -0.08833031840046177, - "first_commit": "2024-03-03 01:58:34", - "latest_commit": "2024-08-10 02:56:05", + "score": -0.0918775415638092, + "first_commit": "2024-08-28 18:49:03", + "latest_commit": "2024-08-30 11:41:46", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Japanese Natural Language Inference Model", - "url": "https://huggingface.co./cyberagent/xlm-roberta-large-jnli-jsick", - "project_name": "xlm-roberta-large-jnli-jsick", - "downloads": 418, + "description": "Dataset Details Dataset Type:Japanese LLaVA Instruct 150K is a localized version of the original LLaVA Visual Instruct 150K dataset.", + "url": "https://huggingface.co./datasets/turing-motors/LLaVA-Instruct-150K-JA", + "project_name": "LLaVA-Instruct-150K-JA", + "downloads": 395, "source": "Hugging Face", - "score": -0.08836213325207076, - "first_commit": "2022-12-23 10:51:12", - "latest_commit": "2022-12-23 10:51:12", + "score": 
-0.09192613172630422, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "XLMRobertaForSequenceClassification" + "model_architectures": null }, { - "description": "rinna/japanese-hubert-large Overview This is a Japanese HuBERT Large model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-hubert-large", - "project_name": "japanese-hubert-large", - "downloads": 414, + "description": "mt5_summarize_japanese (Japanese caption : 日本語の要約のモデル)", + "url": "https://huggingface.co./tsmatz/mt5_summarize_japanese", + "project_name": "mt5_summarize_japanese", + "downloads": 393, "source": "Hugging Face", - "score": -0.0884045530542161, - "first_commit": "2024-03-05 10:24:37", - "latest_commit": "2024-07-22 08:12:21", + "score": -0.09194556779130222, + "first_commit": "2022-11-26 10:51:27", + "latest_commit": "2024-07-12 00:01:31", "languages": [], "model_or_dataset": "model", - "model_size": 0.315, - "model_architectures": "HubertModel" + "model_size": 0.3, + "model_architectures": "MT5ForConditionalGeneration" }, { - "description": "This is a Japanese sentence-LUKE model.", - "url": "https://huggingface.co./cheonboy/sentence_embedding_japanese", - "project_name": "sentence_embedding_japanese", - "downloads": 413, + "description": "日本語T5 Prefix Language Model", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-adapt", + "project_name": "t5-base-japanese-adapt", + "downloads": 391, "source": "Hugging Face", - "score": -0.08841515800475243, - "first_commit": "2023-10-05 05:10:25", - "latest_commit": "2023-10-05 05:13:09", + "score": -0.09196500385630023, + "first_commit": "2022-08-27 08:51:11", + "latest_commit": "2022-11-05 09:34:10", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LukeModel" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of June 25, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", - "url": "https://huggingface.co./sazyou-roukaku/LittleStepMix", - "project_name": "LittleStepMix", - "downloads": 411, + "description": "llm-book/t5-base-long-livedoor-news-corpus 「大規模言語モデル入門」の第7章で紹介している要約生成のモデルです。 ", + "url": "https://huggingface.co./llm-book/t5-base-long-livedoor-news-corpus", + "project_name": "t5-base-long-livedoor-news-corpus", + "downloads": 389, "source": "Hugging Face", - "score": -0.0884363679058251, - "first_commit": "2023-06-25 06:57:42", - "latest_commit": "2023-07-04 10:47:46", + "score": -0.09198443992129823, + "first_commit": "2023-06-27 13:32:54", + "latest_commit": "2023-07-25 13:10:36", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "hubert-base-jtube This repo provides model weights for the hubert-base model trained on the JTubeSpeech corpus. 
", - "url": "https://huggingface.co./sarulab-speech/hubert-base-jtube", - "project_name": "hubert-base-jtube", - "downloads": 410, + "description": "japanese-gpt-neox-small This repository provides a small-sized Japanese GPT-NeoX model.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-small", + "project_name": "japanese-gpt-neox-small", + "downloads": 385, "source": "Hugging Face", - "score": -0.08844697285636144, - "first_commit": "2024-02-02 04:15:22", - "latest_commit": "2024-02-05 11:49:57", + "score": -0.09202331205129424, + "first_commit": "2022-08-31 05:58:25", + "latest_commit": "2024-07-20 07:53:40", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "HubertModel" - }, - { - "description": "This is a Japanese translated version of HumanEval, an evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\".", - "url": "https://huggingface.co./datasets/kogi-jwu/jhumaneval", - "project_name": "jhumaneval", - "downloads": 410, - "source": "Hugging Face", - "score": -0.08844697285636144, - "first_commit": "2023-10-21 08:20:14", - "latest_commit": "2024-01-10 21:52:35", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_size": 0.20400000000000001, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-13b-instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-13b-instruct-v0.1-gguf", - "project_name": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf", - "downloads": 409, + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GGUF", + "project_name": "japanese-stablelm-instruct-gamma-7B-GGUF", + "downloads": 384, "source": "Hugging Face", - "score": -0.08845757780689777, - "first_commit": "2024-05-02 14:18:27", - "latest_commit": "2024-05-03 04:36:24", + "score": -0.09203303008379325, + "first_commit": "2023-10-28 19:03:17", + "latest_commit": "2023-10-28 19:07:41", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, + "model_size": 7.24, "model_architectures": null }, { - "description": "JaCWIR: Japanese Casual Web IR - 日本語情報検索評価のための小規模でカジュアルなWebタイトルと概要のデータセット ���年、大規模言語モデル(LLM)の台頭により、一般的な日本語を用いた自然な検索クエリで質問するユースケースが増えています。", - "url": "https://huggingface.co./datasets/hotchpotch/JaCWIR", - "project_name": "JaCWIR", - "downloads": 407, + "description": "Githubリポジトリstockmarkteam/ner-wikipedia-datasetで公開されているデータセットを利用しています。", + "url": "https://huggingface.co./datasets/llm-book/ner-wikipedia-dataset", + "project_name": "ner-wikipedia-dataset", + "downloads": 375, "source": "Hugging Face", - "score": -0.08847878770797044, - "first_commit": "2024-03-23 05:57:58", - "latest_commit": "2024-04-01 02:34:34", + "score": -0.09212049237628427, + "first_commit": "2023-04-15 10:43:21", + "latest_commit": "2023-12-12 11:25:51", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GGUF", - "project_name": "japanese-stablelm-instruct-gamma-7B-GGUF", - "downloads": 406, + "description": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf 
umiyukiさんが公開しているUmievo-itr012-Gleipnir-7Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf", + "project_name": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf", + "downloads": 370, "source": "Hugging Face", - "score": -0.08848939265850678, - "first_commit": "2023-10-28 19:03:17", - "latest_commit": "2023-10-28 19:07:41", + "score": -0.09216908253877928, + "first_commit": "2024-05-29 15:05:32", + "latest_commit": "2024-05-29 15:53:40", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "bert-base-japanese-v3-unsup-simcse-jawiki 「大規模言語モデル入門」の第8章で紹介している教師なしSimCSEのモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-unsup-simcse-jawiki", - "project_name": "bert-base-japanese-v3-unsup-simcse-jawiki", - "downloads": 405, + "description": "Additional pretrained BERT base Japanese finance This is a BERT model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/bert-base-japanese-fin-additional", + "project_name": "bert-base-japanese-fin-additional", + "downloads": 370, "source": "Hugging Face", - "score": -0.0884999976090431, - "first_commit": "2023-06-21 10:52:27", - "latest_commit": "2023-07-24 07:07:44", + "score": -0.09216908253877928, + "first_commit": "2022-03-11 17:41:11", + "latest_commit": "2022-12-09 00:40:25", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertModel" + "model_architectures": "BertForPreTraining" }, { - "description": "Model Card for Japanese BART large Model description", - "url": "https://huggingface.co./ku-nlp/bart-large-japanese", - "project_name": "bart-large-japanese", - "downloads": 400, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-large-short", + "project_name": "t5-large-short", + "downloads": 369, "source": "Hugging Face", - "score": -0.08855302236172477, - "first_commit": "2023-05-09 07:44:59", - "latest_commit": "2023-05-12 11:05:03", + "score": -0.09217880057127828, + "first_commit": "2023-04-26 08:18:58", + "latest_commit": "2023-05-10 10:00:54", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MBartForConditionalGeneration" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Phi-3-medium-128k-instruct-gguf microsoftさんが公開しているPhi-3-medium-128k-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Phi-3-medium-128k-instruct-gguf", - "project_name": "Phi-3-medium-128k-instruct-gguf", - "downloads": 399, + "description": "Tanuki-8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0-4kのGGUF量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-4k-GGUF", + "project_name": "Tanuki-8B-dpo-v1.0-4k-GGUF", + "downloads": 369, "source": "Hugging Face", - "score": -0.08856362731226111, - "first_commit": "2024-05-22 15:27:33", - "latest_commit": "2024-05-22 16:56:55", + "score": -0.09217880057127828, + "first_commit": "2024-08-16 12:39:31", + "latest_commit": "2024-08-27 18:05:25", "languages": [], "model_or_dataset": "model", - "model_size": 14.0, + "model_size": 7.51, "model_architectures": null }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/senryu-marusen\", split=\"train\") 概要 月に1万句以上の投稿がある国内最大級の川柳投稿サイト『川柳投稿まるせん』のクロールデータです。", - "url": "https://huggingface.co./datasets/YANS-official/senryu-marusen", - 
"project_name": "senryu-marusen", - "downloads": 398, + "description": "What’s this?", + "url": "https://huggingface.co./globis-university/deberta-v3-japanese-base", + "project_name": "deberta-v3-japanese-base", + "downloads": 369, "source": "Hugging Face", - "score": -0.08857423226279744, - "first_commit": "2024-08-28 18:49:03", - "latest_commit": "2024-08-30 11:41:46", + "score": -0.09217880057127828, + "first_commit": "2023-09-21 16:19:31", + "latest_commit": "2024-07-05 05:49:13", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "recruit-jp/japanese-typo-detector-roberta-base モデルの概要 日本語の文章を入力すると各文字ごとに誤字脱字である確率を出力します 各ラベルの意味は以下の通りです id label meaning 0 OK 誤字なし 1 deletion 1文字の抜け 2 insertion_a 余分な1文字の挿入 3 insertion_b 直前の文字列と一致する2文字以上の余分な文字の挿入 4 kanji-conversion_a 同一の読みを持つ漢字の入れ替え(誤変換) 5 kanji-conversion_b 近い読みを持つ漢字の入れ替え(誤変換) 6 substitution 1文字の入れ替え 7 transposition 隣接する2文字間の転置 8 others その他の入力誤り 誤り種類の詳細については学習データセットの元論文をご参照ください 日本語 Wikipedia の編集履歴に基づく 入力誤りデータセットと訂正システムの改良 その他、モデルの詳細については当社ブログ記事をご参照ください 誤字脱字検出モデルをHugging Face Hubに公開しました (Re", - "url": "https://huggingface.co./recruit-jp/japanese-typo-detector-roberta-base", - "project_name": "japanese-typo-detector-roberta-base", - "downloads": 397, + "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", + "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese", + "project_name": "wav2vec2-large-xlsr-japanese", + "downloads": 359, "source": "Hugging Face", - "score": -0.08858483721333378, - "first_commit": "2023-11-09 06:27:40", - "latest_commit": "2023-12-21 03:07:31", + "score": -0.09227598089626832, + "first_commit": "2021-03-28 04:21:20", + "latest_commit": "2023-02-08 00:15:23", "languages": [], "model_or_dataset": "model", - "model_size": 0.0996, - "model_architectures": "RobertaForTokenClassification" + "model_size": 0.318, + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-GGUF", - "project_name": "japanese-stablelm-instruct-beta-7B-GGUF", - "downloads": 397, + "description": "alabnii/jmedroberta-base-sentencepiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece-vocab50000", + "project_name": "jmedroberta-base-sentencepiece-vocab50000", + "downloads": 358, "source": "Hugging Face", - "score": -0.08858483721333378, - "first_commit": "2023-11-03 01:04:31", - "latest_commit": "2023-11-03 12:54:55", + "score": -0.09228569892876731, + "first_commit": "2022-12-22 17:22:14", + "latest_commit": "2023-06-27 03:44:17", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": null - }, - { - "description": "Japanese Laws This dataset comprises 8.75K law records retrieved from the official Japanese government website e-Gov. 
", - "url": "https://huggingface.co./datasets/y2lan/japan-law", - "project_name": "japan-law", - "downloads": 397, - "source": "Hugging Face", - "score": -0.08858483721333378, - "first_commit": "2023-07-20 06:26:25", - "latest_commit": "2023-07-20 06:45:14", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_size": 0.124, + "model_architectures": "BertForMaskedLM" }, { - "description": "Llama3-ArrowSE-8B-v0.3-gguf DataPilotさんが公開しているLlama3-ArrowSE-8B-v0.3のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama3-ArrowSE-8B-v0.3-gguf", - "project_name": "Llama3-ArrowSE-8B-v0.3-gguf", - "downloads": 384, + "description": "Model Card for Japanese DeBERTa V2 tiny Model description", + "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese", + "project_name": "deberta-v2-tiny-japanese", + "downloads": 357, "source": "Hugging Face", - "score": -0.08872270157030611, - "first_commit": "2024-07-07 07:27:12", - "latest_commit": "2024-07-07 09:30:16", + "score": -0.09229541696126632, + "first_commit": "2023-01-18 13:36:09", + "latest_commit": "2023-03-23 16:13:46", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": 0.013900000000000001, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "Githubリポジトリstockmarkteam/ner-wikipedia-datasetで公開されているデータセットを利用しています。", - "url": "https://huggingface.co./datasets/llm-book/ner-wikipedia-dataset", - "project_name": "ner-wikipedia-dataset", - "downloads": 381, + "description": "Anime with caption CC-0 dataset このデータセットはイラストに対する日本語キャプションを 倫理的に学習しやすくするためのデータセットです。 ", + "url": "https://huggingface.co./datasets/alfredplpl/anime-with-caption-cc0", + "project_name": "anime-with-caption-cc0", + "downloads": 357, "source": "Hugging Face", - "score": -0.08875451642191512, - "first_commit": "2023-04-15 10:43:21", - "latest_commit": "2023-12-12 11:25:51", + "score": -0.09229541696126632, + "first_commit": "2024-06-03 04:37:13", + "latest_commit": "2024-06-03 05:49:20", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "日本語T5 Prefix Language Model", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-adapt", - "project_name": "t5-base-japanese-adapt", - "downloads": 380, + "description": "OcuteusのGGUF版です。 ", + "url": "https://huggingface.co./Local-Novel-LLM-project/Ocuteus-v1-gguf", + "project_name": "Ocuteus-v1-gguf", + "downloads": 354, "source": "Hugging Face", - "score": -0.08876512137245145, - "first_commit": "2022-08-27 08:51:11", - "latest_commit": "2022-11-05 09:34:10", + "score": -0.09232457105876332, + "first_commit": "2024-05-07 09:57:49", + "latest_commit": "2024-05-10 06:18:35", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 7.24, + "model_architectures": null }, { - "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-base-lite", - "project_name": "luke-japanese-base-lite", - "downloads": 379, + "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-title-generation", + "project_name": "t5-base-japanese-title-generation", 
+ "downloads": 352, "source": "Hugging Face", - "score": -0.08877572632298779, - "first_commit": "2022-10-25 09:27:16", - "latest_commit": "2022-11-09 15:22:22", + "score": -0.09234400712376133, + "first_commit": "2021-04-04 06:57:18", + "latest_commit": "2022-02-21 13:38:09", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LukeForMaskedLM" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "whisper-large-v2-japanese-5k-steps This model is a fine-tuned version of openai/whisper-large-v2 on the Japanese CommonVoice dataset (v11)..", - "url": "https://huggingface.co./clu-ling/whisper-large-v2-japanese-5k-steps", - "project_name": "whisper-large-v2-japanese-5k-steps", - "downloads": 371, + "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", + "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct", + "project_name": "Fugaku-LLM-13B-instruct", + "downloads": 351, "source": "Hugging Face", - "score": -0.08886056592727845, - "first_commit": "2023-01-28 22:14:29", - "latest_commit": "2023-03-03 21:11:39", + "score": -0.09235372515626034, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "WhisperForConditionalGeneration" + "model_size": 13.2, + "model_architectures": null }, { - "description": "lightblue-suzume-llama-3-8B-multilingual-gguf lightblueさんが公開しているsuzume-llama-3-8B-multilingualのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-multilingual-gguf", - "project_name": "lightblue-suzume-llama-3-8B-multilingual-gguf", - "downloads": 370, + "description": "rinna/japanese-hubert-large Overview This is a Japanese HuBERT Large model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-hubert-large", + "project_name": "japanese-hubert-large", + "downloads": 350, "source": "Hugging Face", - "score": -0.08887117087781479, - "first_commit": "2024-05-06 16:31:55", - "latest_commit": "2024-05-07 12:59:57", + "score": -0.09236344318875933, + "first_commit": "2024-03-05 10:24:37", + "latest_commit": "2024-07-22 08:12:21", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": 0.315, + "model_architectures": "HubertModel" }, { - "description": "aya-23-35B-gguf CohereForAIさんが公開しているaya-23-35Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/aya-23-35B-gguf", - "project_name": "aya-23-35B-gguf", - "downloads": 366, + "description": "Japanese Natural Language Inference Model", + "url": "https://huggingface.co./cyberagent/xlm-roberta-large-jnli-jsick", + "project_name": "xlm-roberta-large-jnli-jsick", + "downloads": 349, "source": "Hugging Face", - "score": -0.08891359067996013, - "first_commit": "2024-05-26 16:32:27", - "latest_commit": "2024-05-27 00:47:56", + "score": -0.09237316122125834, + "first_commit": "2022-12-23 10:51:12", + "latest_commit": "2022-12-23 10:51:12", "languages": [], "model_or_dataset": "model", - "model_size": 35.0, - "model_architectures": null + "model_size": null, + "model_architectures": "XLMRobertaForSequenceClassification" }, { - "description": "ElanMT ElanMT-BT-en-ja is a English to Japanese translation model developed by ELAN MITSUA Project / Abstract 
Engine.", - "url": "https://huggingface.co./Mitsua/elan-mt-bt-en-ja", - "project_name": "elan-mt-bt-en-ja", - "downloads": 361, + "description": "Llama3-ArrowSE-8B-v0.3-gguf DataPilotさんが公開しているLlama3-ArrowSE-8B-v0.3のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama3-ArrowSE-8B-v0.3-gguf", + "project_name": "Llama3-ArrowSE-8B-v0.3-gguf", + "downloads": 344, "source": "Hugging Face", - "score": -0.0889666154326418, - "first_commit": "2024-05-20 01:51:18", - "latest_commit": "2024-05-20 01:53:38", + "score": -0.09242175138375334, + "first_commit": "2024-07-07 07:27:12", + "latest_commit": "2024-07-07 09:30:16", "languages": [], "model_or_dataset": "model", - "model_size": 0.0606, - "model_architectures": "MarianMTModel" + "model_size": 8.03, + "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-long", - "project_name": "t5-base-long", - "downloads": 355, + "description": "日本語VL-T5事前学習済みモデル", + "url": "https://huggingface.co./sonoisa/vl-t5-base-japanese", + "project_name": "vl-t5-base-japanese", + "downloads": 343, "source": "Hugging Face", - "score": -0.0890302451358598, - "first_commit": "2023-04-26 08:30:59", - "latest_commit": "2023-05-10 10:00:00", + "score": -0.09243146941625235, + "first_commit": "2021-10-03 11:54:43", + "latest_commit": "2021-10-04 11:13:35", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": "VLT5ModelWrapper" }, { - "description": "c4ai-command-r-plus-gguf CohereForAIさんが公開しているc4ai-command-r-plusのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/c4ai-command-r-plus-gguf", - "project_name": "c4ai-command-r-plus-gguf", - "downloads": 351, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-v0.1", + "project_name": "Swallow-13b-instruct-v0.1", + "downloads": 342, "source": "Hugging Face", - "score": -0.08907266493800513, - "first_commit": "2024-04-22 14:46:41", - "latest_commit": "2024-04-23 16:13:37", + "score": -0.09244118744875136, + "first_commit": "2024-03-04 11:30:28", + "latest_commit": "2024-06-29 09:00:15", "languages": [], "model_or_dataset": "model", + "model_size": 13.1, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Rakuda - Questions for Japanese models Repository:", + "url": "https://huggingface.co./datasets/yuzuai/rakuda-questions", + "project_name": "rakuda-questions", + "downloads": 336, + "source": "Hugging Face", + "score": -0.09249949564374538, + "first_commit": "2023-06-23 01:08:54", + "latest_commit": "2023-06-23 08:01:35", + "languages": [], + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Model Card for Japanese DeBERTa V2 tiny Model description", - "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese", - "project_name": "deberta-v2-tiny-japanese", - "downloads": 350, + "description": "Model Card for Japanese DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese", + "project_name": "deberta-v2-large-japanese", + "downloads": 335, "source": "Hugging Face", - "score": -0.08908326988854147, - 
"first_commit": "2023-01-18 13:36:09", - "latest_commit": "2023-03-23 16:13:46", + "score": -0.09250921367624437, + "first_commit": "2023-01-07 07:45:25", + "latest_commit": "2023-05-12 14:10:35", "languages": [], "model_or_dataset": "model", - "model_size": 0.013900000000000001, + "model_size": 0.373, "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "Meta-Llama-3-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3-8B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Meta-Llama-3-8B-Instruct-gguf", - "project_name": "Meta-Llama-3-8B-Instruct-gguf", - "downloads": 349, + "description": "Japanese GPT2 Lyric Model Model description", + "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-small", + "project_name": "gpt2-japanese-lyric-small", + "downloads": 331, "source": "Hugging Face", - "score": -0.0890938748390778, - "first_commit": "2024-05-12 07:18:00", - "latest_commit": "2024-05-12 08:08:38", + "score": -0.09254808580624038, + "first_commit": "2022-04-21 04:25:18", + "latest_commit": "2023-10-23 12:46:36", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": 0.123, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "Dataset Details Dataset Type:Japanese LLaVA Instruct 150K is a localized version of the original LLaVA Visual Instruct 150K dataset.", - "url": "https://huggingface.co./datasets/turing-motors/LLaVA-Instruct-150K-JA", - "project_name": "LLaVA-Instruct-150K-JA", - "downloads": 348, + "description": "Japanese-Starling-ChatV-7B-GGUF GGUF conversion of \"Japanese-Starling-ChatV-7B\" \"Japanese-Starling-ChatV-7B\" is a Japanese chat model built on top of \"chatntq-ja-7b-v1.0\", originally based on Mistral-7B-v0.1.", + "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B-GGUF", + "project_name": "Japanese-Starling-ChatV-7B-GGUF", + "downloads": 328, "source": "Hugging Face", - "score": -0.08910447978961414, - "first_commit": null, - "latest_commit": null, + "score": -0.0925772399037374, + "first_commit": "2024-04-14 12:42:01", + "latest_commit": "2024-04-20 01:23:10", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 7.24, "model_architectures": null }, { - "description": "OcuteusのGGUF版です。 ", - "url": "https://huggingface.co./Local-Novel-LLM-project/Ocuteus-v1-gguf", - "project_name": "Ocuteus-v1-gguf", - "downloads": 347, + "description": "Phi-3-medium-128k-instruct-gguf microsoftさんが公開しているPhi-3-medium-128k-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Phi-3-medium-128k-instruct-gguf", + "project_name": "Phi-3-medium-128k-instruct-gguf", + "downloads": 327, "source": "Hugging Face", - "score": -0.08911508474015047, - "first_commit": "2024-05-07 09:57:49", - "latest_commit": "2024-05-10 06:18:35", + "score": -0.09258695793623639, + "first_commit": "2024-05-22 15:27:33", + "latest_commit": "2024-05-22 16:56:55", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 14.0, "model_architectures": null }, { "description": "YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf yuisekiさんが公開しているYuisekinAIEvol-Mistral-7B-ja-math-v0.1.1のggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf", "project_name": "YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf", - "downloads": 346, + "downloads": 325, "source": "Hugging Face", - "score": -0.08912568969068681, + "score": -0.0926063940012344, "first_commit": 
"2024-04-29 14:18:07", "latest_commit": "2024-04-29 15:52:08", "languages": [], @@ -6477,98 +6533,70 @@ "model_architectures": null }, { - "description": "日本語版CLIPモデル This is a CLIP text/image encoder model for Japanese. ", - "url": "https://huggingface.co./sonoisa/clip-vit-b-32-japanese-v1", - "project_name": "clip-vit-b-32-japanese-v1", - "downloads": 340, - "source": "Hugging Face", - "score": -0.0891893193939048, - "first_commit": "2022-02-15 15:47:34", - "latest_commit": "2022-04-19 14:18:58", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertModel" - }, - { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-large-short", - "project_name": "t5-large-short", - "downloads": 336, - "source": "Hugging Face", - "score": -0.08923173919605014, - "first_commit": "2023-04-26 08:18:58", - "latest_commit": "2023-05-10 10:00:54", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" - }, - { - "description": "llm-book/t5-base-long-livedoor-news-corpus 「大規模言語モデル入門」の第7章で紹介している要約生成のモデルです。 ", - "url": "https://huggingface.co./llm-book/t5-base-long-livedoor-news-corpus", - "project_name": "t5-base-long-livedoor-news-corpus", - "downloads": 335, + "description": "ELYZA-japanese-Llama-2-13b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fastのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-gguf", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-gguf", + "downloads": 324, "source": "Hugging Face", - "score": -0.08924234414658648, - "first_commit": "2023-06-27 13:32:54", - "latest_commit": "2023-07-25 13:10:36", + "score": -0.0926161120337334, + "first_commit": "2023-12-27 10:40:52", + "latest_commit": "2023-12-27 13:18:46", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 13.1, + "model_architectures": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat", - "project_name": "Orion-14B-Chat", - "downloads": 333, + "description": "QuantFactory/Mistral-Nemo-Japanese-Instruct-2408-GGUF This is quantized version of cyberagent/Mistral-Nemo-Japanese-Instruct-2408 created using llama.cpp Original Model Card Mistral-Nemo-Japanese-Instruct-2408 Model Description", + "url": "https://huggingface.co./QuantFactory/Mistral-Nemo-Japanese-Instruct-2408-GGUF", + "project_name": "Mistral-Nemo-Japanese-Instruct-2408-GGUF", + "downloads": 321, "source": "Hugging Face", - "score": -0.08926355404765914, - "first_commit": "2024-01-16 06:03:30", - "latest_commit": "2024-04-11 10:48:51", + "score": -0.0926452661312304, + "first_commit": "2024-09-09 15:17:36", + "latest_commit": "2024-09-09 16:27:10", "languages": [], "model_or_dataset": "model", - "model_size": 14.5, - "model_architectures": "OrionForCausalLM" + "model_size": 12.2, + "model_architectures": null }, { - "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-title-generation", - "project_name": "t5-base-japanese-title-generation", - "downloads": 331, + "description": "Model Card for Japanese BART large Model description", + "url": "https://huggingface.co./ku-nlp/bart-large-japanese", + "project_name": "bart-large-japanese", + "downloads": 320, "source": "Hugging Face", - "score": -0.0892847639487318, - 
"first_commit": "2021-04-04 06:57:18", - "latest_commit": "2022-02-21 13:38:09", + "score": -0.09265498416372941, + "first_commit": "2023-05-09 07:44:59", + "latest_commit": "2023-05-12 11:05:03", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": "MBartForConditionalGeneration" }, { - "description": "What’s this?", - "url": "https://huggingface.co./globis-university/deberta-v3-japanese-large", - "project_name": "deberta-v3-japanese-large", - "downloads": 330, + "description": "rinna/japanese-gpt-neox-3.6b-instruction-ppo rinnaさんが公開しているjapanese-gpt-neox-3.6b-instruction-ppoのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf", + "project_name": "rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf", + "downloads": 317, "source": "Hugging Face", - "score": -0.08929536889926815, - "first_commit": "2023-09-21 16:15:15", - "latest_commit": "2024-07-05 05:50:06", + "score": -0.09268413826122643, + "first_commit": "2023-09-02 17:52:26", + "latest_commit": "2023-09-08 02:39:00", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_size": 3.61, + "model_architectures": null }, { - "description": "old? ", - "url": "https://huggingface.co./Lasorco/lametta_old", - "project_name": "lametta_old", - "downloads": 330, + "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", + "url": "https://huggingface.co./fishaudio/fish-speech-1.2", + "project_name": "fish-speech-1.2", + "downloads": 314, "source": "Hugging Face", - "score": -0.08929536889926815, - "first_commit": "2023-05-21 11:16:50", - "latest_commit": "2024-07-23 07:24:33", + "score": -0.09271329235872343, + "first_commit": "2024-07-02 04:24:09", + "latest_commit": "2024-07-02 04:31:26", "languages": [], "model_or_dataset": "model", "model_size": null, @@ -6578,9 +6606,9 @@ "description": "hh-rlhf-12k-ja This repository provides a human preference dataset developed by LLM-jp, a collaborative project launched in Japan.", "url": "https://huggingface.co./datasets/llm-jp/hh-rlhf-12k-ja", "project_name": "hh-rlhf-12k-ja", - "downloads": 326, + "downloads": 314, "source": "Hugging Face", - "score": -0.08933778870141348, + "score": -0.09271329235872343, "first_commit": "2024-02-04 21:19:53", "latest_commit": "2024-02-04 21:45:59", "languages": [], @@ -6589,362 +6617,388 @@ "model_architectures": null }, { - "description": "Parakeet TDT-CTC 0.6B (ja) | | parakeet-tdt_ctc-0.6b-ja is an ASR model that transcribes Japanese speech with Punctuations.", - "url": "https://huggingface.co./nvidia/parakeet-tdt_ctc-0.6b-ja", - "project_name": "parakeet-tdt_ctc-0.6b-ja", - "downloads": 325, + "description": "このモデルはluke-japanese-baseをファインチューニングして、JNLI(文章の関係性判別)に用いれる��うにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-jnli", + "project_name": "luke-japanese-base-finetuned-jnli", + "downloads": 312, "source": "Hugging Face", - "score": -0.08934839365194981, - "first_commit": "2024-05-13 15:39:30", - "latest_commit": "2024-05-17 17:20:17", + "score": -0.09273272842372143, + "first_commit": "2023-02-11 18:39:14", + "latest_commit": "2023-07-21 14:09:44", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 0.279, + "model_architectures": 
"LukeForSequenceClassification" }, { - "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b-instruct", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct", - "downloads": 325, + "description": "rinna/japanese-gpt-neox-3.6b rinnaさんが公開しているjapanese-gpt-neox-3.6bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-gguf", + "project_name": "rinna-japanese-gpt-neox-3.6b-gguf", + "downloads": 310, "source": "Hugging Face", - "score": -0.08934839365194981, - "first_commit": "2023-11-07 12:04:07", - "latest_commit": "2023-11-17 05:01:00", + "score": -0.09275216448871944, + "first_commit": "2023-09-02 18:46:08", + "latest_commit": "2023-09-08 02:37:19", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Mistral-Nemo-Instruct-2407-gguf mistralaiさんが公開しているMistral-Nemo-Instruct-2407のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Mistral-Nemo-Instruct-2407-gguf", - "project_name": "Mistral-Nemo-Instruct-2407-gguf", - "downloads": 320, + "description": "What’s this?", + "url": "https://huggingface.co./globis-university/deberta-v3-japanese-large", + "project_name": "deberta-v3-japanese-large", + "downloads": 308, "source": "Hugging Face", - "score": -0.08940141840463148, - "first_commit": "2024-07-22 13:28:13", - "latest_commit": "2024-07-22 17:25:48", + "score": -0.09277160055371744, + "first_commit": "2023-09-21 16:15:15", + "latest_commit": "2024-07-05 05:50:06", "languages": [], "model_or_dataset": "model", - "model_size": 12.2, - "model_architectures": null + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-70b-instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-70b-instruct-v0.1-gguf", - "project_name": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf", - "downloads": 320, + "description": "This is a Japanese sentence-LUKE model.", + "url": "https://huggingface.co./cheonboy/sentence_embedding_japanese", + "project_name": "sentence_embedding_japanese", + "downloads": 307, "source": "Hugging Face", - "score": -0.08940141840463148, - "first_commit": "2024-05-03 09:00:00", - "latest_commit": "2024-05-04 06:52:16", + "score": -0.09278131858621645, + "first_commit": "2023-10-05 05:10:25", + "latest_commit": "2023-10-05 05:13:09", "languages": [], "model_or_dataset": "model", - "model_size": 69.2, - "model_architectures": null + "model_size": null, + "model_architectures": "LukeModel" }, { - "description": "What’s this?", - "url": "https://huggingface.co./globis-university/deberta-v3-japanese-xsmall", - "project_name": "deberta-v3-japanese-xsmall", - "downloads": 318, + "description": "日本語版CLIPモデル This is a CLIP text/image encoder model for Japanese. 
", + "url": "https://huggingface.co./sonoisa/clip-vit-b-32-japanese-v1", + "project_name": "clip-vit-b-32-japanese-v1", + "downloads": 306, "source": "Hugging Face", - "score": -0.08942262830570415, - "first_commit": "2023-09-21 16:12:53", - "latest_commit": "2024-07-05 05:48:15", + "score": -0.09279103661871545, + "first_commit": "2022-02-15 15:47:34", + "latest_commit": "2022-04-19 14:18:58", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "BertModel" }, { - "description": "mt5_summarize_japanese (Japanese caption : 日本語の要約のモデル)", - "url": "https://huggingface.co./tsmatz/mt5_summarize_japanese", - "project_name": "mt5_summarize_japanese", - "downloads": 318, + "description": "AutoWikiQA 東工大が公開しているSwallow-MXを用いて、Wikipedia中のテキストを入力として「質問(query)」と「回答(answer)」を生成し、生成された質問と回答についてフィルタリングを行ったデータセットです。", + "url": "https://huggingface.co./datasets/cl-nagoya/auto-wiki-qa", + "project_name": "auto-wiki-qa", + "downloads": 304, "source": "Hugging Face", - "score": -0.08942262830570415, - "first_commit": "2022-11-26 10:51:27", - "latest_commit": "2024-07-12 00:01:31", + "score": -0.09281047268371345, + "first_commit": "2024-03-28 01:33:42", + "latest_commit": "2024-04-20 12:17:33", "languages": [], - "model_or_dataset": "model", - "model_size": 0.3, - "model_architectures": "MT5ForConditionalGeneration" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "[Llama-3.1-70B-EZO-1.1-it] Model Card モデル情報 / Model Information このモデルは、Meta AI の Llama 3.1 をベースに、日本語タスクでの性能を向上させるためにファインチューニングを行ったものです。", - "url": "https://huggingface.co./AXCXEPT/Llama-3.1-70B-EZO-1.1-it", - "project_name": "Llama-3.1-70B-EZO-1.1-it", - "downloads": 314, + "description": "BERT large Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char-v2", + "project_name": "bert-large-japanese-char-v2", + "downloads": 301, "source": "Hugging Face", - "score": -0.08946504810784948, - "first_commit": "2024-07-29 01:35:35", - "latest_commit": "2024-08-23 10:52:31", + "score": -0.09283962678121047, + "first_commit": "2023-05-19 00:48:06", + "latest_commit": "2023-05-19 00:54:57", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "BertForPreTraining" }, { - "description": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf ryota39さんが公開しているPhi-3-mini-4k-instruct-dpoのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ryota39-Phi-3-mini-4k-instruct-dpo-gguf", - "project_name": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf", - "downloads": 313, + "description": "Japanese to Korean translator Japanese to Korean translator model based on EncoderDecoderModel(bert-japanese+kogpt2)", + "url": "https://huggingface.co./sappho192/aihub-ja-ko-translator", + "project_name": "aihub-ja-ko-translator", + "downloads": 300, "source": "Hugging Face", - "score": -0.08947565305838581, - "first_commit": "2024-04-29 14:27:31", - "latest_commit": "2024-04-29 16:53:45", + "score": -0.09284934481370946, + "first_commit": "2024-02-05 00:51:47", + "latest_commit": "2024-06-28 06:38:39", "languages": [], "model_or_dataset": "model", - "model_size": 3.82, - "model_architectures": null + "model_size": 0.265, + "model_architectures": "EncoderDecoderModel" }, { - "description": "lightblue-suzume-llama-3-8B-japanese-gguf 
lightblueさんが公開しているsuzume-llama-3-8B-japaneseのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-japanese-gguf", - "project_name": "lightblue-suzume-llama-3-8B-japanese-gguf", - "downloads": 311, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stable-clip-vit-l-16", + "project_name": "japanese-stable-clip-vit-l-16", + "downloads": 300, "source": "Hugging Face", - "score": -0.08949686295945848, - "first_commit": "2024-04-23 13:30:08", - "latest_commit": "2024-05-07 12:58:06", + "score": -0.09284934481370946, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 0.41400000000000003, "model_architectures": null }, { - "description": "Mistral-Nemo-Japanese-Instruct-2408 Model Description", - "url": "https://huggingface.co./cyberagent/Mistral-Nemo-Japanese-Instruct-2408", - "project_name": "Mistral-Nemo-Japanese-Instruct-2408", - "downloads": 310, + "description": "Parakeet TDT-CTC 0.6B (ja) | | parakeet-tdt_ctc-0.6b-ja is an ASR model that transcribes Japanese speech with Punctuations.", + "url": "https://huggingface.co./nvidia/parakeet-tdt_ctc-0.6b-ja", + "project_name": "parakeet-tdt_ctc-0.6b-ja", + "downloads": 299, "source": "Hugging Face", - "score": -0.08950746790999482, - "first_commit": "2024-08-30 03:57:43", - "latest_commit": "2024-08-30 04:03:41", + "score": -0.09285906284620847, + "first_commit": "2024-05-13 15:39:30", + "latest_commit": "2024-05-17 17:20:17", "languages": [], "model_or_dataset": "model", - "model_size": 12.2, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-MS-7b-instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf", - "project_name": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf", - "downloads": 309, + "description": "ku-nlp/roberta-base-japanese-char-wwm Model description This is a Japanese RoBERTa base model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./ku-nlp/roberta-base-japanese-char-wwm", + "project_name": "roberta-base-japanese-char-wwm", + "downloads": 298, "source": "Hugging Face", - "score": -0.08951807286053115, - "first_commit": "2024-05-02 13:37:22", - "latest_commit": "2024-05-03 04:35:34", + "score": -0.09286878087870747, + "first_commit": "2022-09-20 05:07:34", + "latest_commit": "2023-03-20 08:05:45", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, - "model_architectures": null + "model_size": 0.1, + "model_architectures": "RobertaForMaskedLM" }, { - "description": "回答と回答が出てくるパラグラフを与えると質問文を生成するモデル SEE: https://github.com/sonoisa/deep-question-generation 本モデルの作成ステップ概要 SQuAD 1.1を日本語に機械翻訳し、不正なデータをクレンジング(有効なデータは約半分)。", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-question-generation", - "project_name": "t5-base-japanese-question-generation", - "downloads": 307, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-instruct-1_6b", + "project_name": "japanese-stablelm-2-instruct-1_6b", + "downloads": 295, "source": "Hugging Face", - "score": -0.08953928276160382, - 
"first_commit": "2021-04-03 14:08:55", - "latest_commit": "2022-03-11 02:50:33", + "score": -0.09289793497620448, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 1.64, + "model_architectures": null }, { - "description": "rinna/japanese-gpt-neox-3.6b-instruction-ppo rinnaさんが公開しているjapanese-gpt-neox-3.6b-instruction-ppoのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf", - "project_name": "rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf", - "downloads": 300, + "description": "ichikara-instruction (Non Commercial) LLMのための日本語インストラクションデータ 公開ページ 公開ページより、 本データに関して、言語処理学会第30回年次大会において発表を行います。", + "url": "https://huggingface.co./datasets/p1atdev/ichikara-instruction", + "project_name": "ichikara-instruction", + "downloads": 293, "source": "Hugging Face", - "score": -0.08961351741535815, - "first_commit": "2023-09-02 17:52:26", - "latest_commit": "2023-09-08 02:39:00", + "score": -0.09291737104120248, + "first_commit": "2024-03-12 07:09:56", + "latest_commit": "2024-03-12 08:36:40", "languages": [], - "model_or_dataset": "model", - "model_size": 3.61, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "What’s this?", - "url": "https://huggingface.co./globis-university/deberta-v3-japanese-base", - "project_name": "deberta-v3-japanese-base", - "downloads": 299, + "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59362 Validation Metrics Loss: 0.13092292845249176 Accuracy: 0.9527127414314258 Precision: 0.9634070704982427 Recall: 0.9842171959602166 AUC: 0.9667289746092403 F1: 0.9737009564152002 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", + "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59362", + "project_name": "autonlp-japanese-sentiment-59362", + "downloads": 292, "source": "Hugging Face", - "score": -0.0896241223658945, - "first_commit": "2023-09-21 16:19:31", - "latest_commit": "2024-07-05 05:49:13", + "score": -0.0929270890737015, + "first_commit": "2021-04-21 11:28:11", + "latest_commit": "2021-05-18 22:55:03", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "BertForSequenceClassification" }, { - "description": "Reflection-Llama-3.1-70B-gguf mattshumerさんが公開しているReflection-Llama-3.1-70Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Reflection-Llama-3.1-70B-gguf", - "project_name": "Reflection-Llama-3.1-70B-gguf", - "downloads": 299, + "description": "tokyotech-llm様の Llama-3-Swallow-8B-Instruct-v0.1 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama-3-Swallow-8B-Instruct-v0.1-GGUF", + "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-GGUF", + "downloads": 289, "source": "Hugging Face", - "score": -0.0896241223658945, - "first_commit": "2024-09-06 17:18:27", - "latest_commit": "2024-09-07 04:00:27", + "score": -0.0929562431711985, + "first_commit": "2024-07-01 11:45:22", + "latest_commit": "2024-07-01 17:54:05", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, + "model_size": 8.03, "model_architectures": null }, { - "description": 
"tokyotech-llm様の Llama-3-Swallow-8B-Instruct-v0.1 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama-3-Swallow-8B-Instruct-v0.1-GGUF", - "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-GGUF", - "downloads": 297, + "description": "External dictionary importer for Yomichan.", + "url": "https://github.com/FooSoft/yomichan-import", + "project_name": "yomichan-import", + "stargazers_count": 82, + "source": "GitHub", + "score": -0.09296533201648584, + "first_commit": "2016-07-26 20:24:33", + "latest_commit": "2023-02-25 12:43:03", + "languages": [ + "Go" + ], + "model_or_dataset": null + }, + { + "description": "日英変換・英語略語展開のための IME 追加辞書 orange_book 日本語から英語への和英変換や英語略語の展開を Google 日本語入力や ATOK などで可能にする IME 拡張辞書", + "url": "https://github.com/peaceiris/google-ime-dictionary", + "project_name": "google-ime-dictionary", + "stargazers_count": 82, + "source": "GitHub", + "score": -0.09296533201648584, + "first_commit": "2018-09-13 01:54:32", + "latest_commit": "2023-01-16 10:47:31", + "languages": [], + "model_or_dataset": "dataset" + }, + { + "description": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf aixsatoshiさんが公開しているLlama-3-8b-Cosmopedia-japaneseのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", + "project_name": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", + "downloads": 285, "source": "Hugging Face", - "score": -0.08964533226696716, - "first_commit": "2024-07-01 11:45:22", - "latest_commit": "2024-07-01 17:54:05", + "score": -0.0929951153011945, + "first_commit": "2024-05-01 12:36:43", + "latest_commit": "2024-05-19 08:27:21", "languages": [], "model_or_dataset": "model", "model_size": 8.03, "model_architectures": null }, { - "description": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Humanities-9B-gemma-2-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", - "project_name": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", - "downloads": 290, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-base-long", + "project_name": "t5-base-long", + "downloads": 278, "source": "Hugging Face", - "score": -0.0897195669207215, - "first_commit": "2024-07-15 15:43:00", - "latest_commit": "2024-07-15 17:01:09", + "score": -0.09306314152868753, + "first_commit": "2023-04-26 08:30:59", + "latest_commit": "2023-05-10 10:00:00", "languages": [], "model_or_dataset": "model", - "model_size": 9.24, - "model_architectures": null + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "BERT large Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char-v2", - "project_name": "bert-large-japanese-char-v2", - "downloads": 288, + "description": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf umiyukiさんが公開しているJapanese-Chat-Umievo-itr001-7bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", + "project_name": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", + "downloads": 274, "source": "Hugging Face", - "score": -0.08974077682179417, - "first_commit": "2023-05-19 00:48:06", - "latest_commit": "2023-05-19 00:54:57", + "score": -0.09310201365868354, + "first_commit": "2024-04-27 09:55:39", + "latest_commit": "2024-04-27 10:52:17", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForPreTraining" 
+ "model_size": 7.24, + "model_architectures": null }, { - "description": "Model Card for Japanese character-level GPT-2 Small Model description This is a Japanese character-level GPT-2 Small (90M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/gpt2-small-japanese-char", - "project_name": "gpt2-small-japanese-char", - "downloads": 281, + "description": "zenz-v1 Checkpoints zenz-v1 is a language model specialized for kana-kanji conversion tasks based on the GPT-2 architecture.", + "url": "https://huggingface.co./Miwa-Keita/zenz-v1-checkpoints", + "project_name": "zenz-v1-checkpoints", + "downloads": 274, "source": "Hugging Face", - "score": -0.0898150114755485, - "first_commit": "2023-04-18 08:24:55", - "latest_commit": "2023-05-08 10:08:13", + "score": -0.09310201365868354, + "first_commit": "2024-06-28 14:26:33", + "latest_commit": "2024-06-28 14:53:43", "languages": [], "model_or_dataset": "model", - "model_size": 0.10300000000000001, + "model_size": null, "model_architectures": "GPT2LMHeadModel" }, { - "description": "rinna/japanese-gpt-neox-3.6b rinnaさんが公開しているjapanese-gpt-neox-3.6bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-gguf", - "project_name": "rinna-japanese-gpt-neox-3.6b-gguf", - "downloads": 279, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-RAG", + "project_name": "Orion-14B-Chat-RAG", + "downloads": 274, "source": "Hugging Face", - "score": -0.08983622137662117, - "first_commit": "2023-09-02 18:46:08", - "latest_commit": "2023-09-08 02:37:19", + "score": -0.09310201365868354, + "first_commit": "2024-01-16 12:19:08", + "latest_commit": "2024-03-26 10:08:09", "languages": [], "model_or_dataset": "model", - "model_size": 3.61, - "model_architectures": null + "model_size": null, + "model_architectures": "OrionForCausalLM" }, { - "description": "JA-VG-VQA-500 Dataset Description JA-VG-VQA-500 is a 500-sample subset of Japanese Visual Genome VQA dataset.", - "url": "https://huggingface.co./datasets/SakanaAI/JA-VG-VQA-500", - "project_name": "JA-VG-VQA-500", - "downloads": 275, + "description": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-13b-instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-13b-instruct-v0.1-gguf", + "project_name": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf", + "downloads": 273, "source": "Hugging Face", - "score": -0.08987864117876651, - "first_commit": "2024-03-21 09:51:10", - "latest_commit": "2024-05-14 04:11:31", + "score": -0.09311173169118253, + "first_commit": "2024-05-02 14:18:27", + "latest_commit": "2024-05-03 04:36:24", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 13.1, "model_architectures": null }, { - "description": "ElanMT ElanMT-BT-ja-en is a Japanese to English translation model developed by ELAN MITSUA Project / Abstract Engine.", - "url": "https://huggingface.co./Mitsua/elan-mt-bt-ja-en", - "project_name": "elan-mt-bt-ja-en", - "downloads": 272, + "description": "Ruri: Japanese General Text Embeddings Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-base", + "project_name": "ruri-base", + "downloads": 270, "source": "Hugging Face", - "score": -0.08991045603037551, - "first_commit": "2024-05-20 01:56:12", - "latest_commit": 
"2024-05-20 01:56:57", + "score": -0.09314088578867954, + "first_commit": "2024-08-28 13:09:10", + "latest_commit": "2024-09-04 08:49:23", "languages": [], "model_or_dataset": "model", - "model_size": 0.0606, - "model_architectures": "MarianMTModel" + "model_size": 0.111, + "model_architectures": "BertModel" }, { - "description": "zenz-v1 Checkpoints zenz-v1 is a language model specialized for kana-kanji conversion tasks based on the GPT-2 architecture.", - "url": "https://huggingface.co./Miwa-Keita/zenz-v1-checkpoints", - "project_name": "zenz-v1-checkpoints", - "downloads": 272, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-small-short", + "project_name": "t5-small-short", + "downloads": 265, "source": "Hugging Face", - "score": -0.08991045603037551, - "first_commit": "2024-06-28 14:26:33", - "latest_commit": "2024-06-28 14:53:43", + "score": -0.09318947595117456, + "first_commit": "2023-04-25 04:37:20", + "latest_commit": "2023-05-10 09:55:39", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "JaQuAD is developed to provide a SQuAD-like QA dataset in Japanese.", - "url": "https://huggingface.co./datasets/SkelterLabsInc/JaQuAD", - "project_name": "JaQuAD", - "downloads": 272, + "description": "llm-japanese-dataset LLM構築用の日本語インストラクション(チャット)データセット 主に,英語で構築されたLLMモデルなどに対して,チャット(Instruction)応答タスクに関してLoRAなどでチューニングするために使用できます. ", + "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset", + "project_name": "llm-japanese-dataset", + "downloads": 263, "source": "Hugging Face", - "score": -0.08991045603037551, - "first_commit": "2022-01-26 01:34:38", - "latest_commit": "2022-10-25 09:06:40", + "score": -0.09320891201617257, + "first_commit": "2023-04-30 06:13:24", + "latest_commit": "2024-01-18 13:42:50", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "DataPilot-ArrowPro-7B-RobinHood-gguf DataPilotさんが公開しているArrowPro-7B-RobinHoodのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-RobinHood-gguf", - "project_name": "DataPilot-ArrowPro-7B-RobinHood-gguf", - "downloads": 271, + "description": "oasst2-33k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", + "url": "https://huggingface.co./datasets/llm-jp/oasst2-33k-ja", + "project_name": "oasst2-33k-ja", + "downloads": 261, "source": "Hugging Face", - "score": -0.08992106098091183, - "first_commit": "2024-05-11 07:22:37", - "latest_commit": "2024-05-11 13:43:09", + "score": -0.09322834808117057, + "first_commit": "2024-04-28 16:24:00", + "latest_commit": "2024-04-28 16:39:03", "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { "description": "Model Card for Tanrei/GPTSAN-japanese General-purpose Swich transformer based Japanese language model GPTSAN has some unique features.", "url": "https://huggingface.co./Tanrei/GPTSAN-japanese", "project_name": "GPTSAN-japanese", - "downloads": 270, + "downloads": 260, "source": "Hugging Face", - "score": -0.08993166593144818, + "score": -0.09323806611366957, "first_commit": "2023-01-06 05:41:12", "latest_commit": "2023-04-21 19:04:49", "languages": [], @@ -6953,40 +7007,40 @@ "model_architectures": "GPTSanJapaneseForConditionalGeneration" }, { 
- "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-small-short", - "project_name": "t5-small-short", - "downloads": 267, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-v0.1", + "project_name": "Swallow-70b-instruct-v0.1", + "downloads": 258, "source": "Hugging Face", - "score": -0.08996348078305717, - "first_commit": "2023-04-25 04:37:20", - "latest_commit": "2023-05-10 09:55:39", + "score": -0.09325750217866757, + "first_commit": "2024-03-06 14:39:34", + "latest_commit": "2024-06-29 09:00:17", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 69.2, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Ninja-v1-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-128kのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-128k-gguf", - "project_name": "Ninja-v1-128k-gguf", - "downloads": 266, + "description": "WRIME-fine-tuned BERT base Japanese This model is a Japanese BERTBASE fine-tuned on the WRIME dataset.", + "url": "https://huggingface.co./patrickramos/bert-base-japanese-v2-wrime-fine-tune", + "project_name": "bert-base-japanese-v2-wrime-fine-tune", + "downloads": 257, "source": "Hugging Face", - "score": -0.08997408573359351, - "first_commit": "2024-05-01 17:48:06", - "latest_commit": "2024-05-04 13:25:20", + "score": -0.09326722021116658, + "first_commit": "2022-05-22 09:42:14", + "latest_commit": "2023-03-22 08:11:34", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" }, { "description": "XLNet-japanese Model description This model require Mecab and senetencepiece with XLNetTokenizer.", "url": "https://huggingface.co./hajime9652/xlnet-japanese", "project_name": "xlnet-japanese", - "downloads": 265, + "downloads": 257, "source": "Hugging Face", - "score": -0.08998469068412984, + "score": -0.09326722021116658, "first_commit": "2021-04-01 03:12:11", "latest_commit": "2023-01-05 04:28:36", "languages": [], @@ -6995,1006 +7049,1048 @@ "model_architectures": "XLNetLMHeadModel" }, { - "description": "Oumuamua-7b-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", - "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP", - "project_name": "Oumuamua-7b-RP", - "downloads": 263, + "description": "ElanMT ElanMT-BT-en-ja is a English to Japanese translation model developed by ELAN MITSUA Project / Abstract Engine.", + "url": "https://huggingface.co./Mitsua/elan-mt-bt-en-ja", + "project_name": "elan-mt-bt-en-ja", + "downloads": 255, "source": "Hugging Face", - "score": -0.09000590058520251, - "first_commit": "2024-06-23 12:30:16", - "latest_commit": "2024-06-23 17:06:53", + "score": -0.09328665627616459, + "first_commit": "2024-05-20 01:51:18", + "latest_commit": "2024-05-20 01:53:38", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, - "model_architectures": "MistralForCausalLM" + "model_size": 0.0606, + "model_architectures": "MarianMTModel" }, { - "description": "Tanuki-8B-dpo-v1.0-GGUF 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0-4kのGGUF量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-4k-GGUF", - "project_name": "Tanuki-8B-dpo-v1.0-4k-GGUF", - "downloads": 259, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-test\", split=\"test\") 概要 大喜利投稿サイトBoketeのクロールデータです。", + "url": "https://huggingface.co./datasets/YANS-official/ogiri-test", + "project_name": "ogiri-test", + "downloads": 253, "source": "Hugging Face", - "score": -0.09004832038734785, - "first_commit": "2024-08-16 12:39:31", - "latest_commit": "2024-08-27 18:05:25", + "score": -0.09330609234116259, + "first_commit": "2024-09-03 15:08:05", + "latest_commit": "2024-09-09 05:53:54", "languages": [], - "model_or_dataset": "model", - "model_size": 7.51, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "This repository contains some GGUF quantizations of the VNTL Gemma 2 27B model.", - "url": "https://huggingface.co./lmg-anon/vntl-gemma2-27b-gguf", - "project_name": "vntl-gemma2-27b-gguf", - "downloads": 259, + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-large", + "project_name": "ruri-reranker-large", + "downloads": 252, "source": "Hugging Face", - "score": -0.09004832038734785, - "first_commit": "2024-07-07 00:28:06", - "latest_commit": "2024-07-08 16:13:54", + "score": -0.09331581037366159, + "first_commit": "2024-08-20 02:37:26", + "latest_commit": "2024-09-04 08:50:12", "languages": [], "model_or_dataset": "model", - "model_size": 27.2, - "model_architectures": null + "model_size": 0.337, + "model_architectures": "BertForSequenceClassification" }, { - "description": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf pfnetさんが公開しているnekomata-14b-pfn-qfin-inst-mergeのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf", - "project_name": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf", - "downloads": 258, + "description": "Ruri: Japanese General Text Embeddings Usage First install the Sentence Transformers library: pip install -U sentence-transformers Then you can load this model and run inference.", + "url": "https://huggingface.co./cl-nagoya/ruri-pt-base", + "project_name": "ruri-pt-base", + "downloads": 251, "source": "Hugging Face", - "score": -0.09005892533788418, - "first_commit": "2024-04-23 14:53:08", - "latest_commit": "2024-04-24 14:39:32", + "score": -0.0933255284061606, + "first_commit": "2024-08-17 10:38:19", + "latest_commit": "2024-09-13 
01:38:07", "languages": [], "model_or_dataset": "model", - "model_size": 14.2, - "model_architectures": null + "model_size": 0.111, + "model_architectures": "BertModel" }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-small-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-small-v1", - "project_name": "japanese-reranker-cross-encoder-small-v1", - "downloads": 256, + "description": "calm3-22b-RP-v2 GGUF版はこちら/Click here for the GGUF version また、こちらで本モデルのデモを公開しています。", + "url": "https://huggingface.co./Aratako/calm3-22b-RP-v2", + "project_name": "calm3-22b-RP-v2", + "downloads": 250, "source": "Hugging Face", - "score": -0.09008013523895685, - "first_commit": "2024-03-28 04:31:45", - "latest_commit": "2024-04-01 02:39:19", + "score": -0.09333524643865959, + "first_commit": "2024-09-12 11:29:23", + "latest_commit": "2024-09-16 05:53:42", "languages": [], "model_or_dataset": "model", - "model_size": 0.11800000000000001, - "model_architectures": "XLMRobertaForSequenceClassification" + "model_size": 22.5, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Tanuki-8x8B-dpo-v1.0-GPTQ-4bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGPTQ 4bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GPTQ-4bit", + "project_name": "Tanuki-8x8B-dpo-v1.0-GPTQ-4bit", + "downloads": 249, + "source": "Hugging Face", + "score": -0.0933449644711586, + "first_commit": "2024-08-27 18:19:13", + "latest_commit": "2024-09-03 09:27:14", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "TanukiForCausalLM" + }, + { + "description": "Evaluation on MIRACL japanese These models don't train on the MIRACL training data.", + "url": "https://huggingface.co./aken12/splade-japanese-v3", + "project_name": "splade-japanese-v3", + "downloads": 248, + "source": "Hugging Face", + "score": -0.0933546825036576, + "first_commit": "2024-03-29 12:35:47", + "latest_commit": "2024-05-22 02:59:37", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertForMaskedLM" + }, + { + "description": "bilingual-gpt-neox-4b-8k Overview Notice: This model requires transformers>=4.31.0 to work properly.", + "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-8k", + "project_name": "bilingual-gpt-neox-4b-8k", + "downloads": 238, + "source": "Hugging Face", + "score": -0.09345186282864763, + "first_commit": "2023-07-31 02:34:21", + "latest_commit": "2024-07-20 08:03:16", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.95, + "model_architectures": "GPTNeoXForCausalLM" }, { "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stable-diffusion-xl", - "project_name": "japanese-stable-diffusion-xl", - "downloads": 256, + "url": "https://huggingface.co./stabilityai/japanese-stable-vlm", + "project_name": "japanese-stable-vlm", + "downloads": 236, "source": "Hugging Face", - "score": -0.09008013523895685, + "score": -0.09347129889364564, "first_commit": null, "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.57, "model_architectures": null }, { - "description": "日本語向け Llama 3 8B はじめに このリポジトリはLlama 3を日本語化しようとしたモデルのリポジトリです。", - "url": "https://huggingface.co./alfredplpl/Llama-3-8B-Instruct-Ja", 
- "project_name": "Llama-3-8B-Instruct-Ja", - "downloads": 252, + "description": "c4ai-command-r-plus-gguf CohereForAIさんが公開しているc4ai-command-r-plusのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/c4ai-command-r-plus-gguf", + "project_name": "c4ai-command-r-plus-gguf", + "downloads": 236, "source": "Hugging Face", - "score": -0.09012255504110218, - "first_commit": "2024-04-22 05:14:33", - "latest_commit": "2024-05-01 19:16:01", + "score": -0.09347129889364564, + "first_commit": "2024-04-22 14:46:41", + "latest_commit": "2024-04-23 16:13:37", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": 104.0, + "model_architectures": null }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-test\", split=\"test\") 概要 大喜利投稿サイトBoketeのクロールデータです。", - "url": "https://huggingface.co./datasets/YANS-official/ogiri-test", - "project_name": "ogiri-test", - "downloads": 251, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat", + "project_name": "Orion-14B-Chat", + "downloads": 234, "source": "Hugging Face", - "score": -0.09013315999163851, - "first_commit": "2024-09-03 15:08:05", - "latest_commit": "2024-09-09 05:53:54", + "score": -0.09349073495864364, + "first_commit": "2024-01-16 06:03:30", + "latest_commit": "2024-04-11 10:48:51", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 14.5, + "model_architectures": "OrionForCausalLM" }, { - "description": "Local-Novel-LLM-project様の Assistance をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Assistance-7B-GGUF", - "project_name": "Assistance-7B-GGUF", - "downloads": 247, + "description": "datagemma-rag-27b-it-gguf googleさんが公開しているdatagemma-rag-27b-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/datagemma-rag-27b-it-gguf", + "project_name": "datagemma-rag-27b-it-gguf", + "downloads": 232, "source": "Hugging Face", - "score": -0.09017557979378385, - "first_commit": "2024-05-03 12:16:29", - "latest_commit": "2024-05-04 07:48:41", + "score": -0.09351017102364165, + "first_commit": "2024-09-12 18:03:45", + "latest_commit": "2024-09-12 19:57:32", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 27.2, "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-large-long", - "project_name": "t5-large-long", - "downloads": 246, + "description": "回答と回答が出てくるパラグラフを与えると質問文を生成するモデル SEE: https://github.com/sonoisa/deep-question-generation 本モデルの作成ステップ概要 SQuAD 1.1���日本語に機械翻訳し、不正なデータをクレンジング(有効なデータは約半分)。", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-question-generation", + "project_name": "t5-base-japanese-question-generation", + "downloads": 231, "source": "Hugging Face", - "score": -0.09018618474432019, - "first_commit": "2023-04-26 08:33:12", - "latest_commit": "2023-05-10 10:00:35", + "score": -0.09351988905614064, + "first_commit": "2021-04-03 14:08:55", + "latest_commit": "2022-03-11 02:50:33", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Additional pretrained BERT base Japanese finance This is a BERT model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/bert-base-japanese-fin-additional", - "project_name": 
"bert-base-japanese-fin-additional", - "downloads": 244, + "description": "QuantFactory/TinySlime-1.1B-Chat-v1.0-GGUF", + "url": "https://huggingface.co./QuantFactory/TinySlime-1.1B-Chat-v1.0-GGUF", + "project_name": "TinySlime-1.1B-Chat-v1.0-GGUF", + "downloads": 230, "source": "Hugging Face", - "score": -0.09020739464539286, - "first_commit": "2022-03-11 17:41:11", - "latest_commit": "2022-12-09 00:40:25", + "score": -0.09352960708863965, + "first_commit": "2024-09-11 05:27:47", + "latest_commit": "2024-09-11 05:55:46", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForPreTraining" + "model_size": 1.1, + "model_architectures": null }, { - "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "url": "https://huggingface.co./QuantFactory/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "downloads": 243, + "description": "こちらでアップロードできないので、civitaiにて先に公開しています。 ", + "url": "https://huggingface.co./sazyou-roukaku/AfterRealXL", + "project_name": "AfterRealXL", + "downloads": 228, "source": "Hugging Face", - "score": -0.09021799959592919, - "first_commit": "2024-07-05 05:56:09", - "latest_commit": "2024-07-13 13:29:45", + "score": -0.09354904315363766, + "first_commit": "2023-09-23 08:43:02", + "latest_commit": "2023-10-01 18:12:09", "languages": [], "model_or_dataset": "model", - "model_size": 1.1, + "model_size": null, "model_architectures": null }, { - "description": "SpeechT5 (TTS task) for Japanese SpeechT5 model fine-tuned for Japanese speech synthesis (text-to-speech)", - "url": "https://huggingface.co./esnya/japanese_speecht5_tts", - "project_name": "japanese_speecht5_tts", - "downloads": 241, + "description": "Mistral-Nemo-Instruct-2407-gguf mistralaiさんが公開しているMistral-Nemo-Instruct-2407のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Mistral-Nemo-Instruct-2407-gguf", + "project_name": "Mistral-Nemo-Instruct-2407-gguf", + "downloads": 225, "source": "Hugging Face", - "score": -0.09023920949700186, - "first_commit": "2023-08-08 18:37:40", - "latest_commit": "2023-08-09 09:25:38", + "score": -0.09357819725113466, + "first_commit": "2024-07-22 13:28:13", + "latest_commit": "2024-07-22 17:25:48", "languages": [], "model_or_dataset": "model", - "model_size": 0.14400000000000002, - "model_architectures": "SpeechT5ForTextToSpeech" + "model_size": 12.2, + "model_architectures": null }, { - "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/bert-small-japanese", - "project_name": "bert-small-japanese", - "downloads": 241, + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-v1.1", + "project_name": "t5-base-japanese-v1.1", + "downloads": 225, "source": "Hugging Face", - "score": -0.09023920949700186, - "first_commit": "2021-10-04 13:09:36", - "latest_commit": "2022-12-09 00:40:57", + "score": -0.09357819725113466, + "first_commit": "2022-08-12 15:41:28", + "latest_commit": "2022-08-27 09:21:01", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Japanese GPT2 Lyric Model Model description", - "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-small", - "project_name": "gpt2-japanese-lyric-small", - "downloads": 241, + "description": "※llama.cpp Releases b3428(7/21)", + "url": "https://huggingface.co./MCZK/EZO-Humanities-9B-gemma-2-it-GGUF", + "project_name": "EZO-Humanities-9B-gemma-2-it-GGUF", + "downloads": 224, "source": "Hugging Face", - "score": -0.09023920949700186, - "first_commit": "2022-04-21 04:25:18", - "latest_commit": "2023-10-23 12:46:36", + "score": -0.09358791528363367, + "first_commit": "2024-07-10 22:02:03", + "latest_commit": "2024-07-21 18:11:21", "languages": [], "model_or_dataset": "model", - "model_size": 0.123, - "model_architectures": "GPT2LMHeadModel" + "model_size": 9.24, + "model_architectures": null }, { - "description": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf alfredplplさんが公開しているLlama-3-8B-Instruct-Jaのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/alfredplpl-Llama-3-8B-Instruct-Ja-gguf", - "project_name": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf", - "downloads": 239, + "description": "QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF This is quantized version of DataPilot/Llama3.1-ArrowSE-v0.4 created using llama.cpp Original Model Card 概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", + "url": "https://huggingface.co./QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF", + "project_name": "Llama3.1-ArrowSE-v0.4-GGUF", + "downloads": 224, "source": "Hugging Face", - "score": -0.09026041939807453, - "first_commit": "2024-04-23 14:18:57", - "latest_commit": "2024-04-23 15:24:47", + "score": -0.09358791528363367, + "first_commit": "2024-07-28 06:17:48", + "latest_commit": "2024-07-28 06:57:40", "languages": [], "model_or_dataset": "model", "model_size": 8.03, "model_architectures": null }, { - "description": "モデル説明 (model explanation) CoolJapanDiffusion 2.1.1とWaifuDiffusion 1.4 anime epoch2のマージ。", - "url": "https://huggingface.co./ThePioneer/CoolerWaifuDiffusion", - "project_name": "CoolerWaifuDiffusion", - "downloads": 236, + "description": "GPT-2 small Japanese model This repository contains a GPT2-small model trained on Japanese Wikipedia dataset.", + "url": "https://huggingface.co./colorfulscoop/gpt2-small-ja", + "project_name": "gpt2-small-ja", + "downloads": 223, "source": "Hugging Face", - "score": -0.09029223424968352, - "first_commit": "2023-01-20 23:52:39", - "latest_commit": "2023-01-22 19:16:59", + "score": -0.09359763331613268, + "first_commit": "2021-03-27 02:27:05", + "latest_commit": "2021-09-27 20:50:17", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "GPT2LMHeadModel" }, { - "description": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-7b-instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-7b-instruct-v0.1-gguf", - "project_name": 
"tokyotech-llm-Swallow-7b-instruct-v0.1-gguf", - "downloads": 236, + "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "url": "https://huggingface.co./QuantFactory/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "downloads": 221, "source": "Hugging Face", - "score": -0.09029223424968352, - "first_commit": "2024-05-03 04:09:27", - "latest_commit": "2024-05-03 04:53:43", + "score": -0.09361706938113068, + "first_commit": "2024-07-05 05:56:09", + "latest_commit": "2024-07-13 13:29:45", "languages": [], "model_or_dataset": "model", - "model_size": 6.83, + "model_size": 1.1, "model_architectures": null }, { - "description": "※llama.cpp Releases b3428(7/21)", - "url": "https://huggingface.co./MCZK/EZO-Humanities-9B-gemma-2-it-GGUF", - "project_name": "EZO-Humanities-9B-gemma-2-it-GGUF", - "downloads": 234, + "description": "whisper-large-v2-japanese-5k-steps This model is a fine-tuned version of openai/whisper-large-v2 on the Japanese CommonVoice dataset (v11)..", + "url": "https://huggingface.co./clu-ling/whisper-large-v2-japanese-5k-steps", + "project_name": "whisper-large-v2-japanese-5k-steps", + "downloads": 218, "source": "Hugging Face", - "score": -0.09031344415075619, - "first_commit": "2024-07-10 22:02:03", - "latest_commit": "2024-07-21 18:11:21", + "score": -0.09364622347862768, + "first_commit": "2023-01-28 22:14:29", + "latest_commit": "2023-03-03 21:11:39", "languages": [], "model_or_dataset": "model", - "model_size": 9.24, - "model_architectures": null + "model_size": null, + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59362 Validation Metrics Loss: 0.13092292845249176 Accuracy: 0.9527127414314258 Precision: 0.9634070704982427 Recall: 0.9842171959602166 AUC: 0.9667289746092403 F1: 0.9737009564152002 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", - "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59362", - "project_name": "autonlp-japanese-sentiment-59362", - "downloads": 233, + "description": "japanese-stablelm-2-instruct-1_6b-gguf stabilityaiさんが公開しているjapanese-stablelm-2-instruct-1_6bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/japanese-stablelm-2-instruct-1_6b-gguf", + "project_name": "japanese-stablelm-2-instruct-1_6b-gguf", + "downloads": 216, "source": "Hugging Face", - "score": -0.09032404910129253, - "first_commit": "2021-04-21 11:28:11", - "latest_commit": "2021-05-18 22:55:03", + "score": -0.09366565954362568, + "first_commit": "2024-05-11 07:26:43", + "latest_commit": "2024-05-11 09:56:19", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_size": 1.64, + "model_architectures": null }, { - "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with Gaianet Prompt template: prompt template: llama-3-chat Context size: chat_ctx_size: 4096 Run with GaiaNet:", - "url": "https://huggingface.co./gaianet/Llama-3-8B-Japanese-Instruct-GGUF", - "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", - "downloads": 232, + "description": "SpeechT5 (TTS task) for Japanese SpeechT5 model fine-tuned for Japanese speech 
synthesis (text-to-speech)", + "url": "https://huggingface.co./esnya/japanese_speecht5_tts", + "project_name": "japanese_speecht5_tts", + "downloads": 213, "source": "Hugging Face", - "score": -0.09033465405182886, - "first_commit": "2024-05-14 05:38:05", - "latest_commit": "2024-05-16 13:44:53", + "score": -0.0936948136411227, + "first_commit": "2023-08-08 18:37:40", + "latest_commit": "2023-08-09 09:25:38", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": 0.14400000000000002, + "model_architectures": "SpeechT5ForTextToSpeech" }, { - "description": "Umievo-itr012-Gleipnir-7B-GGUF", - "url": "https://huggingface.co./QuantFactory/Umievo-itr012-Gleipnir-7B-GGUF", - "project_name": "Umievo-itr012-Gleipnir-7B-GGUF", - "downloads": 223, + "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/bert-small-japanese", + "project_name": "bert-small-japanese", + "downloads": 210, "source": "Hugging Face", - "score": -0.09043009860665586, - "first_commit": "2024-06-09 03:48:10", - "latest_commit": "2024-06-09 13:12:32", + "score": -0.0937239677386197, + "first_commit": "2021-10-04 13:09:36", + "latest_commit": "2022-12-09 00:40:57", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "Model Card for Japanese DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese", - "project_name": "deberta-v2-large-japanese", - "downloads": 222, + "description": "This repository contains some GGUF quantizations of the VNTL Gemma 2 27B model.", + "url": "https://huggingface.co./lmg-anon/vntl-gemma2-27b-gguf", + "project_name": "vntl-gemma2-27b-gguf", + "downloads": 204, "source": "Hugging Face", - "score": -0.0904407035571922, - "first_commit": "2023-01-07 07:45:25", - "latest_commit": "2023-05-12 14:10:35", + "score": -0.09378227593361371, + "first_commit": "2024-07-07 00:28:06", + "latest_commit": "2024-07-08 16:13:54", "languages": [], "model_or_dataset": "model", - "model_size": 0.373, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": 27.2, + "model_architectures": null }, { - "description": "pfnet-nekomata-14b-pfn-qfin-gguf pfnetさんが公開しているnekomata-14b-pfn-qfinのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-gguf", - "project_name": "pfnet-nekomata-14b-pfn-qfin-gguf", - "downloads": 219, + "description": "gemma-2-2b-it-gguf googleさんが公開しているgemma-2-2b-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/gemma-2-2b-it-gguf", + "project_name": "gemma-2-2b-it-gguf", + "downloads": 198, "source": "Hugging Face", - "score": -0.0904725184088012, - "first_commit": "2024-04-24 12:58:10", - "latest_commit": "2024-04-24 14:46:15", + "score": -0.09384058412860774, + "first_commit": "2024-08-01 17:22:58", + "latest_commit": "2024-08-01 18:29:08", "languages": [], "model_or_dataset": "model", - "model_size": 14.2, + "model_size": 2.61, "model_architectures": null }, { - "description": "QuantFactory/llama-3-youko-8b-GGUF", - "url": "https://huggingface.co./QuantFactory/llama-3-youko-8b-GGUF", - "project_name": "llama-3-youko-8b-GGUF", - "downloads": 218, + "description": "ElanMT 
ElanMT-BT-ja-en is a Japanese to English translation model developed by ELAN MITSUA Project / Abstract Engine.", + "url": "https://huggingface.co./Mitsua/elan-mt-bt-ja-en", + "project_name": "elan-mt-bt-ja-en", + "downloads": 197, "source": "Hugging Face", - "score": -0.09048312335933753, - "first_commit": "2024-06-24 05:04:12", - "latest_commit": "2024-06-24 06:35:40", + "score": -0.09385030216110674, + "first_commit": "2024-05-20 01:56:12", + "latest_commit": "2024-05-20 01:56:57", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 0.0606, + "model_architectures": "MarianMTModel" }, { - "description": "Evaluation on MIRACL japanese These models don't train on the MIRACL training data.", - "url": "https://huggingface.co./aken12/splade-japanese-v3", - "project_name": "splade-japanese-v3", - "downloads": 217, + "description": "モデル説明 (model explanation) CoolJapanDiffusion 2.1.1とWaifuDiffusion 1.4 anime epoch2のマージ。", + "url": "https://huggingface.co./ThePioneer/CoolerWaifuDiffusion", + "project_name": "CoolerWaifuDiffusion", + "downloads": 194, "source": "Hugging Face", - "score": -0.09049372830987387, - "first_commit": "2024-03-29 12:35:47", - "latest_commit": "2024-05-22 02:59:37", + "score": -0.09387945625860375, + "first_commit": "2023-01-20 23:52:39", + "latest_commit": "2023-01-22 19:16:59", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": null }, { - "description": "Rakuda - Questions for Japanese models Repository:", - "url": "https://huggingface.co./datasets/yuzuai/rakuda-questions", - "project_name": "rakuda-questions", - "downloads": 217, + "description": "bert-japanese_finetuned-sentiment-analysis This model was trained from scratch on the Japanese Sentiment Polarity Dictionary dataset.", + "url": "https://huggingface.co./minutillamolinara/bert-japanese_finetuned-sentiment-analysis", + "project_name": "bert-japanese_finetuned-sentiment-analysis", + "downloads": 192, "source": "Hugging Face", - "score": -0.09049372830987387, - "first_commit": "2023-06-23 01:08:54", - "latest_commit": "2023-06-23 08:01:35", + "score": -0.09389889232360175, + "first_commit": "2023-03-31 02:28:09", + "latest_commit": "2023-03-31 13:13:37", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "BertForSequenceClassification" }, { - "description": "We provide an Amazon product reviews dataset for multilingual text classification.", - "url": "https://huggingface.co./datasets/defunct-datasets/amazon_reviews_multi", - "project_name": "amazon_reviews_multi", - "downloads": 215, + "description": "SakanaAI-EvoLLM-JP-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-v1-7Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-v1-7B-gguf", + "project_name": "SakanaAI-EvoLLM-JP-v1-7B-gguf", + "downloads": 192, "source": "Hugging Face", - "score": -0.09051493821094654, - "first_commit": "2022-01-25 16:34:54", - "latest_commit": "2023-11-02 14:52:21", + "score": -0.09389889232360175, + "first_commit": "2024-03-21 13:04:25", + "latest_commit": "2024-03-21 14:41:04", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 7.24, "model_architectures": null }, { - "description": "Fugaku-LLM利用規約 
この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", - "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct-gguf", - "project_name": "Fugaku-LLM-13B-instruct-gguf", - "downloads": 214, + "description": "llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset", + "project_name": "bert-base-japanese-v3-crf-ner-wikipedia-dataset", + "downloads": 185, "source": "Hugging Face", - "score": -0.09052554316148287, - "first_commit": null, - "latest_commit": null, + "score": -0.09396691855109476, + "first_commit": "2023-05-28 08:19:43", + "latest_commit": "2023-07-25 15:04:39", "languages": [], "model_or_dataset": "model", - "model_size": 13.4, - "model_architectures": null + "model_size": null, + "model_architectures": "BertWithCrfForTokenClassification" }, { - "description": "Dataset Summary This is the Business Scene Dialogue (BSD) dataset, a Japanese-English parallel corpus containing written conversations in various business scenarios.", - "url": "https://huggingface.co./datasets/ryo0634/bsd_ja_en", - "project_name": "bsd_ja_en", - "downloads": 213, + "description": "JQaRA : Japanese Question Answering with Retrieval Augmentation - 検索拡張(RAG)評価のための日本語 Q&A データセット 高性能な LLM の台頭に伴い、LLM を用いた質疑応答のユースケースが増加しています。", + "url": "https://huggingface.co./datasets/hotchpotch/JQaRA", + "project_name": "JQaRA", + "downloads": 185, "source": "Hugging Face", - "score": -0.09053614811201921, - "first_commit": "2022-01-25 16:35:02", - "latest_commit": "2024-01-11 07:36:44", + "score": -0.09396691855109476, + "first_commit": "2024-03-03 01:58:34", + "latest_commit": "2024-08-10 02:56:05", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Mistral-7B-Instruct-v0.3-gguf mistralaiさんが公開しているMistral-7B-Instruct-v0.3のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Mistral-7B-Instruct-v0.3-gguf", - "project_name": "Mistral-7B-Instruct-v0.3-gguf", - "downloads": 212, + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-GGUF", + "project_name": "japanese-stablelm-instruct-beta-7B-GGUF", + "downloads": 184, "source": "Hugging Face", - "score": -0.09054675306255554, - "first_commit": "2024-05-23 14:44:25", - "latest_commit": "2024-05-23 15:58:46", + "score": -0.09397663658359377, + "first_commit": "2023-11-03 01:04:31", + "latest_commit": "2023-11-03 12:54:55", "languages": [], "model_or_dataset": "model", - "model_size": 7.25, + "model_size": 6.74, "model_architectures": null }, { - "description": "ArrowPro-7B-KillerWhale-gguf DataPilotさんが公開しているArrowPro-7B-KillerWhaleのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ArrowPro-7B-KillerWhale-gguf", - "project_name": "ArrowPro-7B-KillerWhale-gguf", - "downloads": 209, + "description": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf pfnetさんが公開しているLlama3-Preferred-MedSwallow-70Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/pfnet-Llama3-Preferred-MedSwallow-70B-gguf", + "project_name": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf", + "downloads": 182, "source": "Hugging Face", - "score": -0.09057856791416453, - "first_commit": "2024-05-29 
15:06:55", - "latest_commit": "2024-05-29 15:53:17", + "score": -0.09399607264859178, + "first_commit": "2024-07-18 15:45:16", + "latest_commit": "2024-07-19 09:14:38", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 70.6, "model_architectures": null }, { - "description": "stockmark-100b-gguf stockmarkさんが公開しているstockmark-100bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/stockmark-100b-gguf", - "project_name": "stockmark-100b-gguf", - "downloads": 203, + "description": "We provide an Amazon product reviews dataset for multilingual text classification.", + "url": "https://huggingface.co./datasets/defunct-datasets/amazon_reviews_multi", + "project_name": "amazon_reviews_multi", + "downloads": 181, "source": "Hugging Face", - "score": -0.09064219761738254, - "first_commit": "2024-05-17 12:45:56", - "latest_commit": "2024-05-18 09:14:46", + "score": -0.09400579068109077, + "first_commit": "2022-01-25 16:34:54", + "latest_commit": "2023-11-02 14:52:21", "languages": [], - "model_or_dataset": "model", - "model_size": 96.2, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "llm-lora-classification", - "url": "https://github.com/hppRC/llm-lora-classification", - "project_name": "llm-lora-classification", - "stargazers_count": 83, - "source": "GitHub", - "score": -0.09066139680628928, - "first_commit": "2023-07-17 12:42:57", - "latest_commit": "2023-07-22 19:46:45", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, - { - "description": "Word2vec (word to vectors) approach for Japanese language using Gensim and Mecab.", - "url": "https://github.com/philipperemy/japanese-words-to-vectors", - "project_name": "japanese-words-to-vectors", - "stargazers_count": 83, - "source": "GitHub", - "score": -0.09066139680628928, - "first_commit": "2016-09-04 09:43:00", - "latest_commit": "2020-08-09 19:48:23", - "languages": [ - "Python" - ], - "model_or_dataset": "model" - }, - { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-v1.1", - "project_name": "t5-base-japanese-v1.1", - "downloads": 201, + "description": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-70b-instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-70b-instruct-v0.1-gguf", + "project_name": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf", + "downloads": 174, "source": "Hugging Face", - "score": -0.09066340751845521, - "first_commit": "2022-08-12 15:41:28", - "latest_commit": "2022-08-27 09:21:01", + "score": -0.0940738169085838, + "first_commit": "2024-05-03 09:00:00", + "latest_commit": "2024-05-04 06:52:16", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 69.2, + "model_architectures": null }, { - "description": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-A-v1-7Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-A-v1-7B-gguf", - "project_name": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf", - "downloads": 199, + "description": "Mistral-Large-Instruct-2407-gguf mistralaiさんが公開しているMistral-Large-Instruct-2407のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Mistral-Large-Instruct-2407-gguf", + "project_name": "Mistral-Large-Instruct-2407-gguf", + "downloads": 174, "source": "Hugging Face", - "score": -0.09068461741952788, - "first_commit": "2024-03-21 13:25:41", - "latest_commit": "2024-03-21 14:48:28", + "score": -0.0940738169085838, + "first_commit": "2024-07-24 18:59:58", + "latest_commit": "2024-07-26 12:21:45", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 123.0, "model_architectures": null }, { - "description": "Hibiki ASR Phonemizer This model is a Phoneme Level Speech Recognition network, originally a fine-tuned version of openai/whisper-large-v3 on a mixture of Different Japanese datasets.", - "url": "https://huggingface.co./Respair/Hibiki_ASR_Phonemizer_v0.2", - "project_name": "Hibiki_ASR_Phonemizer_v0.2", - "downloads": 196, + "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", + "url": "https://huggingface.co./TeamFnord/manga-ocr", + "project_name": "manga-ocr", + "downloads": 173, "source": "Hugging Face", - "score": -0.09071643227113688, - "first_commit": "2024-08-12 01:30:08", - "latest_commit": "2024-08-19 18:13:01", + "score": -0.0940835349410828, + "first_commit": "2022-01-15 17:39:06", + "latest_commit": "2022-02-10 07:50:15", "languages": [], "model_or_dataset": "model", - "model_size": 1.54, - "model_architectures": "WhisperForConditionalGeneration" + "model_size": null, + "model_architectures": "VisionEncoderDecoderModel" }, { - "description": "aya-23-8B-gguf CohereForAIさんが公開しているaya-23-8Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/aya-23-8B-gguf", - "project_name": "aya-23-8B-gguf", - "downloads": 196, + "description": "Stanza model for Japanese (ja)", + "url": "https://huggingface.co./stanfordnlp/stanza-ja", + "project_name": "stanza-ja", + "downloads": 172, "source": "Hugging Face", - "score": -0.09071643227113688, - "first_commit": "2024-05-26 16:32:53", - "latest_commit": "2024-05-27 00:54:36", + "score": -0.0940932529735818, + "first_commit": "2021-09-07 12:05:41", + "latest_commit": "2024-07-31 05:09:43", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": null, "model_architectures": null }, { - 
"description": "llm-japanese-dataset LLM構築用の日本語インストラクション(チャット)データセット 主に,英語で構築されたLLMモデルなどに対して,チャット(Instruction)応答タスクに関してLoRAなどでチューニングするために使用できます. ", - "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset", - "project_name": "llm-japanese-dataset", - "downloads": 194, + "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-bokete\", split=\"train\") 概要 大喜利投稿サイトBoketeのクロールデータです。", + "url": "https://huggingface.co./datasets/YANS-official/ogiri-bokete", + "project_name": "ogiri-bokete", + "downloads": 172, "source": "Hugging Face", - "score": -0.09073764217220955, - "first_commit": "2023-04-30 06:13:24", - "latest_commit": "2024-01-18 13:42:50", + "score": -0.0940932529735818, + "first_commit": "2024-07-21 09:58:15", + "latest_commit": "2024-08-31 09:24:55", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressive-v2の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-v2-GGUF", - "project_name": "Ninja-v1-RP-expressive-v2-GGUF", - "downloads": 193, + "description": "Ruri: Japanese General Text Embeddings Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-small", + "project_name": "ruri-small", + "downloads": 171, "source": "Hugging Face", - "score": -0.09074824712274589, - "first_commit": "2024-05-26 06:09:57", - "latest_commit": "2024-05-26 15:22:01", + "score": -0.09410297100608081, + "first_commit": "2024-08-28 16:23:12", + "latest_commit": "2024-09-04 08:49:30", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": 0.0681, + "model_architectures": "DistilBertModel" }, { - "description": "SakanaAI-EvoLLM-JP-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-v1-7Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-v1-7B-gguf", - "project_name": "SakanaAI-EvoLLM-JP-v1-7B-gguf", - "downloads": 192, + "description": "DataPilot-ArrowPro-7B-RobinHood-gguf DataPilotさんが公開しているArrowPro-7B-RobinHoodのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-RobinHood-gguf", + "project_name": "DataPilot-ArrowPro-7B-RobinHood-gguf", + "downloads": 171, "source": "Hugging Face", - "score": -0.09075885207328221, - "first_commit": "2024-03-21 13:04:25", - "latest_commit": "2024-03-21 14:41:04", + "score": -0.09410297100608081, + "first_commit": "2024-05-11 07:22:37", + "latest_commit": "2024-05-11 13:43:09", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "Chatbot Arena Conversationsの質問文から、aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2を使用して応答文を作成しました 質問文は、以下のモデルのPrompt部分を使用しました Chatbot Arena Conversations JA (calm2) 以下引用です。 ", - "url": "https://huggingface.co./datasets/aixsatoshi/Swallow-MX-chatbot-DPO", - "project_name": "Swallow-MX-chatbot-DPO", - "downloads": 192, + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-base", + "project_name": "ruri-reranker-base", + "downloads": 170, "source": "Hugging Face", - "score": -0.09075885207328221, - "first_commit": "2024-03-31 06:42:39", - "latest_commit": "2024-03-31 08:16:43", + "score": -0.0941126890385798, + "first_commit": "2024-08-20 01:10:40", + "latest_commit": "2024-09-04 08:50:21", "languages": [], - 
"model_or_dataset": "dataset", + "model_or_dataset": "model", + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" + }, + { + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-large-long", + "project_name": "t5-large-long", + "downloads": 168, + "source": "Hugging Face", + "score": -0.09413212510357781, + "first_commit": "2023-04-26 08:33:12", + "latest_commit": "2023-05-10 10:00:35", + "languages": [], + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "range3/wiki40b-ja This dataset consists of three parquet files from the wiki40b dataset with only Japanese data extracted.", - "url": "https://huggingface.co./datasets/range3/wiki40b-ja", - "project_name": "wiki40b-ja", - "downloads": 191, + "description": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Humanities-9B-gemma-2-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", + "project_name": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", + "downloads": 168, "source": "Hugging Face", - "score": -0.09076945702381854, - "first_commit": "2023-02-04 04:54:17", - "latest_commit": "2023-02-04 05:44:21", + "score": -0.09413212510357781, + "first_commit": "2024-07-15 15:43:00", + "latest_commit": "2024-07-15 17:01:09", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 9.24, "model_architectures": null }, { - "description": "QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF This is quantized version of DataPilot/Llama3.1-ArrowSE-v0.4 created using llama.cpp Original Model Card 概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", - "url": "https://huggingface.co./QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF", - "project_name": "Llama3.1-ArrowSE-v0.4-GGUF", - "downloads": 190, + "description": "Model Card for Japanese character-level GPT-2 Medium Model description This is a Japanese character-level GPT-2 Medium (310M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/gpt2-medium-japanese-char", + "project_name": "gpt2-medium-japanese-char", + "downloads": 165, "source": "Hugging Face", - "score": -0.09078006197435488, - "first_commit": "2024-07-28 06:17:48", - "latest_commit": "2024-07-28 06:57:40", + "score": -0.09416127920107482, + "first_commit": "2023-05-18 06:29:28", + "latest_commit": "2023-06-08 05:34:26", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": 0.335, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "Llama3ベースの日本語医療LLM MedLlama3-JP このモデルはLlama3の継続学習により作成された4種類のLLMから成るマージモデルです。 ", - "url": "https://huggingface.co./EQUES/MedLLama3-JP-v2", - "project_name": "MedLLama3-JP-v2", - "downloads": 189, + "description": "electra-base-cyberbullying This is an ELECTRA Base model for the Japanese language finetuned for automatic cyberbullying detection.", + "url": "https://huggingface.co./kit-nlp/transformers-ud-japanese-electra-base-discriminator-cyberbullying", + "project_name": "transformers-ud-japanese-electra-base-discriminator-cyberbullying", + "downloads": 165, "source": "Hugging Face", - "score": -0.09079066692489121, - "first_commit": "2024-07-01 13:42:17", - "latest_commit": "2024-07-13 06:12:43", + "score": 
-0.09416127920107482, + "first_commit": "2022-09-09 04:08:15", + "latest_commit": "2022-11-01 07:18:40", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "ElectraForSequenceClassification" }, { "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-base-1_6b", - "project_name": "japanese-stablelm-2-base-1_6b", - "downloads": 189, + "url": "https://huggingface.co./stabilityai/japanese-stable-diffusion-xl", + "project_name": "japanese-stable-diffusion-xl", + "downloads": 164, "source": "Hugging Face", - "score": -0.09079066692489121, + "score": -0.09417099723357382, "first_commit": null, "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": 1.64, + "model_size": null, "model_architectures": null }, { - "description": "aixsatoshi-Honyaku-13b-gguf aixsatoshiさんが公開しているHonyaku-13bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/aixsatoshi-Honyaku-13b-gguf", - "project_name": "aixsatoshi-Honyaku-13b-gguf", - "downloads": 188, + "description": "hubert-base-jtube This repo provides model weights for the hubert-base model trained on the JTubeSpeech corpus. ", + "url": "https://huggingface.co./sarulab-speech/hubert-base-jtube", + "project_name": "hubert-base-jtube", + "downloads": 162, "source": "Hugging Face", - "score": -0.09080127187542755, - "first_commit": "2024-05-19 08:07:15", - "latest_commit": "2024-05-19 09:24:59", + "score": -0.09419043329857182, + "first_commit": "2024-02-02 04:15:22", + "latest_commit": "2024-02-05 11:49:57", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": null + "model_size": null, + "model_architectures": "HubertModel" }, { - "description": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf pfnetさんが公開しているLlama3-Preferred-MedSwallow-70Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/pfnet-Llama3-Preferred-MedSwallow-70B-gguf", - "project_name": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf", - "downloads": 188, + "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", + "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese-hiragana", + "project_name": "wav2vec2-large-xlsr-japanese-hiragana", + "downloads": 161, "source": "Hugging Face", - "score": -0.09080127187542755, - "first_commit": "2024-07-18 15:45:16", - "latest_commit": "2024-07-19 09:14:38", + "score": -0.09420015133107083, + "first_commit": "2021-06-18 07:15:24", + "latest_commit": "2023-02-08 00:36:47", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": null + "model_size": 0.316, + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "rinna-llama-3-youko-70b-instruct-gguf rinnaさんが公開しているllama-3-youko-70b-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-70b-instruct-gguf", - "project_name": "rinna-llama-3-youko-70b-instruct-gguf", - "downloads": 188, + "description": "Local-Novel-LLM-project様の Assistance をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Assistance-7B-GGUF", + "project_name": "Assistance-7B-GGUF", + "downloads": 160, "source": "Hugging Face", - "score": -0.09080127187542755, - "first_commit": "2024-07-27 
09:04:09", - "latest_commit": "2024-07-31 14:35:52", + "score": -0.09420986936356983, + "first_commit": "2024-05-03 12:16:29", + "latest_commit": "2024-05-04 07:48:41", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, + "model_size": 7.24, "model_architectures": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-RAG", - "project_name": "Orion-14B-Chat-RAG", - "downloads": 185, + "description": "databricks-dolly-15k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", + "url": "https://huggingface.co./datasets/llm-jp/databricks-dolly-15k-ja", + "project_name": "databricks-dolly-15k-ja", + "downloads": 160, "source": "Hugging Face", - "score": -0.09083308672703655, - "first_commit": "2024-01-16 12:19:08", - "latest_commit": "2024-03-26 10:08:09", + "score": -0.09420986936356983, + "first_commit": "2024-01-27 07:11:25", + "latest_commit": "2024-01-30 18:09:37", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "OrionForCausalLM" + "model_architectures": null }, { - "description": "AutoWikiQA 東工大が公開しているSwallow-MXを用いて、Wikipedia中のテキストを入力として「質問(query)」と「回答(answer)」を生成し、生成された質問と回答についてフィルタリングを行ったデータセットです。", - "url": "https://huggingface.co./datasets/cl-nagoya/auto-wiki-qa", - "project_name": "auto-wiki-qa", - "downloads": 185, + "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", + "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct-gguf", + "project_name": "Fugaku-LLM-13B-instruct-gguf", + "downloads": 159, "source": "Hugging Face", - "score": -0.09083308672703655, - "first_commit": "2024-03-28 01:33:42", - "latest_commit": "2024-04-20 12:17:33", + "score": -0.09421958739606884, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 13.4, "model_architectures": null }, { - "description": "[EZO model card]", - "url": "https://huggingface.co./AXCXEPT/EZO-InternVL2-26B", - "project_name": "EZO-InternVL2-26B", - "downloads": 183, + "description": "JaCWIR: Japanese Casual Web IR - 日本語情報検索評価のための小規模でカジュアルなWebタイトルと概要のデータセット 近年、大規模言語モデル(LLM)の台頭により、一般的な日本語を用いた自然な検索クエリで質問するユースケースが増えています。", + "url": "https://huggingface.co./datasets/hotchpotch/JaCWIR", + "project_name": "JaCWIR", + "downloads": 159, "source": "Hugging Face", - "score": -0.09085429662810922, - "first_commit": "2024-08-19 08:03:55", - "latest_commit": "2024-08-23 10:56:47", + "score": -0.09421958739606884, + "first_commit": "2024-03-23 05:57:58", + "latest_commit": "2024-04-01 02:34:34", "languages": [], - "model_or_dataset": "model", - "model_size": 25.5, - "model_architectures": "InternVLChatModel" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Japanese-WizardLM2-ChatV-7B-GGUF GGUF conversion of \"Japanese-WizardLM2-ChatV-7B\" This model, Japanese-WizardLM2-ChatV-7B, is based on \"chatntq-ja-7b-v1.0 \", and was created by subtracting \"Mistral-7B-v0.1\" from \"WizardLM-2-7b\" ChatVector was added by a factor of 1.0.", - "url": "https://huggingface.co./umiyuki/Japanese-WizardLM2-ChatV-7B-GGUF", - 
"project_name": "Japanese-WizardLM2-ChatV-7B-GGUF", - "downloads": 179, + "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/30876467ad5a8a81821f", + "url": "https://huggingface.co./sonoisa/t5-qiita-title-generation", + "project_name": "t5-qiita-title-generation", + "downloads": 157, "source": "Hugging Face", - "score": -0.09089671643025456, - "first_commit": "2024-04-16 14:45:30", - "latest_commit": "2024-04-17 01:41:16", + "score": -0.09423902346106684, + "first_commit": "2021-10-17 14:46:56", + "latest_commit": "2022-02-21 13:39:01", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "HODACHI様の Llama-3.1-8B-EZO-1.1-it をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama-3.1-8B-EZO-1.1-it-GGUF", - "project_name": "Llama-3.1-8B-EZO-1.1-it-GGUF", - "downloads": 176, + "description": "Umievo-itr012-Gleipnir-7B-GGUF", + "url": "https://huggingface.co./QuantFactory/Umievo-itr012-Gleipnir-7B-GGUF", + "project_name": "Umievo-itr012-Gleipnir-7B-GGUF", + "downloads": 157, "source": "Hugging Face", - "score": -0.09092853128186355, - "first_commit": "2024-07-31 12:12:01", - "latest_commit": "2024-07-31 18:13:59", + "score": -0.09423902346106684, + "first_commit": "2024-06-09 03:48:10", + "latest_commit": "2024-06-09 13:12:32", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": 7.24, "model_architectures": null }, { - "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", - "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B", - "project_name": "Fugaku-LLM-13B", - "downloads": 174, + "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with Gaianet Prompt template: prompt template: llama-3-chat Context size: chat_ctx_size: 4096 Run with GaiaNet:", + "url": "https://huggingface.co./gaianet/Llama-3-8B-Japanese-Instruct-GGUF", + "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", + "downloads": 156, "source": "Hugging Face", - "score": -0.09094974118293622, - "first_commit": null, - "latest_commit": null, + "score": -0.09424874149356584, + "first_commit": "2024-05-14 05:38:05", + "latest_commit": "2024-05-16 13:44:53", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", - "url": "https://huggingface.co./TeamFnord/manga-ocr", - "project_name": "manga-ocr", - "downloads": 174, + "description": "Japanese InstructBLIP Alpha Model Details Japanese InstructBLIP Alpha is a vision-language instruction-following model that enables to generate Japanese descriptions for input images and optionally input texts such as questions.", + "url": "https://huggingface.co./stabilityai/japanese-instructblip-alpha", + "project_name": "japanese-instructblip-alpha", + "downloads": 155, "source": "Hugging Face", - "score": -0.09094974118293622, - "first_commit": "2022-01-15 17:39:06", - "latest_commit": "2022-02-10 07:50:15", + "score": -0.09425845952606485, + "first_commit": "2023-08-16 23:49:58", + "latest_commit": "2023-11-17 03:57:41", "languages": [], 
"model_or_dataset": "model", "model_size": null, - "model_architectures": "VisionEncoderDecoderModel" + "model_architectures": "JapaneseInstructBlipAlphaForConditionalGeneration" }, { - "description": "Japanese E5 Mixtral 7B Slerp GGUF GGUF conversion of oshizo/japanese-e5-mistral-7b_slerp Avaiable formats: Q2_K.gguf Q3_K.gguf Q4_K.gguf Q5_K.gguf", - "url": "https://huggingface.co./mm/japanese-e5-mistral-7b_slerp_gguf", - "project_name": "japanese-e5-mistral-7b_slerp_gguf", - "downloads": 171, + "description": "Japanese-WizardLM2-ChatV-7B-GGUF GGUF conversion of \"Japanese-WizardLM2-ChatV-7B\" This model, Japanese-WizardLM2-ChatV-7B, is based on \"chatntq-ja-7b-v1.0 \", and was created by subtracting \"Mistral-7B-v0.1\" from \"WizardLM-2-7b\" ChatVector was added by a factor of 1.0.", + "url": "https://huggingface.co./umiyuki/Japanese-WizardLM2-ChatV-7B-GGUF", + "project_name": "Japanese-WizardLM2-ChatV-7B-GGUF", + "downloads": 153, "source": "Hugging Face", - "score": -0.09098155603454523, - "first_commit": "2024-06-09 08:34:37", - "latest_commit": "2024-06-14 16:12:17", + "score": -0.09427789559106285, + "first_commit": "2024-04-16 14:45:30", + "latest_commit": "2024-04-17 01:41:16", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "ryota39様の Tora-7B-v0.1 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Tora-7B-v0.1-GGUF", - "project_name": "Tora-7B-v0.1-GGUF", - "downloads": 171, + "description": "nlp-waseda/roberta-large-japanese Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese", + "project_name": "roberta-large-japanese", + "downloads": 151, "source": "Hugging Face", - "score": -0.09098155603454523, - "first_commit": "2024-05-07 11:24:35", - "latest_commit": "2024-06-15 03:16:21", + "score": -0.09429733165606086, + "first_commit": "2022-05-10 08:37:48", + "latest_commit": "2022-10-21 14:48:46", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": null, + "model_architectures": "RobertaForMaskedLM" }, { - "description": "GPT-2 small Japanese model This repository contains a GPT2-small model trained on Japanese Wikipedia dataset.", - "url": "https://huggingface.co./colorfulscoop/gpt2-small-ja", - "project_name": "gpt2-small-ja", - "downloads": 171, + "description": "Tanuki-8B-dpo-v1.0-GPTQ-8bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGPTQ 8bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GPTQ-8bit", + "project_name": "Tanuki-8B-dpo-v1.0-GPTQ-8bit", + "downloads": 150, "source": "Hugging Face", - "score": -0.09098155603454523, - "first_commit": "2021-03-27 02:27:05", - "latest_commit": "2021-09-27 20:50:17", + "score": -0.09430704968855987, + "first_commit": "2024-08-27 17:32:47", + "latest_commit": "2024-09-03 09:28:59", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "LlamaForCausalLM" }, { - "description": "読み込み方 from datasets import load_dataset dataset = load_dataset(\"YANS-official/ogiri-bokete\", split=\"train\") 概要 大喜利投稿サイトBoketeのクロールデータです。", - "url": "https://huggingface.co./datasets/YANS-official/ogiri-bokete", - "project_name": "ogiri-bokete", - "downloads": 170, + "description": "[EZO model card]", + "url": 
"https://huggingface.co./AXCXEPT/EZO-InternVL2-26B", + "project_name": "EZO-InternVL2-26B", + "downloads": 150, "source": "Hugging Face", - "score": -0.09099216098508156, - "first_commit": "2024-07-21 09:58:15", - "latest_commit": "2024-08-31 09:24:55", + "score": -0.09430704968855987, + "first_commit": "2024-08-19 08:03:55", + "latest_commit": "2024-08-23 10:56:47", + "languages": [], + "model_or_dataset": "model", + "model_size": 25.5, + "model_architectures": "InternVLChatModel" + }, + { + "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", + "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B", + "project_name": "Fugaku-LLM-13B", + "downloads": 149, + "source": "Hugging Face", + "score": -0.09431676772105886, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Oumuamua-7b-RPの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP-GGUF", - "project_name": "Oumuamua-7b-RP-GGUF", - "downloads": 168, + "description": "モデル概要 AWSのtrn1インスタンスを用いて開発した大喜利言語モデルです。", + "url": "https://huggingface.co./watashiha/watashiha-gpt-6b", + "project_name": "watashiha-gpt-6b", + "downloads": 147, "source": "Hugging Face", - "score": -0.09101337088615423, - "first_commit": "2024-06-23 13:00:02", - "latest_commit": "2024-06-23 14:45:14", + "score": -0.09433620378605687, + "first_commit": "2023-12-28 05:41:38", + "latest_commit": "2024-03-04 05:21:14", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, - "model_architectures": null + "model_size": 5.83, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", - "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct", - "project_name": "Fugaku-LLM-13B-instruct", - "downloads": 168, + "description": "This is for (private) DEMO only.", + "url": "https://huggingface.co./Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition", + "project_name": "wav2vec2-xlsr-japanese-speech-emotion-recognition", + "downloads": 147, "source": "Hugging Face", - "score": -0.09101337088615423, - "first_commit": null, - "latest_commit": null, + "score": -0.09433620378605687, + "first_commit": "2021-09-22 04:10:36", + "latest_commit": "2023-10-19 01:31:17", "languages": [], "model_or_dataset": "model", - "model_size": 13.2, - "model_architectures": null + "model_size": 0.316, + "model_architectures": "HubertForSequenceClassification" }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stable-clip-vit-l-16", - "project_name": "japanese-stable-clip-vit-l-16", - "downloads": 165, + "description": "lightblue-suzume-llama-3-8B-multilingual-gguf lightblueさんが公開しているsuzume-llama-3-8B-multilingualのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-multilingual-gguf", + "project_name": 
"lightblue-suzume-llama-3-8B-multilingual-gguf", + "downloads": 145, "source": "Hugging Face", - "score": -0.09104518573776323, - "first_commit": null, - "latest_commit": null, + "score": -0.09435563985105487, + "first_commit": "2024-05-06 16:31:55", + "latest_commit": "2024-05-07 12:59:57", "languages": [], "model_or_dataset": "model", - "model_size": 0.41400000000000003, + "model_size": null, "model_architectures": null }, { - "description": "DataPilot様の ArrowPro-7B-KUJIRA をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/ArrowPro-7B-KUJIRA-GGUF", - "project_name": "ArrowPro-7B-KUJIRA-GGUF", - "downloads": 162, + "description": "DataPilot様の Llama3-ArrowSE-8B-v0.3 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama3-ArrowSE-8B-v0.3-GGUF", + "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF", + "downloads": 145, "source": "Hugging Face", - "score": -0.09107700058937224, - "first_commit": "2024-05-09 13:34:05", - "latest_commit": "2024-05-09 23:32:52", + "score": -0.09435563985105487, + "first_commit": "2024-07-07 07:53:32", + "latest_commit": "2024-07-07 13:40:26", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 8.03, "model_architectures": null }, { - "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", - "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese-hiragana", - "project_name": "wav2vec2-large-xlsr-japanese-hiragana", - "downloads": 161, + "description": "mbpp-ja", + "url": "https://huggingface.co./datasets/llm-jp/mbpp-ja", + "project_name": "mbpp-ja", + "downloads": 144, "source": "Hugging Face", - "score": -0.09108760553990856, - "first_commit": "2021-06-18 07:15:24", - "latest_commit": "2023-02-08 00:36:47", + "score": -0.09436535788355388, + "first_commit": "2024-04-19 00:26:56", + "latest_commit": "2024-04-20 06:26:51", "languages": [], - "model_or_dataset": "model", - "model_size": 0.316, - "model_architectures": "Wav2Vec2ForCTC" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Oumuamua-7b-instruct-GGUF This is quantized version of nitky/Oumuamua-7b-instruct created using llama.cpp Model Description This is a merge of pre-trained language models created using mergekit. 
", - "url": "https://huggingface.co./QuantFactory/Oumuamua-7b-instruct-GGUF", - "project_name": "Oumuamua-7b-instruct-GGUF", - "downloads": 159, + "description": "HODACHI様の Llama-3.1-8B-EZO-1.1-it をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama-3.1-8B-EZO-1.1-it-GGUF", + "project_name": "Llama-3.1-8B-EZO-1.1-it-GGUF", + "downloads": 143, "source": "Hugging Face", - "score": -0.09110881544098123, - "first_commit": "2024-06-19 08:52:12", - "latest_commit": "2024-06-19 11:40:58", + "score": -0.09437507591605288, + "first_commit": "2024-07-31 12:12:01", + "latest_commit": "2024-07-31 18:13:59", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, + "model_size": 8.03, "model_architectures": null }, { - "description": "Stanza model for Japanese (ja)", - "url": "https://huggingface.co./stanfordnlp/stanza-ja", - "project_name": "stanza-ja", - "downloads": 157, + "description": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-MS-7b-instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf", + "project_name": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf", + "downloads": 143, "source": "Hugging Face", - "score": -0.0911300253420539, - "first_commit": "2021-09-07 12:05:41", - "latest_commit": "2024-07-31 05:09:43", + "score": -0.09437507591605288, + "first_commit": "2024-05-02 13:37:22", + "latest_commit": "2024-05-03 04:35:34", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.33, "model_architectures": null }, { - "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/jawiki-sentences", - "project_name": "jawiki-sentences", - "downloads": 157, + "description": "Meta-Llama-3-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3-8B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Meta-Llama-3-8B-Instruct-gguf", + "project_name": "Meta-Llama-3-8B-Instruct-gguf", + "downloads": 142, "source": "Hugging Face", - "score": -0.0911300253420539, - "first_commit": "2023-06-03 03:02:08", - "latest_commit": "2023-10-25 15:22:05", + "score": -0.09438479394855188, + "first_commit": "2024-05-12 07:18:00", + "latest_commit": "2024-05-12 08:08:38", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 8.03, "model_architectures": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GGUF", - "project_name": "japanese-stablelm-base-beta-70B-GGUF", - "downloads": 156, + "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", + "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B", + "project_name": "Japanese-Starling-ChatV-7B", + "downloads": 141, "source": "Hugging Face", - "score": -0.09114063029259023, - "first_commit": "2023-11-06 11:33:47", - "latest_commit": "2023-11-06 12:14:36", + "score": -0.09439451198105088, + "first_commit": "2024-04-14 12:18:31", + "latest_commit": "2024-04-14 15:26:06", "languages": [], "model_or_dataset": "model", - "model_size": 69.0, - "model_architectures": null + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "c4ai-command-r-v01-japanese-instruct-GGUF 概要 Aratako/c4ai-command-r-v01-japanese-instructの量子化済みGGUF版です。", - "url": 
"https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct-GGUF", - "project_name": "c4ai-command-r-v01-japanese-instruct-GGUF", - "downloads": 155, + "description": "Llama3ベースの日本語医療LLM MedLlama3-JP このモデルはLlama3の継続学習により作成された4種類のLLMから成るマージモデルです。 ", + "url": "https://huggingface.co./EQUES/MedLLama3-JP-v2", + "project_name": "MedLLama3-JP-v2", + "downloads": 138, "source": "Hugging Face", - "score": -0.09115123524312657, - "first_commit": "2024-04-05 17:10:51", - "latest_commit": "2024-04-07 03:19:34", + "score": -0.0944236660785479, + "first_commit": "2024-07-01 13:42:17", + "latest_commit": "2024-07-13 06:12:43", "languages": [], "model_or_dataset": "model", - "model_size": 35.0, - "model_architectures": null + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてQAタスクに用いれるようにしたものです。 ", "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-QAe", "project_name": "deberta-v2-base-japanese-finetuned-QAe", - "downloads": 153, + "downloads": 137, "source": "Hugging Face", - "score": -0.09117244514419924, + "score": -0.09443338411104689, "first_commit": "2023-01-09 11:59:13", "latest_commit": "2023-03-27 02:43:35", "languages": [], @@ -8003,334 +8099,362 @@ "model_architectures": "DebertaV2ForQuestionAnswering" }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stable-vlm", - "project_name": "japanese-stable-vlm", - "downloads": 152, + "description": "fio-base-japanese-v0.1 日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!", + "url": "https://huggingface.co./bclavie/fio-base-japanese-v0.1", + "project_name": "fio-base-japanese-v0.1", + "downloads": 137, "source": "Hugging Face", - "score": -0.09118305009473557, - "first_commit": null, - "latest_commit": null, + "score": -0.09443338411104689, + "first_commit": "2023-12-18 11:01:07", + "latest_commit": "2023-12-19 10:28:16", "languages": [], "model_or_dataset": "model", - "model_size": 7.57, - "model_architectures": null + "model_size": 0.111, + "model_architectures": "BertModel" }, { - "description": "databricks-dolly-15k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", - "url": "https://huggingface.co./datasets/llm-jp/databricks-dolly-15k-ja", - "project_name": "databricks-dolly-15k-ja", - "downloads": 151, + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GGUF", + "project_name": "japanese-stablelm-base-beta-70B-GGUF", + "downloads": 136, "source": "Hugging Face", - "score": -0.09119365504527191, - "first_commit": "2024-01-27 07:11:25", - "latest_commit": "2024-01-30 18:09:37", + "score": -0.0944431021435459, + "first_commit": "2023-11-06 11:33:47", + "latest_commit": "2023-11-06 12:14:36", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 69.0, "model_architectures": null }, { - "description": "oasst2-33k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", - "url": "https://huggingface.co./datasets/llm-jp/oasst2-33k-ja", - "project_name": "oasst2-33k-ja", - "downloads": 151, + "description": "このデ���タセットについて このデータは、日本の官公庁のWebサイトに掲載されている「よくある質問」を手作業で抽出し、インストラクション用のデータセットとしたものです。 ", + "url": 
"https://huggingface.co./datasets/matsuxr/JaGovFaqs-22k", + "project_name": "JaGovFaqs-22k", + "downloads": 135, "source": "Hugging Face", - "score": -0.09119365504527191, - "first_commit": "2024-04-28 16:24:00", - "latest_commit": "2024-04-28 16:39:03", + "score": -0.0944528201760449, + "first_commit": "2023-12-31 13:58:41", + "latest_commit": "2024-02-29 02:51:20", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", - "url": "https://huggingface.co./DataPilot/Llama3.1-ArrowSE-v0.4", - "project_name": "Llama3.1-ArrowSE-v0.4", - "downloads": 149, + "description": "deberta-large-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora", + "project_name": "deberta-large-japanese-aozora", + "downloads": 134, "source": "Hugging Face", - "score": -0.09121486494634456, - "first_commit": "2024-07-24 07:37:16", - "latest_commit": "2024-07-24 12:00:46", + "score": -0.0944625382085439, + "first_commit": "2022-05-26 14:46:58", + "latest_commit": "2023-01-14 00:27:22", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for VecTeus-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 VecTeus has the following changes compared to Mistral-7B-v0.1.", - "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-v1", - "project_name": "Vecteus-v1", - "downloads": 149, + "description": "rinna-llama-3-youko-70b-instruct-gguf rinnaさんが公開しているllama-3-youko-70b-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-70b-instruct-gguf", + "project_name": "rinna-llama-3-youko-70b-instruct-gguf", + "downloads": 134, "source": "Hugging Face", - "score": -0.09121486494634456, - "first_commit": "2024-05-01 02:08:01", - "latest_commit": "2024-05-04 04:07:22", + "score": -0.0944625382085439, + "first_commit": "2024-07-27 09:04:09", + "latest_commit": "2024-07-31 14:35:52", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": 70.6, + "model_architectures": null }, { - "description": "Japanese Anime Speech Dataset 日本語はこちら japanese-anime-speech is an audio-text dataset designed for the training of automatic speech recognition models.", - "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech", - "project_name": "japanese-anime-speech", - "downloads": 148, + "description": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf ryota39さんが公開しているPhi-3-mini-4k-instruct-dpoのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ryota39-Phi-3-mini-4k-instruct-dpo-gguf", + "project_name": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf", + "downloads": 132, "source": "Hugging Face", - "score": -0.0912254698968809, - "first_commit": "2023-11-07 13:53:40", - "latest_commit": "2024-06-30 10:06:34", + "score": -0.09448197427354191, + "first_commit": "2024-04-29 14:27:31", + "latest_commit": "2024-04-29 16:53:45", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 3.82, "model_architectures": null }, { - "description": "whisper-large-v3-japanese-4k-steps 
This model is a fine-tuned version of openai/whisper-large-v3 on the Common Voice 16.1 dataset.", - "url": "https://huggingface.co./drewschaub/whisper-large-v3-japanese-4k-steps", - "project_name": "whisper-large-v3-japanese-4k-steps", - "downloads": 146, + "description": "aya-23-35B-gguf CohereForAIさんが公開しているaya-23-35Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/aya-23-35B-gguf", + "project_name": "aya-23-35B-gguf", + "downloads": 132, "source": "Hugging Face", - "score": -0.09124667979795358, - "first_commit": "2024-02-17 01:01:51", - "latest_commit": "2024-02-18 01:31:35", + "score": -0.09448197427354191, + "first_commit": "2024-05-26 16:32:27", + "latest_commit": "2024-05-27 00:47:56", "languages": [], "model_or_dataset": "model", - "model_size": 1.54, - "model_architectures": "WhisperForConditionalGeneration" + "model_size": 35.0, + "model_architectures": null }, { - "description": "QuantFactory/shisa-gamma-7b-v1-GGUF", - "url": "https://huggingface.co./QuantFactory/shisa-gamma-7b-v1-GGUF", - "project_name": "shisa-gamma-7b-v1-GGUF", - "downloads": 145, + "description": "MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1.", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "downloads": 131, "source": "Hugging Face", - "score": -0.0912572847484899, - "first_commit": "2024-06-12 17:16:36", - "latest_commit": "2024-06-18 06:17:30", + "score": -0.0944916923060409, + "first_commit": "2024-01-26 06:13:55", + "latest_commit": "2024-01-26 06:36:22", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressiveの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-GGUF", - "project_name": "Ninja-v1-RP-expressive-GGUF", - "downloads": 144, + "description": "JaQuAD is developed to provide a SQuAD-like QA dataset in Japanese.", + "url": "https://huggingface.co./datasets/SkelterLabsInc/JaQuAD", + "project_name": "JaQuAD", + "downloads": 131, "source": "Hugging Face", - "score": -0.09126788969902624, - "first_commit": "2024-05-21 12:16:42", - "latest_commit": "2024-05-24 15:11:25", + "score": -0.0944916923060409, + "first_commit": "2022-01-26 01:34:38", + "latest_commit": "2022-10-25 09:06:40", "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "deberta-large-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora", - "project_name": "deberta-large-japanese-aozora", - "downloads": 142, + "description": "calm3-22b-RP-v2-GGUF 概要 Aratako/calm3-22b-RP-v2の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/calm3-22b-RP-v2-GGUF", + "project_name": "calm3-22b-RP-v2-GGUF", + "downloads": 130, "source": "Hugging Face", - "score": -0.09128909960009891, - "first_commit": "2022-05-26 14:46:58", - "latest_commit": "2023-01-14 00:27:22", + "score": -0.09450141033853991, + 
"first_commit": "2024-09-16 04:30:57", + "latest_commit": "2024-09-16 09:55:09", "languages": [], "model_or_dataset": "model", + "model_size": 22.5, + "model_architectures": null + }, + { + "description": "Japanese Anime Speech Dataset V2 日本語はこちら japanese-anime-speech-v2 is an audio-text dataset designed for training automatic speech recognition models.", + "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech-v2", + "project_name": "japanese-anime-speech-v2", + "downloads": 129, + "source": "Hugging Face", + "score": -0.09451112837103892, + "first_commit": "2024-06-26 14:18:01", + "latest_commit": "2024-07-24 19:06:51", + "languages": [], + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": null }, { - "description": "Tanuki-8x8B-dpo-v1.0-GPTQ-4bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGPTQ 4bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GPTQ-4bit", - "project_name": "Tanuki-8x8B-dpo-v1.0-GPTQ-4bit", - "downloads": 140, + "description": "Mistral-7B-Instruct-v0.3-gguf mistralaiさんが公開しているMistral-7B-Instruct-v0.3のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Mistral-7B-Instruct-v0.3-gguf", + "project_name": "Mistral-7B-Instruct-v0.3-gguf", + "downloads": 128, "source": "Hugging Face", - "score": -0.09131030950117158, - "first_commit": "2024-08-27 18:19:13", - "latest_commit": "2024-09-03 09:27:14", + "score": -0.09452084640353792, + "first_commit": "2024-05-23 14:44:25", + "latest_commit": "2024-05-23 15:58:46", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "TanukiForCausalLM" + "model_size": 7.25, + "model_architectures": null }, { - "description": "SentenceTransformer based on tohoku-nlp/bert-large-japanese-v2 This is a sentence-transformers model finetuned from tohoku-nlp/bert-large-japanese-v2.", - "url": "https://huggingface.co./cl-nagoya/ruri-pt-large", - "project_name": "ruri-pt-large", - "downloads": 139, + "description": "MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1.", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "downloads": 128, "source": "Hugging Face", - "score": -0.09132091445170791, - "first_commit": "2024-08-19 00:58:49", - "latest_commit": "2024-08-30 00:59:26", + "score": -0.09452084640353792, + "first_commit": "2024-01-28 16:13:26", + "latest_commit": "2024-01-28 16:24:30", "languages": [], "model_or_dataset": "model", - "model_size": 0.337, - "model_architectures": "BertModel" + "model_size": 7.24, + "model_architectures": null }, { - "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", - "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B", - "project_name": "Japanese-Starling-ChatV-7B", - "downloads": 139, + "description": "ChatNTQ JA 7B V1.0 Model Description", + "url": "https://huggingface.co./NTQAI/chatntq-ja-7b-v1.0", + "project_name": "chatntq-ja-7b-v1.0", + "downloads": 126, "source": 
"Hugging Face", - "score": -0.09132091445170791, - "first_commit": "2024-04-14 12:18:31", - "latest_commit": "2024-04-14 15:26:06", + "score": -0.09454028246853592, + "first_commit": "2023-12-26 06:22:59", + "latest_commit": "2023-12-26 09:22:34", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": "MistralForCausalLM" }, { - "description": "DataPilot様の Llama3-ArrowSE-8B-v0.3 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama3-ArrowSE-8B-v0.3-GGUF", - "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF", - "downloads": 139, + "description": "lightblue-suzume-llama-3-8B-japanese-gguf lightblueさんが公開しているsuzume-llama-3-8B-japaneseのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-japanese-gguf", + "project_name": "lightblue-suzume-llama-3-8B-japanese-gguf", + "downloads": 126, "source": "Hugging Face", - "score": -0.09132091445170791, - "first_commit": "2024-07-07 07:53:32", - "latest_commit": "2024-07-07 13:40:26", + "score": -0.09454028246853592, + "first_commit": "2024-04-23 13:30:08", + "latest_commit": "2024-05-07 12:58:06", "languages": [], "model_or_dataset": "model", "model_size": 8.03, "model_architectures": null }, { - "description": "Local-Novel-LLM-project様の Ninja-V2-7B をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Ninja-V2-7B-GGUF", - "project_name": "Ninja-V2-7B-GGUF", - "downloads": 139, + "description": "range3/wiki40b-ja This dataset consists of three parquet files from the wiki40b dataset with only Japanese data extracted.", + "url": "https://huggingface.co./datasets/range3/wiki40b-ja", + "project_name": "wiki40b-ja", + "downloads": 124, "source": "Hugging Face", - "score": -0.09132091445170791, - "first_commit": "2024-06-15 16:23:41", - "latest_commit": "2024-06-15 21:25:59", + "score": -0.09455971853353393, + "first_commit": "2023-02-04 04:54:17", + "latest_commit": "2023-02-04 05:44:21", "languages": [], - "model_or_dataset": "model", - "model_size": 7.24, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "このデータセットについて このデータは、日本の官公庁のWebサイトに掲載されている「よくある質問」を手作業で抽出し、インストラクション用のデータセットとしたものです。 ", - "url": "https://huggingface.co./datasets/matsuxr/JaGovFaqs-22k", - "project_name": "JaGovFaqs-22k", - "downloads": 137, + "description": "NVIDIA が公開している SteerLM 向けのトライアルデータセット HelpSteer2を日本語に自動翻訳したデータセットになります。", + "url": "https://huggingface.co./datasets/kunishou/HelpSteer2-20k-ja", + "project_name": "HelpSteer2-20k-ja", + "downloads": 124, "source": "Hugging Face", - "score": -0.09134212435278058, - "first_commit": "2023-12-31 13:58:41", - "latest_commit": "2024-02-29 02:51:20", + "score": -0.09455971853353393, + "first_commit": "2024-06-21 08:09:33", + "latest_commit": "2024-06-21 08:44:21", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-small-long", - "project_name": "t5-small-long", - "downloads": 135, + "description": "ryota39様の Tora-7B-v0.1 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Tora-7B-v0.1-GGUF", + "project_name": "Tora-7B-v0.1-GGUF", + "downloads": 121, "source": "Hugging Face", - "score": -0.09136333425385325, - "first_commit": "2023-04-26 08:26:49", - "latest_commit": "2023-05-10 10:01:29", + "score": -0.09458887263103094, + "first_commit": "2024-05-07 11:24:35", + "latest_commit": "2024-06-15 03:16:21", "languages": [], "model_or_dataset": "model", - 
"model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 7.24, + "model_architectures": null }, { - "description": "DataPilot様の ArrowPro-7B-RobinHood をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/ArrowPro-7B-RobinHood-GGUF", - "project_name": "ArrowPro-7B-RobinHood-GGUF", - "downloads": 135, + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressive-breadcrumbsの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-breadcrumbs-GGUF", + "project_name": "Ninja-v1-RP-expressive-breadcrumbs-GGUF", + "downloads": 121, "source": "Hugging Face", - "score": -0.09136333425385325, - "first_commit": "2024-05-10 12:03:26", - "latest_commit": "2024-05-10 18:14:28", + "score": -0.09458887263103094, + "first_commit": "2024-05-26 13:46:39", + "latest_commit": "2024-06-01 11:55:08", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "GitHub リポジトリ cl-tohoku/quiz-datasets で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/aio-retriever", - "project_name": "aio-retriever", - "downloads": 133, + "description": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-A-v1-7Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-A-v1-7B-gguf", + "project_name": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf", + "downloads": 121, "source": "Hugging Face", - "score": -0.09138454415492592, - "first_commit": "2023-07-04 04:53:47", - "latest_commit": "2023-10-25 15:31:08", + "score": -0.09458887263103094, + "first_commit": "2024-03-21 13:25:41", + "latest_commit": "2024-03-21 14:48:28", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 7.24, "model_architectures": null }, { - "description": "Local-Novel-LLM-project様の Vecteus-V2-7B をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Vecteus-V2-7B-GGUF", - "project_name": "Vecteus-V2-7B-GGUF", - "downloads": 130, + "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-base - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-base StableLM", + "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-base-gguf", + "project_name": "stabilityai-japanese-stablelm-3b-4e1t-base-gguf", + "downloads": 120, "source": "Hugging Face", - "score": -0.09141635900653491, - "first_commit": "2024-06-16 05:26:00", - "latest_commit": "2024-06-16 11:32:15", + "score": -0.09459859066352994, + "first_commit": "2023-11-16 10:23:21", + "latest_commit": "2023-11-16 11:18:48", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 2.8, "model_architectures": null }, { - "description": "ryota39様の Tora-7B-v0.2 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Tora-7B-v0.2-GGUF", - "project_name": "Tora-7B-v0.2-GGUF", - "downloads": 130, + "description": "Ninja-v1-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-128kのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-128k-gguf", + "project_name": "Ninja-v1-128k-gguf", + "downloads": 120, "source": "Hugging Face", - "score": -0.09141635900653491, - "first_commit": "2024-05-06 12:50:49", - "latest_commit": "2024-06-15 03:17:32", + "score": -0.09459859066352994, + "first_commit": "2024-05-01 17:48:06", + "latest_commit": "2024-05-04 13:25:20", "languages": [], 
"model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "Kotoba-Speech-v0.1 Kotoba-Speech v0.1 is a 1.2B Transformer-based speech generative model.", - "url": "https://huggingface.co./kotoba-tech/kotoba-speech-v0.1", - "project_name": "kotoba-speech-v0.1", - "downloads": 130, + "description": "whisper-large-v3-japanese-4k-steps This model is a fine-tuned version of openai/whisper-large-v3 on the Common Voice 16.1 dataset.", + "url": "https://huggingface.co./drewschaub/whisper-large-v3-japanese-4k-steps", + "project_name": "whisper-large-v3-japanese-4k-steps", + "downloads": 119, "source": "Hugging Face", - "score": -0.09141635900653491, - "first_commit": "2024-03-14 01:21:58", - "latest_commit": "2024-04-17 07:54:48", + "score": -0.09460830869602894, + "first_commit": "2024-02-17 01:01:51", + "latest_commit": "2024-02-18 01:31:35", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 1.54, + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "WRIME-fine-tuned BERT base Japanese This model is a Japanese BERTBASE fine-tuned on the WRIME dataset.", - "url": "https://huggingface.co./patrickramos/bert-base-japanese-v2-wrime-fine-tune", - "project_name": "bert-base-japanese-v2-wrime-fine-tune", - "downloads": 128, + "description": "このモデルはluke-japanese-baseをファインチューニングして、MARC-ja(positive or negativeの二値分類)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-marcja", + "project_name": "luke-japanese-base-marcja", + "downloads": 118, "source": "Hugging Face", - "score": -0.09143756890760758, - "first_commit": "2022-05-22 09:42:14", - "latest_commit": "2023-03-22 08:11:34", + "score": -0.09461802672852794, + "first_commit": "2023-03-02 03:57:33", + "latest_commit": "2023-07-21 14:10:48", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": 0.279, + "model_architectures": "LukeForSequenceClassification" + }, + { + "description": "Japanese to emotions I fine-tuned LINE DistillBERT as the base model using WRIME Ver2 as the teacher data.", + "url": "https://huggingface.co./koshin2001/Japanese-to-emotions", + "project_name": "Japanese-to-emotions", + "downloads": 118, + "source": "Hugging Face", + "score": -0.09461802672852794, + "first_commit": "2024-09-09 13:28:59", + "latest_commit": "2024-09-11 01:49:55", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.06870000000000001, + "model_architectures": "DistilBertForSequenceClassification" }, { "description": "QuantFactory/shisa-7b-v1-GGUF This is quantized version of augmxnt/shisa-base-7b-v1 created using llama.cpp Model Description shisa-base-7b-v1 takes Mistral 7B and adds an additional 8B tokens of primarily Japanese pre-training.", "url": "https://huggingface.co./QuantFactory/shisa-7b-v1-GGUF", "project_name": "shisa-7b-v1-GGUF", - "downloads": 128, + "downloads": 118, "source": "Hugging Face", - "score": -0.09143756890760758, + "score": -0.09461802672852794, "first_commit": "2024-06-14 01:44:05", "latest_commit": "2024-06-18 05:53:41", "languages": [], @@ -8339,82 +8463,68 @@ "model_architectures": null }, { - "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-base - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-base StableLM", - "url": 
"https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-base-gguf", - "project_name": "stabilityai-japanese-stablelm-3b-4e1t-base-gguf", - "downloads": 126, - "source": "Hugging Face", - "score": -0.09145877880868025, - "first_commit": "2023-11-16 10:23:21", - "latest_commit": "2023-11-16 11:18:48", - "languages": [], - "model_or_dataset": "model", - "model_size": 2.8, - "model_architectures": null - }, - { - "description": "MobileBERT 日本語事前学習済みモデル爆誕!! ", - "url": "https://huggingface.co./ysakuramoto/mobilebert-ja", - "project_name": "mobilebert-ja", - "downloads": 126, + "description": "Aerner LM-v2 事前学習から全部日本語で学習させたモデルのバージョン2です。 ", + "url": "https://huggingface.co./aerner/lm-v2", + "project_name": "lm-v2", + "downloads": 118, "source": "Hugging Face", - "score": -0.09145877880868025, - "first_commit": "2022-01-23 11:29:39", - "latest_commit": "2022-01-24 05:25:31", + "score": -0.09461802672852794, + "first_commit": "2023-06-09 15:19:12", + "latest_commit": "2023-06-09 16:08:47", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "OpenLlamaForCausalLM" }, { - "description": "Model Card for Japanese character-level GPT-2 Medium Model description This is a Japanese character-level GPT-2 Medium (310M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/gpt2-medium-japanese-char", - "project_name": "gpt2-medium-japanese-char", - "downloads": 124, + "description": "c4ai-command-r-v01-japanese-instruct-GGUF 概要 Aratako/c4ai-command-r-v01-japanese-instructの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct-GGUF", + "project_name": "c4ai-command-r-v01-japanese-instruct-GGUF", + "downloads": 117, "source": "Hugging Face", - "score": -0.09147998870975292, - "first_commit": "2023-05-18 06:29:28", - "latest_commit": "2023-06-08 05:34:26", + "score": -0.09462774476102695, + "first_commit": "2024-04-05 17:10:51", + "latest_commit": "2024-04-07 03:19:34", "languages": [], "model_or_dataset": "model", - "model_size": 0.335, - "model_architectures": "GPT2LMHeadModel" - }, - { - "description": "mbpp-ja", - "url": "https://huggingface.co./datasets/llm-jp/mbpp-ja", - "project_name": "mbpp-ja", - "downloads": 124, + "model_size": 35.0, + "model_architectures": null + }, + { + "description": "概要 NHKで定期的に放送されていた『着信御礼!", + "url": "https://huggingface.co./datasets/YANS-official/ogiri-keitai", + "project_name": "ogiri-keitai", + "downloads": 117, "source": "Hugging Face", - "score": -0.09147998870975292, - "first_commit": "2024-04-19 00:26:56", - "latest_commit": "2024-04-20 06:26:51", + "score": -0.09462774476102695, + "first_commit": "2024-07-20 10:11:36", + "latest_commit": "2024-08-30 10:13:20", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "gemma-2-2b-it-gguf googleさんが公開しているgemma-2-2b-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/gemma-2-2b-it-gguf", - "project_name": "gemma-2-2b-it-gguf", - "downloads": 123, + "description": "QuantFactory/llama-3-youko-8b-GGUF", + "url": "https://huggingface.co./QuantFactory/llama-3-youko-8b-GGUF", + "project_name": "llama-3-youko-8b-GGUF", + "downloads": 115, "source": "Hugging Face", - "score": -0.09149059366028925, - "first_commit": "2024-08-01 17:22:58", - "latest_commit": "2024-08-01 18:29:08", + "score": -0.09464718082602495, + 
"first_commit": "2024-06-24 05:04:12", + "latest_commit": "2024-06-24 06:35:40", "languages": [], "model_or_dataset": "model", - "model_size": 2.61, + "model_size": 8.03, "model_architectures": null }, { "description": "This dataset is a clarified version of the image, context, and question set included in the Japanese-Heron-Bench for the construction of the Japanese evaluation benchmark suite.", "url": "https://huggingface.co./datasets/Silviase/Japanese-Heron-Bench", "project_name": "Japanese-Heron-Bench", - "downloads": 123, + "downloads": 113, "source": "Hugging Face", - "score": -0.09149059366028925, + "score": -0.09466661689102296, "first_commit": "2024-07-16 08:12:30", "latest_commit": "2024-07-28 12:33:15", "languages": [], @@ -8423,26 +8533,26 @@ "model_architectures": null }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-sqlcoder-2x8B-GGUF", - "project_name": "Llama-3-ELYZA-sqlcoder-2x8B-GGUF", - "downloads": 122, + "description": "roberta-long-japanese (jumanpp + sentencepiece, mC4 Japanese)", + "url": "https://huggingface.co./megagonlabs/roberta-long-japanese", + "project_name": "roberta-long-japanese", + "downloads": 110, "source": "Hugging Face", - "score": -0.09150119861082559, - "first_commit": "2024-06-28 01:51:50", - "latest_commit": "2024-06-28 05:56:23", + "score": -0.09469577098851996, + "first_commit": "2022-09-04 14:31:06", + "latest_commit": "2022-10-04 23:36:27", "languages": [], "model_or_dataset": "model", - "model_size": 13.7, - "model_architectures": null + "model_size": null, + "model_architectures": "RobertaForMaskedLM" }, { "description": "ELYZA-japanese-CodeLlama-7b-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-gguf", "project_name": "ELYZA-japanese-CodeLlama-7b-gguf", - "downloads": 120, + "downloads": 109, "source": "Hugging Face", - "score": -0.09152240851189826, + "score": -0.09470548902101897, "first_commit": "2023-11-15 09:53:42", "latest_commit": "2023-11-16 14:28:03", "languages": [], @@ -8451,600 +8561,502 @@ "model_architectures": null }, { - "description": "Update: 2023/12/25oasst2-135k-jaをチャット形式に変換したoasst2-chat-68k-jaを公開しました。 ", - "url": "https://huggingface.co./datasets/kunishou/oasst2-135k-ja", - "project_name": "oasst2-135k-ja", - "downloads": 116, + "description": "NVIDIA が公開している SteerLM 向けのトライアルデータセット HelpSteerを日本語に自動翻訳したデータセットになります。", + "url": "https://huggingface.co./datasets/kunishou/HelpSteer-35k-ja", + "project_name": "HelpSteer-35k-ja", + "downloads": 109, "source": "Hugging Face", - "score": -0.09156482831404358, - "first_commit": "2023-12-24 22:04:54", - "latest_commit": "2023-12-25 13:23:55", + "score": -0.09470548902101897, + "first_commit": "2024-03-02 16:45:19", + "latest_commit": "2024-03-03 10:10:54", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "abc-multiple-choice Dataset abc-multiple-choice は、競技クイズの大会「abc」で使用された4択問題を元に作成された、多肢選択式の質問応答データセットです。 ", - "url": "https://huggingface.co./datasets/tohoku-nlp/abc-multiple-choice", - "project_name": "abc-multiple-choice", - "downloads": 116, + "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/jawiki-sentences", + "project_name": "jawiki-sentences", + "downloads": 107, "source": "Hugging Face", - "score": -0.09156482831404358, - "first_commit": "2024-03-02 
03:58:25", - "latest_commit": "2024-03-12 07:32:13", + "score": -0.09472492508601697, + "first_commit": "2023-06-03 03:02:08", + "latest_commit": "2023-10-25 15:22:05", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Llama-3-EZO-VLM-1 Based on SakanaAI/Llama-3-EvoVLM-JP-v2, it has been enhanced for Japanese usage through additional pre-training and instruction tuning.", - "url": "https://huggingface.co./AXCXEPT/Llama-3-EZO-VLM-1", - "project_name": "Llama-3-EZO-VLM-1", - "downloads": 114, - "source": "Hugging Face", - "score": -0.09158603821511625, - "first_commit": "2024-08-03 17:15:09", - "latest_commit": "2024-08-23 10:55:53", - "languages": [], - "model_or_dataset": "model", - "model_size": 8.48, - "model_architectures": "LlavaForConditionalGeneration" - }, - { - "description": "Tanuki-8B-dpo-v1.0-AWQ 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のAWQ 4bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-AWQ", - "project_name": "Tanuki-8B-dpo-v1.0-AWQ", - "downloads": 113, - "source": "Hugging Face", - "score": -0.0915966431656526, - "first_commit": "2024-08-27 04:50:35", - "latest_commit": "2024-09-03 09:29:23", - "languages": [], - "model_or_dataset": "model", - "model_size": 1.47, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b", - "project_name": "ELYZA-japanese-CodeLlama-7b", - "downloads": 113, - "source": "Hugging Face", - "score": -0.0915966431656526, - "first_commit": "2023-11-07 12:48:15", - "latest_commit": "2023-11-15 00:38:12", - "languages": [], - "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "モデル概要 AWSのtrn1インスタンスを用いて開発した大喜利言語モデルです。", - "url": "https://huggingface.co./watashiha/watashiha-gpt-6b", - "project_name": "watashiha-gpt-6b", - "downloads": 112, - "source": "Hugging Face", - "score": -0.09160724811618892, - "first_commit": "2023-12-28 05:41:38", - "latest_commit": "2024-03-04 05:21:14", - "languages": [], - "model_or_dataset": "model", - "model_size": 5.83, - "model_architectures": "GPT2LMHeadModel" - }, - { - "description": "doc2query/msmarco-japanese-mt5-base-v1 This is a doc2query model based on mT5 (also known as docT5query).", - "url": "https://huggingface.co./doc2query/msmarco-japanese-mt5-base-v1", - "project_name": "msmarco-japanese-mt5-base-v1", - "downloads": 111, - "source": "Hugging Face", - "score": -0.09161785306672526, - "first_commit": "2022-04-29 12:05:21", - "latest_commit": "2022-04-29 14:05:37", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MT5ForConditionalGeneration" - }, - { - "description": "MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1.", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - 
"downloads": 110, + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-sqlcoder-2x8B-GGUF", + "project_name": "Llama-3-ELYZA-sqlcoder-2x8B-GGUF", + "downloads": 106, "source": "Hugging Face", - "score": -0.09162845801726159, - "first_commit": "2024-01-28 16:13:26", - "latest_commit": "2024-01-28 16:24:30", + "score": -0.09473464311851597, + "first_commit": "2024-06-28 01:51:50", + "latest_commit": "2024-06-28 05:56:23", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 13.7, "model_architectures": null }, { - "description": "MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1.", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "downloads": 110, + "description": "HODACHI-EZO-Common-9B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-9B-gemma-2-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-9B-gemma-2-it-gguf", + "project_name": "HODACHI-EZO-Common-9B-gemma-2-it-gguf", + "downloads": 105, "source": "Hugging Face", - "score": -0.09162845801726159, - "first_commit": "2024-01-26 06:13:55", - "latest_commit": "2024-01-26 06:36:22", + "score": -0.09474436115101498, + "first_commit": "2024-07-15 15:42:39", + "latest_commit": "2024-07-15 16:20:33", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 9.24, "model_architectures": null }, { - "description": "Aerner LM-v2 事前学習から全部日本語で学習させたモデルのバージョン2です。 ", - "url": "https://huggingface.co./aerner/lm-v2", - "project_name": "lm-v2", - "downloads": 109, + "description": "reazonspeech-espnet-next ReazonSpeech is a project to maintain freely-available Japanese audio datasets and ML models.", + "url": "https://huggingface.co./reazon-research/reazonspeech-espnet-next", + "project_name": "reazonspeech-espnet-next", + "downloads": 103, "source": "Hugging Face", - "score": -0.09163906296779792, - "first_commit": "2023-06-09 15:19:12", - "latest_commit": "2023-06-09 16:08:47", + "score": -0.09476379721601298, + "first_commit": "2023-03-29 07:20:03", + "latest_commit": "2023-03-29 17:28:01", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "OpenLlamaForCausalLM" - }, - { - "description": "LLM-jp Toxicity Dataset 日本語有害文書データセット「LLM-jp Toxicity Dataset」 See https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-toxicity-dataset", - "url": "https://huggingface.co./datasets/p1atdev/LLM-jp-Toxicity-Dataset", - "project_name": "LLM-jp-Toxicity-Dataset", - "downloads": 109, - "source": "Hugging Face", - "score": -0.09163906296779792, - "first_commit": "2024-08-07 07:11:08", - "latest_commit": "2024-08-07 07:21:07", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, - { - "description": "Anime with caption CC-0 dataset このデータセットはイラストに対する日本語キャプションを 倫理的に学習しやすくするためのデータセットです。 ", - "url": "https://huggingface.co./datasets/alfredplpl/anime-with-caption-cc0", - "project_name": "anime-with-caption-cc0", - "downloads": 108, - "source": 
"Hugging Face", - "score": -0.09164966791833426, - "first_commit": "2024-06-03 04:37:13", - "latest_commit": "2024-06-03 05:49:20", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, "model_architectures": null }, { - "description": "Dataset.", - "url": "https://huggingface.co./datasets/hpprc/jsick", - "project_name": "jsick", - "downloads": 108, + "description": "aya-23-8B-gguf CohereForAIさんが公開しているaya-23-8Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/aya-23-8B-gguf", + "project_name": "aya-23-8B-gguf", + "downloads": 103, "source": "Hugging Face", - "score": -0.09164966791833426, - "first_commit": "2023-04-08 16:02:06", - "latest_commit": "2023-04-11 15:18:09", + "score": -0.09476379721601298, + "first_commit": "2024-05-26 16:32:53", + "latest_commit": "2024-05-27 00:54:36", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 8.03, "model_architectures": null }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-large", - "project_name": "ruri-reranker-large", - "downloads": 106, + "description": "Finetuned Waseda RoBERTa to evaluate the generated answers on JTruthfulQA.", + "url": "https://huggingface.co./nlp-waseda/roberta_jtruthfulqa", + "project_name": "roberta_jtruthfulqa", + "downloads": 102, "source": "Hugging Face", - "score": -0.09167087781940693, - "first_commit": "2024-08-20 02:37:26", - "latest_commit": "2024-09-04 08:50:12", + "score": -0.09477351524851199, + "first_commit": "2023-12-06 01:33:02", + "latest_commit": "2023-12-06 04:31:12", "languages": [], "model_or_dataset": "model", "model_size": 0.337, - "model_architectures": "BertForSequenceClassification" + "model_architectures": "RobertaForSequenceClassification" }, { - "description": "Japanese-TextGen-Kage-v0.1-2x7B Kage is \"影\" in Japanese or \"Shadow\" in English.", - "url": "https://huggingface.co./dddump/Japanese-TextGen-Kage-v0.1-2x7B-gguf", - "project_name": "Japanese-TextGen-Kage-v0.1-2x7B-gguf", - "downloads": 106, + "description": "What’s this?", + "url": "https://huggingface.co./globis-university/deberta-v3-japanese-xsmall", + "project_name": "deberta-v3-japanese-xsmall", + "downloads": 102, "source": "Hugging Face", - "score": -0.09167087781940693, - "first_commit": "2024-05-04 07:03:38", - "latest_commit": "2024-05-19 08:54:19", + "score": -0.09477351524851199, + "first_commit": "2023-09-21 16:12:53", + "latest_commit": "2024-07-05 05:48:15", "languages": [], "model_or_dataset": "model", - "model_size": 12.9, - "model_architectures": null - }, - { - "description": "Japanese Anime Speech Dataset V2 日本語はこちら japanese-anime-speech-v2 is an audio-text dataset designed for training automatic speech recognition models.", - "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech-v2", - "project_name": "japanese-anime-speech-v2", - "downloads": 106, - "source": "Hugging Face", - "score": -0.09167087781940693, - "first_commit": "2024-06-26 14:18:01", - "latest_commit": "2024-07-24 19:06:51", - "languages": [], - "model_or_dataset": "dataset", "model_size": null, - "model_architectures": null + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Japanese stopwords for nagisa", - "url": "https://huggingface.co./datasets/taishi-i/nagisa_stopwords", - "project_name": "nagisa_stopwords", - "downloads": 106, + "description": "[Under Construction]", + "url": 
"https://huggingface.co./datasets/bclavie/mmarco-japanese-hard-negatives", + "project_name": "mmarco-japanese-hard-negatives", + "downloads": 102, "source": "Hugging Face", - "score": -0.09167087781940693, - "first_commit": "2023-08-06 17:10:10", - "latest_commit": "2023-08-07 02:58:31", + "score": -0.09477351524851199, + "first_commit": "2023-12-24 13:04:27", + "latest_commit": "2023-12-24 18:52:04", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Japanese InstructBLIP Alpha Model Details Japanese InstructBLIP Alpha is a vision-language instruction-following model that enables to generate Japanese descriptions for input images and optionally input texts such as questions.", - "url": "https://huggingface.co./stabilityai/japanese-instructblip-alpha", - "project_name": "japanese-instructblip-alpha", - "downloads": 104, + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressive-v2の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-v2-GGUF", + "project_name": "Ninja-v1-RP-expressive-v2-GGUF", + "downloads": 101, "source": "Hugging Face", - "score": -0.0916920877204796, - "first_commit": "2023-08-16 23:49:58", - "latest_commit": "2023-11-17 03:57:41", + "score": -0.09478323328101099, + "first_commit": "2024-05-26 06:09:57", + "latest_commit": "2024-05-26 15:22:01", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "JapaneseInstructBlipAlphaForConditionalGeneration" + "model_size": 7.24, + "model_architectures": null }, { - "description": "SentenceTransformer based on line-corporation/line-distilbert-base-japanese This is a sentence-transformers model finetuned from line-corporation/line-distilbert-base-japanese.", - "url": "https://huggingface.co./cl-nagoya/ruri-pt-small", - "project_name": "ruri-pt-small", - "downloads": 104, + "description": "GitHub リポジトリ cl-tohoku/quiz-datasets で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/aio-retriever", + "project_name": "aio-retriever", + "downloads": 100, "source": "Hugging Face", - "score": -0.0916920877204796, - "first_commit": "2024-08-17 10:39:05", - "latest_commit": "2024-08-30 03:11:20", + "score": -0.09479295131351, + "first_commit": "2023-07-04 04:53:47", + "latest_commit": "2023-10-25 15:31:08", "languages": [], - "model_or_dataset": "model", - "model_size": 0.0681, - "model_architectures": "DistilBertModel" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-medium", - "project_name": "t5-base-medium", - "downloads": 103, + "description": "This model is traned with guanaco dataset.", + "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v0", + "project_name": "Yoko-7B-Japanese-v0", + "downloads": 99, "source": "Hugging Face", - "score": -0.09170269267101593, - "first_commit": "2023-04-26 08:27:09", - "latest_commit": "2023-05-10 10:00:12", + "score": -0.09480266934600899, + "first_commit": "2023-08-09 16:28:38", + "latest_commit": "2023-08-10 13:00:34", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": "LlamaForCausalLM" }, { - "description": "RoBERTa base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", - "url": 
"https://huggingface.co./ybelkada/japanese-roberta-question-answering", - "project_name": "japanese-roberta-question-answering", - "downloads": 101, + "description": "roberta_qa_japanese (Japanese caption : 日本語の (抽出型) 質問応答のモデル)", + "url": "https://huggingface.co./tsmatz/roberta_qa_japanese", + "project_name": "roberta_qa_japanese", + "downloads": 99, "source": "Hugging Face", - "score": -0.0917239025720886, - "first_commit": "2022-04-08 08:52:22", - "latest_commit": "2022-04-08 11:38:39", + "score": -0.09480266934600899, + "first_commit": "2022-12-11 03:41:07", + "latest_commit": "2024-07-12 00:00:07", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 0.11, "model_architectures": "RobertaForQuestionAnswering" }, { - "description": "albert-base-japanese-v1-with-japanese 日本語事前学習済みALBERTモデルですこのモデルではTokenizerにBertJapaneseTokenizerクラスを利用していますalbert-base-japanese-v1よりトークナイズ処理が楽になっています How to use ファインチューニング このモデルはPreTrainedモデルです基本的には各種タスク用にファインチューニングして使用されることを想定しています Fill-Mask for PyTorch from transformers import ( AutoModelForMaskedLM, AutoTokenizer ) tokenizer = AutoTokenizer.from_pretrained(\"ken11/albert-base-japanese-v1-with-japanese-tokenizer\")", - "url": "https://huggingface.co./ken11/albert-base-japanese-v1-with-japanese-tokenizer", - "project_name": "albert-base-japanese-v1-with-japanese-tokenizer", - "downloads": 100, + "description": "One more step before getting this model.", + "url": "https://huggingface.co./rinna/japanese-stable-diffusion", + "project_name": "japanese-stable-diffusion", + "downloads": 98, "source": "Hugging Face", - "score": -0.09173450752262494, - "first_commit": "2022-04-20 16:34:22", - "latest_commit": "2022-04-21 02:28:13", + "score": -0.094812387378508, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "AlbertForMaskedLM" + "model_architectures": null }, { - "description": "Wikidata parallel descriptions en-ja Parallel corpus for machine translation generated from wikidata dump (2024-05-06).", - "url": "https://huggingface.co./datasets/Mitsua/wikidata-parallel-descriptions-en-ja", - "project_name": "wikidata-parallel-descriptions-en-ja", - "downloads": 100, + "description": "Tanuki-8B-dpo-v1.0-GPTQ-4bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGPTQ 4bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GPTQ-4bit", + "project_name": "Tanuki-8B-dpo-v1.0-GPTQ-4bit", + "downloads": 98, "source": "Hugging Face", - "score": -0.09173450752262494, - "first_commit": "2024-05-13 12:02:43", - "latest_commit": "2024-05-17 00:25:10", + "score": -0.094812387378508, + "first_commit": "2024-08-27 16:17:17", + "latest_commit": "2024-09-03 09:29:10", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "LlamaForCausalLM" }, { - "description": "Tanuki-ZeRo-gguf kanhatakeyamaさんが公開しているTanuki-ZeRoのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Tanuki-ZeRo-gguf", - "project_name": "Tanuki-ZeRo-gguf", - "downloads": 99, + "description": "概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", + "url": "https://huggingface.co./DataPilot/Llama3.1-ArrowSE-v0.4", + "project_name": "Llama3.1-ArrowSE-v0.4", + "downloads": 98, "source": "Hugging Face", - "score": -0.09174511247316126, - "first_commit": "2024-03-30 10:49:02", - "latest_commit": "2024-03-30 
17:01:16", + "score": -0.094812387378508, + "first_commit": "2024-07-24 07:37:16", + "latest_commit": "2024-07-24 12:00:46", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": null + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Ninja-v1-RP-GGUF 概要 Aratako/Ninja-v1-RPの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-GGUF", - "project_name": "Ninja-v1-RP-GGUF", - "downloads": 99, + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-small", + "project_name": "ruri-reranker-small", + "downloads": 95, "source": "Hugging Face", - "score": -0.09174511247316126, - "first_commit": "2024-05-20 17:08:50", - "latest_commit": "2024-05-24 15:11:08", + "score": -0.094841541476005, + "first_commit": "2024-08-19 12:39:07", + "latest_commit": "2024-09-04 08:50:32", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": 0.06870000000000001, + "model_architectures": "DistilBertForSequenceClassification" }, { - "description": "This model is traned with guanaco dataset.", - "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v0", - "project_name": "Yoko-7B-Japanese-v0", - "downloads": 99, + "description": "Umievo-itr012-Gleipnir-7B このモデルは強力な4つの日本語モデルを進化的アルゴリズムで進化的マージしたものです。", + "url": "https://huggingface.co./umiyuki/Umievo-itr012-Gleipnir-7B", + "project_name": "Umievo-itr012-Gleipnir-7B", + "downloads": 95, "source": "Hugging Face", - "score": -0.09174511247316126, - "first_commit": "2023-08-09 16:28:38", - "latest_commit": "2023-08-10 13:00:34", + "score": -0.094841541476005, + "first_commit": "2024-05-29 12:32:29", + "latest_commit": "2024-05-29 13:51:31", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "日本語 gpt2 蒸留モデル このモデルはrinna/japanese-gpt2-meduimを教師として蒸留したものです。 ", - "url": "https://huggingface.co./knok/japanese-distilgpt2", - "project_name": "japanese-distilgpt2", - "downloads": 96, + "description": "shisa-7b-v1-gguf augmxntさんが公開しているshisa-7b-v1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/shisa-7b-v1-gguf", + "project_name": "shisa-7b-v1-gguf", + "downloads": 94, "source": "Hugging Face", - "score": -0.09177692732477026, - "first_commit": "2022-04-14 09:32:23", - "latest_commit": "2022-04-15 06:00:51", + "score": -0.09485125950850401, + "first_commit": "2023-12-09 14:02:20", + "latest_commit": "2023-12-10 12:24:25", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.96, "model_architectures": null }, { - "description": "sonoisa/t5-base-japaneseをファインチューニングして、タイトル生成に用いれるようにしたモデルです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/t5-CAMERA-title-generation", - "project_name": "t5-CAMERA-title-generation", - "downloads": 96, + "description": "このモデルはluke-japanese-base-liteをファインチューニングして、Question-Answeringに用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-QA", + "project_name": "luke-japanese-base-finetuned-QA", + "downloads": 93, "source": "Hugging Face", - "score": -0.09177692732477026, - "first_commit": "2023-03-21 10:49:27", - "latest_commit": "2023-07-21 14:11:13", + "score": -0.094860977541003, + "first_commit": "2023-01-15 23:38:30", + "latest_commit": "2023-07-21 14:11:02", 
"languages": [], "model_or_dataset": "model", - "model_size": 0.223, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 0.132, + "model_architectures": "LukeForQuestionAnswering" }, { - "description": "日本語VL-T5事前学習済みモデル", - "url": "https://huggingface.co./sonoisa/vl-t5-base-japanese", - "project_name": "vl-t5-base-japanese", - "downloads": 96, + "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for VecTeus-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 VecTeus has the following changes compared to Mistral-7B-v0.1.", + "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-v1", + "project_name": "Vecteus-v1", + "downloads": 93, "source": "Hugging Face", - "score": -0.09177692732477026, - "first_commit": "2021-10-03 11:54:43", - "latest_commit": "2021-10-04 11:13:35", + "score": -0.094860977541003, + "first_commit": "2024-05-01 02:08:01", + "latest_commit": "2024-05-04 04:07:22", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "VLT5ModelWrapper" + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "更新履歴 2023年5月7日 「oasst1-89k-ja」データセットを追加して対話システムに対応しました。", - "url": "https://huggingface.co./inu-ai/dolly-japanese-gpt-1b", - "project_name": "dolly-japanese-gpt-1b", - "downloads": 95, + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressiveの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-GGUF", + "project_name": "Ninja-v1-RP-expressive-GGUF", + "downloads": 90, "source": "Hugging Face", - "score": -0.0917875322753066, - "first_commit": "2023-04-13 22:46:07", - "latest_commit": "2023-08-01 07:55:27", + "score": -0.09489013163850002, + "first_commit": "2024-05-21 12:16:42", + "latest_commit": "2024-05-24 15:11:25", "languages": [], "model_or_dataset": "model", - "model_size": 1.33, - "model_architectures": "GPT2LMHeadModel" + "model_size": 7.24, + "model_architectures": null }, { - "description": "roberta-long-japanese (jumanpp + sentencepiece, mC4 Japanese)", - "url": "https://huggingface.co./megagonlabs/roberta-long-japanese", - "project_name": "roberta-long-japanese", - "downloads": 95, + "description": "Ruri: Japanese General Text Embeddings Usage First install the Sentence Transformers library: pip install -U sentence-transformers Then you can load this model and run inference.", + "url": "https://huggingface.co./cl-nagoya/ruri-pt-large", + "project_name": "ruri-pt-large", + "downloads": 90, "source": "Hugging Face", - "score": -0.0917875322753066, - "first_commit": "2022-09-04 14:31:06", - "latest_commit": "2022-10-04 23:36:27", + "score": -0.09489013163850002, + "first_commit": "2024-08-19 00:58:49", + "latest_commit": "2024-08-30 00:59:26", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_size": 0.337, + "model_architectures": "BertModel" }, { - "description": "umiyuki様の Japanese-Chat-Umievo-itr004-7b をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Japanese-Chat-Umievo-itr004-7b-GGUF", - "project_name": "Japanese-Chat-Umievo-itr004-7b-GGUF", - "downloads": 94, + "description": "QuantFactory/shisa-gamma-7b-v1-GGUF", + "url": "https://huggingface.co./QuantFactory/shisa-gamma-7b-v1-GGUF", + "project_name": "shisa-gamma-7b-v1-GGUF", + "downloads": 90, "source": "Hugging Face", - "score": -0.09179813722584293, - "first_commit": 
"2024-05-13 16:28:41", - "latest_commit": "2024-05-13 23:33:49", + "score": -0.09489013163850002, + "first_commit": "2024-06-12 17:16:36", + "latest_commit": "2024-06-18 06:17:30", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "deberta-base-japanese-aozora-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-head", - "project_name": "deberta-base-japanese-aozora-ud-head", - "downloads": 93, + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/swallow-3-8B-sqlcoder-2x8B-GGUF", + "project_name": "swallow-3-8B-sqlcoder-2x8B-GGUF", + "downloads": 88, "source": "Hugging Face", - "score": -0.09180874217637927, - "first_commit": "2022-06-15 04:02:27", - "latest_commit": "2023-03-04 20:10:16", + "score": -0.09490956770349802, + "first_commit": "2024-07-03 11:02:45", + "latest_commit": "2024-07-04 07:20:41", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + "model_size": 13.7, + "model_architectures": null }, { - "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", - "url": "https://huggingface.co./cinmodel/electra-small-japanese-generator", - "project_name": "electra-small-japanese-generator", - "downloads": 92, + "description": "Genji-JP 6B Please check our blog post for more details, samples, evaluations and more: Blogpost Model Description Genji-JP 6B is a model finetuned on our Japanese storytelling dataset based on EleutherAI's GPT-J 6B model.", + "url": "https://huggingface.co./NovelAI/genji-jp", + "project_name": "genji-jp", + "downloads": 88, "source": "Hugging Face", - "score": -0.0918193471269156, - "first_commit": "2020-11-13 06:49:52", - "latest_commit": "2020-12-11 22:26:17", + "score": -0.09490956770349802, + "first_commit": "2021-11-03 15:07:47", + "latest_commit": "2022-08-09 17:36:02", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForMaskedLM" + "model_architectures": "GPTJForCausalLM" }, { - "description": "[github].", - "url": "https://huggingface.co./datasets/fujiki/japanese_alpaca_data", - "project_name": "japanese_alpaca_data", - "downloads": 92, + "description": "Dataset Summary This is the Business Scene Dialogue (BSD) dataset, a Japanese-English parallel corpus containing written conversations in various business scenarios.", + "url": "https://huggingface.co./datasets/ryo0634/bsd_ja_en", + "project_name": "bsd_ja_en", + "downloads": 88, "source": "Hugging Face", - "score": -0.0918193471269156, - "first_commit": "2023-05-18 07:13:15", - "latest_commit": "2023-05-19 12:54:13", + "score": -0.09490956770349802, + "first_commit": "2022-01-25 16:35:02", + "latest_commit": "2024-01-11 07:36:44", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Int4", - "project_name": "Orion-14B-Chat-Int4", - "downloads": 91, + "description": "Kotoba-Speech-v0.1 Kotoba-Speech v0.1 is a 1.2B Transformer-based speech generative model.", + "url": "https://huggingface.co./kotoba-tech/kotoba-speech-v0.1", + "project_name": "kotoba-speech-v0.1", + "downloads": 87, "source": "Hugging Face", - "score": -0.09182995207745194, - 
"first_commit": "2024-01-18 09:54:07", - "latest_commit": "2024-03-26 10:04:46", + "score": -0.09491928573599702, + "first_commit": "2024-03-14 01:21:58", + "latest_commit": "2024-04-17 07:54:48", "languages": [], "model_or_dataset": "model", - "model_size": 2.69, - "model_architectures": "OrionForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "nlp-waseda/roberta-large-japanese Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese", - "project_name": "roberta-large-japanese", - "downloads": 91, + "description": "deberta-base-japanese-aozora-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-head", + "project_name": "deberta-base-japanese-aozora-ud-head", + "downloads": 87, "source": "Hugging Face", - "score": -0.09182995207745194, - "first_commit": "2022-05-10 08:37:48", - "latest_commit": "2022-10-21 14:48:46", + "score": -0.09491928573599702, + "first_commit": "2022-06-15 04:02:27", + "latest_commit": "2023-03-04 20:10:16", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": "DebertaV2ForQuestionAnswering" }, { - "description": "Dataset Preprocessing Supported Tasks and Leaderboards Languages 注釈はすべて日本語を主要��語としています。 ", - "url": "https://huggingface.co./datasets/shunk031/jsnli", - "project_name": "jsnli", - "downloads": 89, + "description": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-7b-instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-7b-instruct-v0.1-gguf", + "project_name": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf", + "downloads": 86, "source": "Hugging Face", - "score": -0.09185116197852461, - "first_commit": "2022-12-01 01:31:32", - "latest_commit": "2022-12-12 16:36:58", + "score": -0.09492900376849603, + "first_commit": "2024-05-03 04:09:27", + "latest_commit": "2024-05-03 04:53:43", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 6.83, "model_architectures": null }, { - "description": "Ruri: Japanese General Text Embeddings Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-base", - "project_name": "ruri-base", - "downloads": 87, + "description": "Local-Novel-LLM-project様の Ninja-V3 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Ninja-V3-GGUF", + "project_name": "Ninja-V3-GGUF", + "downloads": 84, "source": "Hugging Face", - "score": -0.09187237187959726, - "first_commit": "2024-08-28 13:09:10", - "latest_commit": "2024-09-04 08:49:23", + "score": -0.09494843983349403, + "first_commit": "2024-07-03 11:52:04", + "latest_commit": "2024-07-03 16:59:05", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertModel" + "model_size": 7.24, + "model_architectures": null }, { - "description": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf lightblueさんが公開しているKarasu-Mixtral-8x22B-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/lightblue-Karasu-Mixtral-8x22B-v0.1-gguf", - "project_name": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf", - "downloads": 86, + "description": "HODACHI様の EZO-Common-T2-2B-gemma-2-it をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/EZO-Common-T2-2B-gemma-2-it-GGUF", + "project_name": 
"EZO-Common-T2-2B-gemma-2-it-GGUF", + "downloads": 84, "source": "Hugging Face", - "score": -0.0918829768301336, - "first_commit": "2024-05-07 12:53:56", - "latest_commit": "2024-05-07 18:07:43", + "score": -0.09494843983349403, + "first_commit": "2024-08-01 11:38:48", + "latest_commit": "2024-08-01 13:42:20", "languages": [], "model_or_dataset": "model", - "model_size": 141.0, + "model_size": 2.61, "model_architectures": null }, { - "description": "shisa-7b-v1-gguf augmxntさんが公開しているshisa-7b-v1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/shisa-7b-v1-gguf", - "project_name": "shisa-7b-v1-gguf", - "downloads": 85, + "description": "Swallow-8Bは追加の日本語継続事前学習により日本語が大変流暢なLlama-3派生モデルです。", + "url": "https://huggingface.co./aixsatoshi/Meta-Llama-3.1-8B-Instruct-plus-Swallow", + "project_name": "Meta-Llama-3.1-8B-Instruct-plus-Swallow", + "downloads": 84, "source": "Hugging Face", - "score": -0.09189358178066993, - "first_commit": "2023-12-09 14:02:20", - "latest_commit": "2023-12-10 12:24:25", + "score": -0.09494843983349403, + "first_commit": "2024-07-24 03:10:38", + "latest_commit": "2024-07-24 04:03:21", "languages": [], "model_or_dataset": "model", - "model_size": 7.96, - "model_architectures": null + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "このモデルはluke-japanese-base-liteをファインチューニングして、Question-Answeringに用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-QA", - "project_name": "luke-japanese-base-finetuned-QA", + "description": "stockmark-100b-gguf stockmarkさんが公開しているstockmark-100bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/stockmark-100b-gguf", + "project_name": "stockmark-100b-gguf", "downloads": 83, "source": "Hugging Face", - "score": -0.0919147916817426, - "first_commit": "2023-01-15 23:38:30", - "latest_commit": "2023-07-21 14:11:02", + "score": -0.09495815786599303, + "first_commit": "2024-05-17 12:45:56", + "latest_commit": "2024-05-18 09:14:46", "languages": [], "model_or_dataset": "model", - "model_size": 0.132, - "model_architectures": "LukeForQuestionAnswering" + "model_size": 96.2, + "model_architectures": null + }, + { + "description": "Ruri: Japanese General Text Embeddings Usage First install the Sentence Transformers library: pip install -U sentence-transformers Then you can load this model and run inference.", + "url": "https://huggingface.co./cl-nagoya/ruri-pt-small", + "project_name": "ruri-pt-small", + "downloads": 82, + "source": "Hugging Face", + "score": -0.09496787589849204, + "first_commit": "2024-08-17 10:39:05", + "latest_commit": "2024-08-30 03:11:20", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.0681, + "model_architectures": "DistilBertModel" }, { "description": "Llama 3 Youko 70B (rinna/llama-3-youko-70b)", "url": "https://huggingface.co./rinna/llama-3-youko-70b", "project_name": "llama-3-youko-70b", - "downloads": 83, + "downloads": 82, "source": "Hugging Face", - "score": -0.0919147916817426, + "score": -0.09496787589849204, "first_commit": "2024-07-21 14:13:34", "latest_commit": "2024-07-25 05:16:28", "languages": [], @@ -9053,502 +9065,530 @@ "model_architectures": "LlamaForCausalLM" }, { - "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoで利用しているモデルです。", - "url": "https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b", - "project_name": "japanese-novel-gpt-j-6b", - "downloads": 82, + "description": "LLM-jp Toxicity Dataset 日本語有害文書データセット「LLM-jp Toxicity Dataset」 See 
https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-toxicity-dataset", + "url": "https://huggingface.co./datasets/p1atdev/LLM-jp-Toxicity-Dataset", + "project_name": "LLM-jp-Toxicity-Dataset", + "downloads": 81, "source": "Hugging Face", - "score": -0.09192539663227894, - "first_commit": "2023-08-11 00:52:32", - "latest_commit": "2023-08-26 04:20:51", + "score": -0.09497759393099105, + "first_commit": "2024-08-07 07:11:08", + "latest_commit": "2024-08-07 07:21:07", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "GPTJForCausalLM" + "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-large-medium", - "project_name": "t5-large-medium", - "downloads": 81, + "description": "line-corporation/japanese-large-lm-1.7b line-corporationさんが公開しているjapanese-large-lm-1.7bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-gguf", + "project_name": "line-corp-japanese-large-lm-1.7b-gguf", + "downloads": 80, "source": "Hugging Face", - "score": -0.09193600158281527, - "first_commit": "2023-04-26 08:31:45", - "latest_commit": "2023-05-10 10:00:45", + "score": -0.09498731196349004, + "first_commit": "2023-09-03 22:35:34", + "latest_commit": "2024-03-24 05:54:30", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": null }, { - "description": "line-corporation/japanese-large-lm-3.6b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-3.6b-instruction-sftのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", - "project_name": "line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", + "description": "Qwen2.5-ja-zh", + "url": "https://huggingface.co./hakutaku/qwen2.5-ja-zh", + "project_name": "qwen2.5-ja-zh", "downloads": 80, "source": "Hugging Face", - "score": -0.09194660653335161, - "first_commit": "2023-09-02 18:01:40", - "latest_commit": "2023-09-08 02:52:29", + "score": -0.09498731196349004, + "first_commit": "2024-09-19 14:15:49", + "latest_commit": "2024-09-20 07:45:25", "languages": [], "model_or_dataset": "model", - "model_size": 3.71, - "model_architectures": null + "model_size": 7.62, + "model_architectures": "Qwen2ForCausalLM" }, { - "description": "roberta_qa_japanese (Japanese caption : 日本語の (抽出型) 質問応答のモデル)", - "url": "https://huggingface.co./tsmatz/roberta_qa_japanese", - "project_name": "roberta_qa_japanese", + "description": "albert-base-japanese-v1-with-japanese 日本語事前学習済みALBERTモデルですこのモデルではTokenizerにBertJapaneseTokenizerクラスを利用していますalbert-base-japanese-v1よりトークナイズ処理が楽になっています How to use ファインチューニング このモデルはPreTrainedモデルです基本的には各種タスク用にファインチューニングして使用されることを想定しています Fill-Mask for PyTorch from transformers import ( AutoModelForMaskedLM, AutoTokenizer ) tokenizer = AutoTokenizer.from_pretrained(\"ken11/albert-base-japanese-v1-with-japanese-tokenizer\")", + "url": "https://huggingface.co./ken11/albert-base-japanese-v1-with-japanese-tokenizer", + "project_name": "albert-base-japanese-v1-with-japanese-tokenizer", "downloads": 80, "source": "Hugging Face", - "score": -0.09194660653335161, - "first_commit": "2022-12-11 03:41:07", - "latest_commit": "2024-07-12 00:00:07", + "score": -0.09498731196349004, + "first_commit": "2022-04-20 16:34:22", + "latest_commit": "2022-04-21 02:28:13", "languages": [], "model_or_dataset": "model", - "model_size": 0.11, - "model_architectures": 
"RobertaForQuestionAnswering" + "model_size": null, + "model_architectures": "AlbertForMaskedLM" }, { - "description": "reazonspeech-espnet-next ReazonSpeech is a project to maintain freely-available Japanese audio datasets and ML models.", - "url": "https://huggingface.co./reazon-research/reazonspeech-espnet-next", - "project_name": "reazonspeech-espnet-next", - "downloads": 79, + "description": "ryota39様の Tora-7B-v0.2 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Tora-7B-v0.2-GGUF", + "project_name": "Tora-7B-v0.2-GGUF", + "downloads": 80, "source": "Hugging Face", - "score": -0.09195721148388794, - "first_commit": "2023-03-29 07:20:03", - "latest_commit": "2023-03-29 17:28:01", + "score": -0.09498731196349004, + "first_commit": "2024-05-06 12:50:49", + "latest_commit": "2024-06-15 03:17:32", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.24, "model_architectures": null }, { - "description": "HODACHI様の Llama-3-EZO-8b-Common-it をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama-3-EZO-8b-Common-it-GGUF", - "project_name": "Llama-3-EZO-8b-Common-it-GGUF", + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-base-medium", + "project_name": "t5-base-medium", "downloads": 79, "source": "Hugging Face", - "score": -0.09195721148388794, - "first_commit": "2024-07-15 11:58:12", - "latest_commit": "2024-07-15 20:08:22", + "score": -0.09499702999598905, + "first_commit": "2023-04-26 08:27:09", + "latest_commit": "2023-05-10 10:00:12", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": null + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Local-Novel-LLM-project様の Ninja-V3 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Ninja-V3-GGUF", - "project_name": "Ninja-V3-GGUF", + "description": "Local-Novel-LLM-project様の Ninja-V2-7B をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Ninja-V2-7B-GGUF", + "project_name": "Ninja-V2-7B-GGUF", "downloads": 79, "source": "Hugging Face", - "score": -0.09195721148388794, - "first_commit": "2024-07-03 11:52:04", - "latest_commit": "2024-07-03 16:59:05", + "score": -0.09499702999598905, + "first_commit": "2024-06-15 16:23:41", + "latest_commit": "2024-06-15 21:25:59", "languages": [], "model_or_dataset": "model", "model_size": 7.24, "model_architectures": null }, { - "description": "Asian Language Treebank (ALT) Project ALT Parallel Corpusのうち、日英対訳部分のみを抽出したデータセットです。", - "url": "https://huggingface.co./datasets/hpprc/alt-parallel-en-ja", - "project_name": "alt-parallel-en-ja", + "description": "pfnet-nekomata-14b-pfn-qfin-gguf pfnetさんが公開しているnekomata-14b-pfn-qfinのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-gguf", + "project_name": "pfnet-nekomata-14b-pfn-qfin-gguf", "downloads": 79, "source": "Hugging Face", - "score": -0.09195721148388794, - "first_commit": "2024-03-21 02:24:27", - "latest_commit": "2024-03-21 12:40:15", + "score": -0.09499702999598905, + "first_commit": "2024-04-24 12:58:10", + "latest_commit": "2024-04-24 14:46:15", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 14.2, "model_architectures": null }, { - "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", - "url": 
"https://huggingface.co./cinmodel/electra-small-japanese-discriminator", - "project_name": "electra-small-japanese-discriminator", + "description": "Model Card for Japanese character-level GPT-2 Large Model description", + "url": "https://huggingface.co./ku-nlp/gpt2-large-japanese-char", + "project_name": "gpt2-large-japanese-char", "downloads": 77, "source": "Hugging Face", - "score": -0.09197842138496061, - "first_commit": "2020-11-13 06:49:25", - "latest_commit": "2020-12-11 22:26:13", + "score": -0.09501646606098706, + "first_commit": "2023-12-27 11:18:45", + "latest_commit": "2023-12-27 12:07:30", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForPreTraining" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "COMET-GPT2 ja Finetuned GPT-2 on ATOMIC ja using a causal language modeling (CLM) objective.", - "url": "https://huggingface.co./nlp-waseda/comet-gpt2-small-japanese", - "project_name": "comet-gpt2-small-japanese", - "downloads": 77, + "description": "Oumuamua-7b-instruct-GGUF This is quantized version of nitky/Oumuamua-7b-instruct created using llama.cpp Model Description This is a merge of pre-trained language models created using mergekit. ", + "url": "https://huggingface.co./QuantFactory/Oumuamua-7b-instruct-GGUF", + "project_name": "Oumuamua-7b-instruct-GGUF", + "downloads": 76, "source": "Hugging Face", - "score": -0.09197842138496061, - "first_commit": "2022-11-15 05:14:35", - "latest_commit": "2023-02-13 10:26:12", + "score": -0.09502618409348605, + "first_commit": "2024-06-19 08:52:12", + "latest_commit": "2024-06-19 11:40:58", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.33, + "model_architectures": null + }, + { + "description": "英語+日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on English and Japanese balanced corpus. 
", + "url": "https://huggingface.co./sonoisa/t5-base-english-japanese", + "project_name": "t5-base-english-japanese", + "downloads": 75, + "source": "Hugging Face", + "score": -0.09503590212598506, + "first_commit": "2022-07-28 11:31:28", + "latest_commit": "2022-08-27 09:07:53", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/swallow-3-8B-sqlcoder-2x8B-GGUF", - "project_name": "swallow-3-8B-sqlcoder-2x8B-GGUF", - "downloads": 77, + "description": "Japanese E5 Mixtral 7B Slerp GGUF GGUF conversion of oshizo/japanese-e5-mistral-7b_slerp Avaiable formats: Q2_K.gguf Q3_K.gguf Q4_K.gguf Q5_K.gguf", + "url": "https://huggingface.co./mm/japanese-e5-mistral-7b_slerp_gguf", + "project_name": "japanese-e5-mistral-7b_slerp_gguf", + "downloads": 75, + "source": "Hugging Face", + "score": -0.09503590212598506, + "first_commit": "2024-06-09 08:34:37", + "latest_commit": "2024-06-14 16:12:17", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24, + "model_architectures": null + }, + { + "description": "このデータセットはkunishou氏が公開している\"databricks-dolly-15k\"を日本語訳したkunishou/databricks-dolly-15k-jaデータセットの語尾をArrowPro-7B-KUJIRAを用いて「にゃん!", + "url": "https://huggingface.co./datasets/DataPilot/databricks-dolly-15k-Nyan-ja", + "project_name": "databricks-dolly-15k-Nyan-ja", + "downloads": 75, "source": "Hugging Face", - "score": -0.09197842138496061, - "first_commit": "2024-07-03 11:02:45", - "latest_commit": "2024-07-04 07:20:41", + "score": -0.09503590212598506, + "first_commit": "2024-05-18 13:03:25", + "latest_commit": "2024-05-19 10:24:16", "languages": [], - "model_or_dataset": "model", - "model_size": 13.7, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "Original Model Optical character recognition for Japanese text, with the main focus being Japanese manga.", - "url": "https://huggingface.co./TareHimself/manga-ocr-base", - "project_name": "manga-ocr-base", - "downloads": 77, + "description": "This dataset was created by automatically translating \"OpenAssistant/oasst1\" into Japanese.", + "url": "https://huggingface.co./datasets/kunishou/oasst1-89k-ja", + "project_name": "oasst1-89k-ja", + "downloads": 75, "source": "Hugging Face", - "score": -0.09197842138496061, - "first_commit": "2023-09-14 04:15:52", - "latest_commit": "2024-06-03 05:10:11", + "score": -0.09503590212598506, + "first_commit": "2023-05-06 09:12:30", + "latest_commit": "2024-04-01 17:15:31", "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "VisionEncoderDecoderModel" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Dataset 5M (5121625) clean Japanese full sentence with the context.", - "url": "https://huggingface.co./datasets/AhmedSSabir/Japanese-wiki-dump-sentence-dataset", - "project_name": "Japanese-wiki-dump-sentence-dataset", - "downloads": 77, + "description": "line-corporation/japanese-large-lm-3.6b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-3.6b-instruction-sftのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", + "project_name": "line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", + "downloads": 73, "source": "Hugging Face", - "score": -0.09197842138496061, - 
"first_commit": "2022-06-08 11:34:04", - "latest_commit": "2023-07-11 12:22:09", + "score": -0.09505533819098307, + "first_commit": "2023-09-02 18:01:40", + "latest_commit": "2023-09-08 02:52:29", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "ChatNTQ JA 7B V1.0 Model Description", - "url": "https://huggingface.co./NTQAI/chatntq-ja-7b-v1.0", - "project_name": "chatntq-ja-7b-v1.0", - "downloads": 76, + "description": "DataPilot様の ArrowPro-7B-RobinHood をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/ArrowPro-7B-RobinHood-GGUF", + "project_name": "ArrowPro-7B-RobinHood-GGUF", + "downloads": 72, "source": "Hugging Face", - "score": -0.09198902633549694, - "first_commit": "2023-12-26 06:22:59", - "latest_commit": "2023-12-26 09:22:34", + "score": -0.09506505622348206, + "first_commit": "2024-05-10 12:03:26", + "latest_commit": "2024-05-10 18:14:28", "languages": [], "model_or_dataset": "model", "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_architectures": null }, { - "description": "Llama-3-8B-Instruct-JP-nk2t-v0.2 Model Details: Built with Meta Llama 3", - "url": "https://huggingface.co./nk2t/Llama-3-8B-Instruct-japanese-nk2t-v0.2", - "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.2", - "downloads": 72, + "description": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k elyzaさんが公開しているELYZA-japanese-CodeLlama-7b-instructを 日本語のキャリブレーションセットで生成したGPTQモデルになります。", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", + "downloads": 71, "source": "Hugging Face", - "score": -0.09203144613764228, - "first_commit": "2024-05-04 04:16:35", - "latest_commit": "2024-05-15 12:56:34", + "score": -0.09507477425598107, + "first_commit": "2023-11-15 16:33:25", + "latest_commit": "2023-11-16 14:28:39", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, + "model_size": null, "model_architectures": "LlamaForCausalLM" }, { - "description": "fio-base-japanese-v0.1 日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!", - "url": "https://huggingface.co./bclavie/fio-base-japanese-v0.1", - "project_name": "fio-base-japanese-v0.1", - "downloads": 72, + "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese Considering the results of the JNLI evaluation result, we recommend using akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick for natural language inference in Japanese.", + "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli", + "project_name": "bert-base-japanese-v3_nli-jsnli", + "downloads": 71, "source": "Hugging Face", - "score": -0.09203144613764228, - "first_commit": "2023-12-18 11:01:07", - "latest_commit": "2023-12-19 10:28:16", + "score": -0.09507477425598107, + "first_commit": "2024-04-11 05:38:09", + "latest_commit": "2024-04-26 06:27:05", "languages": [], "model_or_dataset": "model", "model_size": 0.111, - "model_architectures": "BertModel" + "model_architectures": "BertForSequenceClassification" }, { - "description": "Sakura_dataset 商用利用可能な超小規模高品質日本語データセット。 ", - "url": "https://huggingface.co./datasets/saldra/sakura_japanese_dataset", - "project_name": "sakura_japanese_dataset", - "downloads": 72, + "description": "「LLM-jp-3 172B beta1」利用規約 この利用規約(以下「本規約」といいます)は、大学共同利用機関法人 情報・システム研究機構 国立情報学研究所(以下「提供者」といいます)による開発の成果物として公開する大規模言語モデル「LLM-jp-3 172B beta1」(以下「本プログラム」といいます)の利用に関する条件を定めるものです。", + "url": 
"https://huggingface.co./llm-jp/llm-jp-3-172b-beta1-instruct", + "project_name": "llm-jp-3-172b-beta1-instruct", + "downloads": 70, "source": "Hugging Face", - "score": -0.09203144613764228, - "first_commit": "2023-06-07 05:44:23", - "latest_commit": "2023-06-08 11:31:06", + "score": -0.09508449228848007, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 172.0, "model_architectures": null }, { - "description": "range3/cc100-ja This dataset consists of parquet files from the cc100 dataset with only the Japanese language extracted and sharded.", - "url": "https://huggingface.co./datasets/range3/cc100-ja", - "project_name": "cc100-ja", + "description": "RoBERTa base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", + "url": "https://huggingface.co./ybelkada/japanese-roberta-question-answering", + "project_name": "japanese-roberta-question-answering", "downloads": 70, "source": "Hugging Face", - "score": -0.09205265603871494, - "first_commit": "2023-02-04 05:10:34", - "latest_commit": "2023-02-04 05:43:32", + "score": -0.09508449228848007, + "first_commit": "2022-04-08 08:52:22", + "latest_commit": "2022-04-08 11:38:39", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "RobertaForQuestionAnswering" }, { - "description": "モデル概要 このモデルは、 sonoisa/sentence-luke-japanese-base-lite をSNS上のコメントに人手で攻撃性評価を行ったデータセットでFine-tuningすることで作成しました。 ", - "url": "https://huggingface.co./TomokiFujihara/luke-japanese-base-lite-offensiveness-estimation", - "project_name": "luke-japanese-base-lite-offensiveness-estimation", - "downloads": 69, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Int4", + "project_name": "Orion-14B-Chat-Int4", + "downloads": 70, "source": "Hugging Face", - "score": -0.09206326098925129, - "first_commit": "2023-12-08 03:20:14", - "latest_commit": "2024-03-24 12:35:36", + "score": -0.09508449228848007, + "first_commit": "2024-01-18 09:54:07", + "latest_commit": "2024-03-26 10:04:46", "languages": [], "model_or_dataset": "model", - "model_size": 0.133, - "model_architectures": "OffensivenessEstimationModel" + "model_size": 2.69, + "model_architectures": "OrionForCausalLM" }, { - "description": "Finetuned Waseda RoBERTa to evaluate the generated answers on JTruthfulQA.", - "url": "https://huggingface.co./nlp-waseda/roberta_jtruthfulqa", - "project_name": "roberta_jtruthfulqa", + "description": "rinna/japanese-data2vec-audio-base Overview This is a Japanese data2vec Audio Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-data2vec-audio-base", + "project_name": "japanese-data2vec-audio-base", "downloads": 69, "source": "Hugging Face", - "score": -0.09206326098925129, - "first_commit": "2023-12-06 01:33:02", - "latest_commit": "2023-12-06 04:31:12", + "score": -0.09509421032097908, + "first_commit": "2024-03-05 10:32:32", + "latest_commit": "2024-07-22 08:12:56", "languages": [], "model_or_dataset": "model", - "model_size": 0.337, - "model_architectures": "RobertaForSequenceClassification" + "model_size": 0.0932, + "model_architectures": "Data2VecAudioModel" }, { - "description": "llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", - "url": 
"https://huggingface.co./llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset", - "project_name": "bert-base-japanese-v3-crf-ner-wikipedia-dataset", - "downloads": 68, + "description": "Local-Novel-LLM-project様の Vecteus-V2-7B をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Vecteus-V2-7B-GGUF", + "project_name": "Vecteus-V2-7B-GGUF", + "downloads": 69, "source": "Hugging Face", - "score": -0.09207386593978761, - "first_commit": "2023-05-28 08:19:43", - "latest_commit": "2023-07-25 15:04:39", + "score": -0.09509421032097908, + "first_commit": "2024-06-16 05:26:00", + "latest_commit": "2024-06-16 11:32:15", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertWithCrfForTokenClassification" + "model_size": 7.24, + "model_architectures": null }, { - "description": "Vecteus-V2-7B このモデルは、ベクトルマージなどを用い作成された高性能ベースモデルです。 ", - "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-V2-7B", - "project_name": "Vecteus-V2-7B", - "downloads": 68, + "description": "Tanuki-8x8B-dpo-v1.0-GPTQ-8bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGPTQ 8bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GPTQ-8bit", + "project_name": "Tanuki-8x8B-dpo-v1.0-GPTQ-8bit", + "downloads": 69, "source": "Hugging Face", - "score": -0.09207386593978761, - "first_commit": "2024-06-16 03:51:43", - "latest_commit": "2024-07-06 13:39:41", + "score": -0.09509421032097908, + "first_commit": "2024-08-28 02:30:27", + "latest_commit": "2024-09-03 09:26:02", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": "TanukiForCausalLM" }, { - "description": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1 上記のモデルを訓練し、アダルト用語を認識できるようにしたものです。", - "url": "https://huggingface.co./swdq/Visual-novel-whisper", - "project_name": "Visual-novel-whisper", - "downloads": 67, + "description": "Dataset 5M (5121625) clean Japanese full sentence with the context.", + "url": "https://huggingface.co./datasets/AhmedSSabir/Japanese-wiki-dump-sentence-dataset", + "project_name": "Japanese-wiki-dump-sentence-dataset", + "downloads": 68, "source": "Hugging Face", - "score": -0.09208447089032394, - "first_commit": "2024-07-24 10:09:29", - "latest_commit": "2024-07-24 10:29:47", + "score": -0.09510392835347807, + "first_commit": "2022-06-08 11:34:04", + "latest_commit": "2023-07-11 12:22:09", "languages": [], - "model_or_dataset": "model", - "model_size": 0.756, - "model_architectures": "WhisperForConditionalGeneration" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "This is for (private) DEMO only.", - "url": "https://huggingface.co./Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition", - "project_name": "wav2vec2-xlsr-japanese-speech-emotion-recognition", + "description": "aixsatoshi-Honyaku-13b-gguf aixsatoshiさんが公開しているHonyaku-13bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/aixsatoshi-Honyaku-13b-gguf", + "project_name": "aixsatoshi-Honyaku-13b-gguf", "downloads": 66, "source": "Hugging Face", - "score": -0.09209507584086028, - "first_commit": "2021-09-22 04:10:36", - "latest_commit": "2023-10-19 01:31:17", + "score": -0.09512336441847608, + "first_commit": "2024-05-19 08:07:15", + "latest_commit": "2024-05-19 09:24:59", "languages": [], "model_or_dataset": "model", - "model_size": 0.316, - "model_architectures": 
"HubertForSequenceClassification" + "model_size": 13.1, + "model_architectures": null }, { - "description": "BERT base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", - "url": "https://huggingface.co./SkelterLabsInc/bert-base-japanese-jaquad", - "project_name": "bert-base-japanese-jaquad", + "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoで利用しているモデルです。", + "url": "https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b", + "project_name": "japanese-novel-gpt-j-6b", "downloads": 65, "source": "Hugging Face", - "score": -0.09210568079139661, - "first_commit": "2022-01-27 08:08:53", - "latest_commit": "2022-02-04 02:39:25", + "score": -0.09513308245097508, + "first_commit": "2023-08-11 00:52:32", + "latest_commit": "2023-08-26 04:20:51", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForQuestionAnswering" + "model_architectures": "GPTJForCausalLM" }, { - "description": "SakuraMixSeries 背景とキャラクタークオリティーを両立させたVAE内蔵型モデル Model with built-in VAE for both background and character quality 📄 ライセンス / License 修正 CreativeML OpenRAIL-M ライセンス / Modified CreativeML OpenRAIL-M license このモデルのクレジットを入れずに使用する Use the model without crediting the creator このモデルで生成した画像を商用利用する Sell images they generate このモデルを商用の画像生成サービスで利用する Run on services that generate images for money このモデルを使用したマージモデルを共有する Share merges using this model このモデル、またはこのモデルをマージしたモデルを販売する Sell this model or merges using this model このモデ", - "url": "https://huggingface.co./natsusakiyomi/SakuraMix", - "project_name": "SakuraMix", + "description": "DataPilot様の ArrowPro-7B-KUJIRA をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/ArrowPro-7B-KUJIRA-GGUF", + "project_name": "ArrowPro-7B-KUJIRA-GGUF", "downloads": 65, "source": "Hugging Face", - "score": -0.09210568079139661, - "first_commit": "2023-03-17 17:37:21", - "latest_commit": "2023-08-22 12:30:44", + "score": -0.09513308245097508, + "first_commit": "2024-05-09 13:34:05", + "latest_commit": "2024-05-09 23:32:52", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.24, "model_architectures": null }, { - "description": "HODACHI様の EZO-Common-T2-2B-gemma-2-it をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/EZO-Common-T2-2B-gemma-2-it-GGUF", - "project_name": "EZO-Common-T2-2B-gemma-2-it-GGUF", - "downloads": 65, + "description": "Llama-3-EZO-VLM-1 Based on SakanaAI/Llama-3-EvoVLM-JP-v2, it has been enhanced for Japanese usage through additional pre-training and instruction tuning.", + "url": "https://huggingface.co./AXCXEPT/Llama-3-EZO-VLM-1", + "project_name": "Llama-3-EZO-VLM-1", + "downloads": 64, "source": "Hugging Face", - "score": -0.09210568079139661, - "first_commit": "2024-08-01 11:38:48", - "latest_commit": "2024-08-01 13:42:20", + "score": -0.09514280048347408, + "first_commit": "2024-08-03 17:15:09", + "latest_commit": "2024-08-23 10:55:53", "languages": [], "model_or_dataset": "model", - "model_size": 2.61, - "model_architectures": null + "model_size": 8.48, + "model_architectures": "LlavaForConditionalGeneration" }, { - "description": "line-corporation/japanese-large-lm-1.7b line-corporationさんが公開しているjapanese-large-lm-1.7bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-gguf", - "project_name": "line-corp-japanese-large-lm-1.7b-gguf", - "downloads": 65, + "description": "Japanese Anime Speech Dataset 日本語はこちら japanese-anime-speech is an audio-text dataset designed for the training of automatic speech 
recognition models.", + "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech", + "project_name": "japanese-anime-speech", + "downloads": 64, "source": "Hugging Face", - "score": -0.09210568079139661, - "first_commit": "2023-09-03 22:35:34", - "latest_commit": "2024-03-24 05:54:30", + "score": -0.09514280048347408, + "first_commit": "2023-11-07 13:53:40", + "latest_commit": "2024-06-30 10:06:34", "languages": [], - "model_or_dataset": "model", - "model_size": 1.77, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "Deepreneur-blue-lizard-gguf Deepreneurさんが公開しているblue-lizardのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Deepreneur-blue-lizard-gguf", - "project_name": "Deepreneur-blue-lizard-gguf", + "description": "Japanese stopwords for nagisa", + "url": "https://huggingface.co./datasets/taishi-i/nagisa_stopwords", + "project_name": "nagisa_stopwords", "downloads": 63, "source": "Hugging Face", - "score": -0.09212689069246928, - "first_commit": "2024-02-13 15:18:15", - "latest_commit": "2024-02-13 16:26:26", + "score": -0.09515251851597309, + "first_commit": "2023-08-06 17:10:10", + "latest_commit": "2023-08-07 02:58:31", "languages": [], - "model_or_dataset": "model", - "model_size": 6.74, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "Ruri: Japanese General Text Embeddings Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-small", - "project_name": "ruri-small", - "downloads": 62, + "description": "[github].", + "url": "https://huggingface.co./datasets/fujiki/japanese_alpaca_data", + "project_name": "japanese_alpaca_data", + "downloads": 63, "source": "Hugging Face", - "score": -0.09213749564300562, - "first_commit": "2024-08-28 16:23:12", - "latest_commit": "2024-09-04 08:49:30", + "score": -0.09515251851597309, + "first_commit": "2023-05-18 07:13:15", + "latest_commit": "2023-05-19 12:54:13", "languages": [], - "model_or_dataset": "model", - "model_size": 0.0681, - "model_architectures": "DistilBertModel" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", - "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese", - "project_name": "wav2vec2-large-xlsr-japanese", + "description": "c4ai-command-r-v01-japanese-instruct GGUF版はこちら/Click here for the GGUF version 概要 CohereForAI/c4ai-command-r-v01を、ichikara-instructionを使って追加で日本語インストラクションチューニングを施したモデルです。 ", + "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct", + "project_name": "c4ai-command-r-v01-japanese-instruct", "downloads": 62, "source": "Hugging Face", - "score": -0.09213749564300562, - "first_commit": "2021-03-28 04:21:20", - "latest_commit": "2023-02-08 00:15:23", + "score": -0.09516223654847208, + "first_commit": "2024-04-04 03:56:52", + "latest_commit": "2024-04-07 15:18:37", "languages": [], "model_or_dataset": "model", - "model_size": 0.318, - "model_architectures": "Wav2Vec2ForCTC" + "model_size": 35.0, + "model_architectures": "CohereForCausalLM" }, { - "description": "Kanji Diffusion v1-4 Model Card Kanji Diffusion is a latent text-to-image diffusion model capable of hallucinating Kanji characters given any English prompt.", - "url": 
"https://huggingface.co./yashvoladoddi37/kanji-diffusion-v1-4", - "project_name": "kanji-diffusion-v1-4", + "description": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf alfredplplさんが公開しているLlama-3-8B-Instruct-Jaのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/alfredplpl-Llama-3-8B-Instruct-Ja-gguf", + "project_name": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf", "downloads": 62, "source": "Hugging Face", - "score": -0.09213749564300562, - "first_commit": "2024-08-13 06:06:21", - "latest_commit": "2024-08-16 12:14:22", + "score": -0.09516223654847208, + "first_commit": "2024-04-23 14:18:57", + "latest_commit": "2024-04-23 15:24:47", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 8.03, "model_architectures": null }, { - "description": "shisa-v1-qwen2-7b-gguf (English explanation is below.", - "url": "https://huggingface.co./keitokei1994/shisa-v1-qwen2-7b-GGUF", - "project_name": "shisa-v1-qwen2-7b-GGUF", + "description": "Asian Language Treebank (ALT) Project ALT Parallel Corpusのうち、日英対訳部分のみを抽出したデータセットです。", + "url": "https://huggingface.co./datasets/hpprc/alt-parallel-en-ja", + "project_name": "alt-parallel-en-ja", "downloads": 62, "source": "Hugging Face", - "score": -0.09213749564300562, - "first_commit": "2024-06-09 08:58:45", - "latest_commit": "2024-07-04 07:44:00", + "score": -0.09516223654847208, + "first_commit": "2024-03-21 02:24:27", + "latest_commit": "2024-03-21 12:40:15", "languages": [], - "model_or_dataset": "model", - "model_size": 7.62, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "c4ai-command-r-v01-japanese-instruct GGUF版はこちら/Click here for the GGUF version 概要 CohereForAI/c4ai-command-r-v01を、ichikara-instructionを使って追加で日本語インストラクションチューニングを施したモデルです。 ", - "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct", - "project_name": "c4ai-command-r-v01-japanese-instruct", - "downloads": 62, + "description": "luke-japanese-base-lite-xlm-roberta studio-ousia/luke-japanese-base-liteの重みの名前をXLMRoberta形式に置き換え、XLMRobertaモデルとして扱えるようにした物です。 ", + "url": "https://huggingface.co./hotchpotch/luke-japanese-base-lite-xlm-roberta", + "project_name": "luke-japanese-base-lite-xlm-roberta", + "downloads": 60, "source": "Hugging Face", - "score": -0.09213749564300562, - "first_commit": "2024-04-04 03:56:52", - "latest_commit": "2024-04-07 15:18:37", + "score": -0.09518167261347009, + "first_commit": "2024-09-09 18:18:38", + "latest_commit": "2024-09-09 18:33:44", "languages": [], "model_or_dataset": "model", - "model_size": 35.0, - "model_architectures": "CohereForCausalLM" + "model_size": 0.111, + "model_architectures": "XLMRobertaForMaskedLM" }, { - "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", - "url": "https://huggingface.co./if001/tiny_mixtral_ja", - "project_name": "tiny_mixtral_ja", - "downloads": 61, + "description": "Hibiki ASR Phonemizer This model is a Phoneme Level Speech Recognition network, originally a fine-tuned version of openai/whisper-large-v3 on a mixture of Different Japanese datasets.", + "url": "https://huggingface.co./Respair/Hibiki_ASR_Phonemizer_v0.2", + "project_name": "Hibiki_ASR_Phonemizer_v0.2", + "downloads": 60, "source": "Hugging Face", - "score": -0.09214810059354195, - "first_commit": "2024-01-22 15:02:21", - "latest_commit": "2024-01-23 00:42:05", + "score": -0.09518167261347009, + 
"first_commit": "2024-08-12 01:30:08", + "latest_commit": "2024-08-19 18:13:01", "languages": [], "model_or_dataset": "model", - "model_size": 0.276, - "model_architectures": "MixtralForCausalLM" + "model_size": 1.54, + "model_architectures": "WhisperForConditionalGeneration" }, { "description": "Google's mt5-base fine-tuned in Japanese to solve error detection and correction task. ", "url": "https://huggingface.co./kz/mt5base-finetuned-ECC-japanese-small", "project_name": "mt5base-finetuned-ECC-japanese-small", - "downloads": 61, + "downloads": 59, "source": "Hugging Face", - "score": -0.09214810059354195, + "score": -0.0951913906459691, "first_commit": "2021-03-21 19:07:13", "latest_commit": "2022-05-26 13:50:56", "languages": [], @@ -9557,250 +9597,180 @@ "model_architectures": "MT5ForConditionalGeneration" }, { - "description": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "project_name": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "downloads": 61, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-xl", + "project_name": "t5-xl", + "downloads": 59, "source": "Hugging Face", - "score": -0.09214810059354195, - "first_commit": "2023-09-26 06:16:23", - "latest_commit": "2023-09-28 00:02:06", + "score": -0.0951913906459691, + "first_commit": "2023-04-26 07:19:08", + "latest_commit": "2023-05-10 10:01:04", "languages": [], "model_or_dataset": "model", - "model_size": 1.17, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "概要 NHKで定期的に放送されていた『着信御礼!", - "url": "https://huggingface.co./datasets/YANS-official/ogiri-keitai", - "project_name": "ogiri-keitai", + "description": "matsuolab-weblab-10b-instruction-sft-gguf matsuo-labさんが公開しているweblab-10b-instruction-sftのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/matsuolab-weblab-10b-instruction-sft-gguf", + "project_name": "matsuolab-weblab-10b-instruction-sft-gguf", "downloads": 59, "source": "Hugging Face", - "score": -0.09216931049461462, - "first_commit": "2024-07-20 10:11:36", - "latest_commit": "2024-08-30 10:13:20", + "score": -0.0951913906459691, + "first_commit": "2023-08-21 11:22:48", + "latest_commit": "2023-09-02 18:16:33", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 10.7, "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-small-medium", - "project_name": "t5-small-medium", + "description": "Sarashina2-7B Instruct sarashina2-7Bを会話できるようにフルファインチューニングしたものです。", + "url": "https://huggingface.co./alfredplpl/sarashina2-7b-it", + "project_name": "sarashina2-7b-it", "downloads": 58, "source": "Hugging Face", - "score": -0.09217991544515095, - "first_commit": "2023-04-26 08:26:19", - "latest_commit": "2023-05-10 10:01:16", + "score": -0.09520110867846811, + "first_commit": "2024-06-12 02:24:28", + "latest_commit": "2024-06-12 03:00:35", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 7.32, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Umievo-itr012-Gleipnir-7B このモデルは強力な4つの日本語モデルを進化的アルゴリズムで進化的マージしたものです。", - "url": "https://huggingface.co./umiyuki/Umievo-itr012-Gleipnir-7B", - "project_name": 
"Umievo-itr012-Gleipnir-7B", + "description": "electra-base-cyberbullying This is a BERT Base model for the Japanese language finetuned for automatic cyberbullying detection.", + "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-cyberbullying", + "project_name": "bert-base-japanese-sentiment-cyberbullying", "downloads": 58, "source": "Hugging Face", - "score": -0.09217991544515095, - "first_commit": "2024-05-29 12:32:29", - "latest_commit": "2024-05-29 13:51:31", + "score": -0.09520110867846811, + "first_commit": "2022-09-09 02:16:34", + "latest_commit": "2022-11-01 07:18:05", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" - }, - { - "description": "このデータセットはkunishou氏が公開している\"databricks-dolly-15k\"を日本語訳したkunishou/databricks-dolly-15k-jaデータセットの語尾をArrowPro-7B-KUJIRAを用いて「にゃん!", - "url": "https://huggingface.co./datasets/DataPilot/databricks-dolly-15k-Nyan-ja", - "project_name": "databricks-dolly-15k-Nyan-ja", - "downloads": 57, - "source": "Hugging Face", - "score": -0.09219052039568729, - "first_commit": "2024-05-18 13:03:25", - "latest_commit": "2024-05-19 10:24:16", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, - { - "description": "Dataset overview This dataset identifies whether a GitHub repository description pertains to Japanese natural language processing (NLP).", - "url": "https://huggingface.co./datasets/taishi-i/awesome-japanese-nlp-classification-dataset", - "project_name": "awesome-japanese-nlp-classification-dataset", - "downloads": 57, - "source": "Hugging Face", - "score": -0.09219052039568729, - "first_commit": "2023-09-09 06:37:36", - "latest_commit": "2023-09-09 20:09:04", - "languages": [], - "model_or_dataset": "dataset", "model_size": null, - "model_architectures": null + "model_architectures": "BertForSequenceClassification" }, { - "description": "英語+日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on English and Japanese balanced corpus. 
", - "url": "https://huggingface.co./sonoisa/t5-base-english-japanese", - "project_name": "t5-base-english-japanese", + "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b", + "project_name": "ELYZA-japanese-CodeLlama-7b", "downloads": 56, "source": "Hugging Face", - "score": -0.09220112534622361, - "first_commit": "2022-07-28 11:31:28", - "latest_commit": "2022-08-27 09:07:53", + "score": -0.09522054474346611, + "first_commit": "2023-11-07 12:48:15", + "latest_commit": "2023-11-15 00:38:12", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 6.74, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Heron BLIP Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1", - "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1", + "description": "JaWiki WikipediaのHTML形式のダンプファイルから抽出したテキストデータセットです。 ", + "url": "https://huggingface.co./datasets/hpprc/jawiki", + "project_name": "jawiki", "downloads": 56, "source": "Hugging Face", - "score": -0.09220112534622361, - "first_commit": "2024-02-20 11:32:57", - "latest_commit": "2024-02-27 13:57:20", + "score": -0.09522054474346611, + "first_commit": "2024-02-02 06:36:01", + "latest_commit": "2024-02-13 15:19:49", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "VideoBlipForConditionalGeneration" - }, - { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./nold/Orion-14B-Base-GGUF", - "project_name": "Orion-14B-Base-GGUF", - "downloads": 55, - "source": "Hugging Face", - "score": -0.09221173029675996, - "first_commit": "2024-03-07 14:56:51", - "latest_commit": "2024-03-07 19:33:53", - "languages": [], - "model_or_dataset": "model", - "model_size": 14.5, "model_architectures": null }, { - "description": "studio-ousia/luke-japanese-baseに対して次の変更を加えたモデルです。 ", - "url": "https://huggingface.co./uzabase/luke-japanese-wordpiece-base", - "project_name": "luke-japanese-wordpiece-base", + "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-instruct - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-instruct StableLM", + "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", + "project_name": "stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", "downloads": 55, "source": "Hugging Face", - "score": -0.09221173029675996, - "first_commit": "2023-08-10 06:04:58", - "latest_commit": "2023-11-28 13:35:07", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "LukeForMaskedLM" - }, - { - "description": "GGUF conversion of NTQAI/chatntq-ja-7b-v1.0 ChatNTQ-JA-7b-v1.0 is a Japanese chat fine-tuned model built on top of the stabilityai/japanese-stablelm-base-gamma-7b, which is originally based on Mistral 7B v0.1.", - "url": "https://huggingface.co./TFMC/ChatNTQ-JA-7b-v1.0-GGUF", - "project_name": "ChatNTQ-JA-7b-v1.0-GGUF", - "downloads": 54, - "source": "Hugging Face", - "score": -0.09222233524729628, - "first_commit": "2024-04-03 22:42:14", - "latest_commit": "2024-04-04 23:10:54", + "score": -0.09523026277596511, + "first_commit": "2023-11-16 
10:25:20", + "latest_commit": "2023-11-16 12:53:33", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, + "model_size": 2.8, "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-xl", - "project_name": "t5-xl", - "downloads": 54, + "description": "BERT for Sentiment Analysis of Japanese Twitter", + "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter-sentiment", + "project_name": "bert-for-japanese-twitter-sentiment", + "downloads": 55, "source": "Hugging Face", - "score": -0.09222233524729628, - "first_commit": "2023-04-26 07:19:08", - "latest_commit": "2023-05-10 10:01:04", + "score": -0.09523026277596511, + "first_commit": "2024-05-13 10:19:52", + "latest_commit": "2024-08-09 12:03:25", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" }, { - "description": "This is a Japanese+English sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-en-mean-tokens", - "project_name": "sentence-bert-base-ja-en-mean-tokens", - "downloads": 54, + "description": "abc-multiple-choice Dataset abc-multiple-choice は、競技クイズの大会「abc」で使用された4択問題を元に作成された、多肢選択式の質問応答データセットです。 ", + "url": "https://huggingface.co./datasets/tohoku-nlp/abc-multiple-choice", + "project_name": "abc-multiple-choice", + "downloads": 55, "source": "Hugging Face", - "score": -0.09222233524729628, - "first_commit": "2022-05-08 03:05:08", - "latest_commit": "2022-05-08 03:29:28", + "score": -0.09523026277596511, + "first_commit": "2024-03-02 03:58:25", + "latest_commit": "2024-03-12 07:32:13", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "BertModel" + "model_architectures": null }, { - "description": "Heron BLIP Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", - "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", - "downloads": 53, + "description": "Dataset.", + "url": "https://huggingface.co./datasets/hpprc/jsick", + "project_name": "jsick", + "downloads": 55, "source": "Hugging Face", - "score": -0.09223294019783262, - "first_commit": "2024-02-27 13:48:02", - "latest_commit": "2024-02-27 13:59:23", + "score": -0.09523026277596511, + "first_commit": "2023-04-08 16:02:06", + "latest_commit": "2023-04-11 15:18:09", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "VideoBlipForConditionalGeneration" + "model_architectures": null }, { - "description": "BERT for Sentiment Analysis of Japanese Twitter", - "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter-sentiment", - "project_name": "bert-for-japanese-twitter-sentiment", - "downloads": 53, + "description": "This is a BERT Base model for emotion analysis in Japanese additionally fine-tuned for emotion detection and classification.", + "url": "https://huggingface.co./alter-wang/bert-base-japanese-emotion-lily", + "project_name": "bert-base-japanese-emotion-lily", + "downloads": 54, "source": "Hugging Face", - "score": -0.09223294019783262, - "first_commit": "2024-05-13 10:19:52", - "latest_commit": "2024-08-09 12:03:25", + "score": -0.09523998080846412, + "first_commit": "2024-04-25 06:05:51", + "latest_commit": "2024-06-17 01:44:16", "languages": [], 
"model_or_dataset": "model", "model_size": 0.111, "model_architectures": "BertForSequenceClassification" }, { - "description": "Oumuamua-7b-instruct-v2 🚨 If you want to avoid outputs that appear to be literal translations, please prompt this model to role-play as a Japanese person.", - "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct-v2", - "project_name": "Oumuamua-7b-instruct-v2", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Chat", + "project_name": "SambaLingo-Japanese-Chat", "downloads": 53, "source": "Hugging Face", - "score": -0.09223294019783262, - "first_commit": "2024-06-14 07:08:07", - "latest_commit": "2024-06-19 22:29:07", + "score": -0.09524969884096311, + "first_commit": "2024-02-15 22:45:08", + "latest_commit": "2024-04-16 22:32:15", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, - "model_architectures": "MistralForCausalLM" - }, - { - "description": "JSNLI Version 1.1 のデータセットのうち、フィルタリング後の訓練セット (train_w_filtering)", - "url": "https://huggingface.co./datasets/llm-book/jsnli", - "project_name": "jsnli", - "downloads": 53, - "source": "Hugging Face", - "score": -0.09223294019783262, - "first_commit": "2023-06-19 12:31:46", - "latest_commit": "2023-10-25 15:22:46", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_size": 6.95, + "model_architectures": "LlamaForCausalLM" }, { "description": "Convert from: drewschaub/whisper-large-v3-japanese-4k-steps Whisper large-v3 model for CTranslate2 This repository contains the conversion of drewschaub/whisper-large-v3-japanese-4k-steps to the CTranslate2 model format.", "url": "https://huggingface.co./JhonVanced/whisper-large-v3-japanese-4k-steps-ct2", "project_name": "whisper-large-v3-japanese-4k-steps-ct2", - "downloads": 52, + "downloads": 53, "source": "Hugging Face", - "score": -0.09224354514836895, + "score": -0.09524969884096311, "first_commit": "2024-02-20 13:41:17", "latest_commit": "2024-02-22 01:11:59", "languages": [], @@ -9809,40 +9779,40 @@ "model_architectures": null }, { - "description": "line-corporation/japanese-large-lm-1.7b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-1.7b-instruction-sftのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", - "project_name": "line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", - "downloads": 52, + "description": "GGUF conversion of NTQAI/chatntq-ja-7b-v1.0 ChatNTQ-JA-7b-v1.0 is a Japanese chat fine-tuned model built on top of the stabilityai/japanese-stablelm-base-gamma-7b, which is originally based on Mistral 7B v0.1.", + "url": "https://huggingface.co./TFMC/ChatNTQ-JA-7b-v1.0-GGUF", + "project_name": "ChatNTQ-JA-7b-v1.0-GGUF", + "downloads": 53, "source": "Hugging Face", - "score": -0.09224354514836895, - "first_commit": "2023-09-03 22:30:23", - "latest_commit": "2024-03-24 05:54:56", + "score": -0.09524969884096311, + "first_commit": "2024-04-03 22:42:14", + "latest_commit": "2024-04-04 23:10:54", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 7.24, "model_architectures": null }, { - "description": "japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", - "project_name": 
"japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", + "description": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf pfnetさんが公開しているnekomata-14b-pfn-qfin-inst-mergeのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf", + "project_name": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf", "downloads": 52, "source": "Hugging Face", - "score": -0.09224354514836895, - "first_commit": "2023-09-26 06:16:04", - "latest_commit": "2023-09-27 23:54:44", + "score": -0.09525941687346212, + "first_commit": "2024-04-23 14:53:08", + "latest_commit": "2024-04-24 14:39:32", "languages": [], "model_or_dataset": "model", - "model_size": 0.771, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": 14.2, + "model_architectures": null }, { "description": "bert-base-japanese-v3-bpr-question-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRの質問エンコーダです。 ", "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-question-aio", "project_name": "bert-base-japanese-v3-bpr-question-aio", - "downloads": 51, + "downloads": 52, "source": "Hugging Face", - "score": -0.0922541500989053, + "score": -0.09525941687346212, "first_commit": "2023-06-06 08:21:13", "latest_commit": "2023-07-24 07:12:05", "languages": [], @@ -9851,418 +9821,544 @@ "model_architectures": "BertModel" }, { - "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-instruct - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-instruct StableLM", - "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", - "project_name": "stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", + "description": "This is a Japanese+English sentence-BERT model.", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-en-mean-tokens", + "project_name": "sentence-bert-base-ja-en-mean-tokens", + "downloads": 52, + "source": "Hugging Face", + "score": -0.09525941687346212, + "first_commit": "2022-05-08 03:05:08", + "latest_commit": "2022-05-08 03:29:28", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "BertModel" + }, + { + "description": "sonoisa/t5-base-japaneseをファインチューニングして、タイトル生成に用いれるようにしたモデルです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/t5-CAMERA-title-generation", + "project_name": "t5-CAMERA-title-generation", + "downloads": 51, + "source": "Hugging Face", + "score": -0.09526913490596112, + "first_commit": "2023-03-21 10:49:27", + "latest_commit": "2023-07-21 14:11:13", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.223, + "model_architectures": "T5ForConditionalGeneration" + }, + { + "description": "bart-base-japanese-news(base-sized model)", + "url": "https://huggingface.co./stockmark/bart-base-japanese-news", + "project_name": "bart-base-japanese-news", + "downloads": 51, + "source": "Hugging Face", + "score": -0.09526913490596112, + "first_commit": "2023-01-20 04:23:07", + "latest_commit": "2023-12-08 03:39:50", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.125, + "model_architectures": "BartForConditionalGeneration" + }, + { + "description": "COMET-T5 ja Finetuned T5 on ATOMIC ja using a text-to-text language modeling objective.", + "url": "https://huggingface.co./nlp-waseda/comet-t5-base-japanese", + "project_name": "comet-t5-base-japanese", + "downloads": 51, + "source": "Hugging Face", + "score": -0.09526913490596112, + 
"first_commit": "2022-11-12 15:07:40", + "latest_commit": "2023-02-08 09:26:55", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "T5ForConditionalGeneration" + }, + { + "description": "Japanese-TextGen-Kage-v0.1-2x7B Kage is \"影\" in Japanese or \"Shadow\" in English.", + "url": "https://huggingface.co./dddump/Japanese-TextGen-Kage-v0.1-2x7B-gguf", + "project_name": "Japanese-TextGen-Kage-v0.1-2x7B-gguf", "downloads": 50, "source": "Hugging Face", - "score": -0.09226475504944162, - "first_commit": "2023-11-16 10:25:20", - "latest_commit": "2023-11-16 12:53:33", + "score": -0.09527885293846013, + "first_commit": "2024-05-04 07:03:38", + "latest_commit": "2024-05-19 08:54:19", "languages": [], "model_or_dataset": "model", - "model_size": 2.8, + "model_size": 12.9, "model_architectures": null }, { - "description": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", - "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "description": "BERT base Japanese model This repository contains a BERT base model trained on Japanese Wikipedia dataset.", + "url": "https://huggingface.co./colorfulscoop/bert-base-ja", + "project_name": "bert-base-ja", "downloads": 50, "source": "Hugging Face", - "score": -0.09226475504944162, - "first_commit": "2023-09-26 06:15:16", - "latest_commit": "2023-09-29 03:19:23", + "score": -0.09527885293846013, + "first_commit": "2021-07-30 10:11:35", + "latest_commit": "2021-09-23 15:46:05", "languages": [], "model_or_dataset": "model", - "model_size": 0.446, - "model_architectures": "GPT2LMHeadModel" + "model_size": null, + "model_architectures": "BertForPreTraining" }, { - "description": "bert-base-japanese-jsnli This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on the JSNLI dataset.", - "url": "https://huggingface.co./Formzu/bert-base-japanese-jsnli", - "project_name": "bert-base-japanese-jsnli", - "downloads": 49, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-base-1_6b", + "project_name": "japanese-stablelm-2-base-1_6b", + "downloads": 50, "source": "Hugging Face", - "score": -0.09227535999997796, - "first_commit": "2022-10-14 07:50:13", - "latest_commit": "2022-10-18 12:13:20", + "score": -0.09527885293846013, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_size": 1.64, + "model_architectures": null }, { - "description": "electra-base-cyberbullying This is an ELECTRA Base model for the Japanese language finetuned for automatic cyberbullying detection.", - "url": "https://huggingface.co./kit-nlp/transformers-ud-japanese-electra-base-discriminator-cyberbullying", - "project_name": "transformers-ud-japanese-electra-base-discriminator-cyberbullying", - "downloads": 49, + "description": "BERT base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", + "url": "https://huggingface.co./SkelterLabsInc/bert-base-japanese-jaquad", + "project_name": "bert-base-japanese-jaquad", + "downloads": 48, "source": "Hugging Face", - "score": -0.09227535999997796, - "first_commit": "2022-09-09 04:08:15", - "latest_commit": "2022-11-01 07:18:40", + 
"score": -0.09529828900345813, + "first_commit": "2022-01-27 08:08:53", + "latest_commit": "2022-02-04 02:39:25", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForSequenceClassification" + "model_architectures": "BertForQuestionAnswering" }, { - "description": "このモデルはluke-japanese-baseをファインチューニングして、MARC-ja(positive or negativeの二値分類)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-marcja", - "project_name": "luke-japanese-base-marcja", + "description": "モデル概要 このモデルは、 sonoisa/sentence-luke-japanese-base-lite をSNS上のコメントに人手で攻撃性評価を行ったデータセットでFine-tuningすることで作成しました。 ", + "url": "https://huggingface.co./TomokiFujihara/luke-japanese-base-lite-offensiveness-estimation", + "project_name": "luke-japanese-base-lite-offensiveness-estimation", "downloads": 48, "source": "Hugging Face", - "score": -0.09228596495051429, - "first_commit": "2023-03-02 03:57:33", - "latest_commit": "2023-07-21 14:10:48", + "score": -0.09529828900345813, + "first_commit": "2023-12-08 03:20:14", + "latest_commit": "2024-03-24 12:35:36", "languages": [], "model_or_dataset": "model", - "model_size": 0.279, - "model_architectures": "LukeForSequenceClassification" + "model_size": 0.133, + "model_architectures": "OffensivenessEstimationModel" }, { - "description": "t5-base-japanese-web-8k (with Byte-fallback, 8K) Description megagonlabs/t5-base-japanese-web-8k is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", - "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web-8k", - "project_name": "t5-base-japanese-web-8k", + "description": "The English document is here. ", + "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft", + "project_name": "Watashiha-Llama-2-13B-Ogiri-sft", "downloads": 48, "source": "Hugging Face", - "score": -0.09228596495051429, - "first_commit": "2021-09-06 10:13:42", - "latest_commit": "2023-07-04 07:05:38", + "score": -0.09529828900345813, + "first_commit": "2024-01-19 06:59:08", + "latest_commit": "2024-03-04 05:24:31", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_size": 13.1, + "model_architectures": "LlamaForCausalLM" }, { - "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese Considering the results of the JNLI evaluation result, we recommend using akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick for natural language inference in Japanese.", - "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli", - "project_name": "bert-base-japanese-v3_nli-jsnli", + "description": "BERT large Japanese (character-level tokenization with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char", + "project_name": "bert-large-japanese-char", "downloads": 48, "source": "Hugging Face", - "score": -0.09228596495051429, - "first_commit": "2024-04-11 05:38:09", - "latest_commit": "2024-04-26 06:27:05", + "score": -0.09529828900345813, + "first_commit": "2021-03-05 06:36:24", + "latest_commit": "2021-09-23 15:45:39", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "Mixtral-8x7B-v0.1-japanese Mixtral-8x7B-v0.1-japaneseはMixtral-8x7B-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", - "url": "https://huggingface.co./abeja/Mixtral-8x7B-v0.1-japanese", 
- "project_name": "Mixtral-8x7B-v0.1-japanese", - "downloads": 48, + "description": "Fine-tuned XLSR-53 large model for speech diarization in Japanese phone-call 2 speakers diarization model which was fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using phone-call data CallHome.", + "url": "https://huggingface.co./Ivydata/wav2vec2-large-speech-diarization-jp", + "project_name": "wav2vec2-large-speech-diarization-jp", + "downloads": 47, "source": "Hugging Face", - "score": -0.09228596495051429, - "first_commit": "2024-04-16 03:06:14", - "latest_commit": "2024-04-20 09:14:10", + "score": -0.09530800703595713, + "first_commit": "2023-05-08 10:10:43", + "latest_commit": "2023-05-10 00:32:23", "languages": [], "model_or_dataset": "model", - "model_size": 46.9, - "model_architectures": "MixtralForCausalLM" + "model_size": null, + "model_architectures": "Wav2Vec2ForAudioFrameClassification" }, { - "description": "実験モデルです。", - "url": "https://huggingface.co./Akimite/Qwen2-7b-Instruct-Boku-v2", - "project_name": "Qwen2-7b-Instruct-Boku-v2", + "description": "Original Model Optical character recognition for Japanese text, with the main focus being Japanese manga.", + "url": "https://huggingface.co./TareHimself/manga-ocr-base", + "project_name": "manga-ocr-base", "downloads": 47, "source": "Hugging Face", - "score": -0.09229656990105062, - "first_commit": "2024-06-13 03:23:05", - "latest_commit": "2024-06-15 14:58:10", + "score": -0.09530800703595713, + "first_commit": "2023-09-14 04:15:52", + "latest_commit": "2024-06-03 05:10:11", "languages": [], "model_or_dataset": "model", - "model_size": 7.62, - "model_architectures": "Qwen2ForCausalLM" + "model_size": 0.111, + "model_architectures": "VisionEncoderDecoderModel" }, { - "description": "COMET-T5 ja Finetuned T5 on ATOMIC ja using a text-to-text language modeling objective.", - "url": "https://huggingface.co./nlp-waseda/comet-t5-base-japanese", - "project_name": "comet-t5-base-japanese", + "description": "ArrowPro-7B-KillerWhale-gguf DataPilotさんが公開しているArrowPro-7B-KillerWhaleのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ArrowPro-7B-KillerWhale-gguf", + "project_name": "ArrowPro-7B-KillerWhale-gguf", + "downloads": 47, + "source": "Hugging Face", + "score": -0.09530800703595713, + "first_commit": "2024-05-29 15:06:55", + "latest_commit": "2024-05-29 15:53:17", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24, + "model_architectures": null + }, + { + "description": "yacis-electra-small-cyberbullying", + "url": "https://huggingface.co./ptaszynski/yacis-electra-small-japanese-cyberbullying", + "project_name": "yacis-electra-small-japanese-cyberbullying", "downloads": 46, "source": "Hugging Face", - "score": -0.09230717485158696, - "first_commit": "2022-11-12 15:07:40", - "latest_commit": "2023-02-08 09:26:55", + "score": -0.09531772506845614, + "first_commit": "2022-01-12 03:57:13", + "latest_commit": "2022-01-16 13:51:28", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": "ElectraForSequenceClassification" }, { - "description": "Oumuamua-7b-instruct This is a merge of pre-trained language models created using mergekit. 
", - "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct", - "project_name": "Oumuamua-7b-instruct", + "description": "bert-large-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-luw-upos", + "project_name": "bert-large-japanese-luw-upos", "downloads": 46, "source": "Hugging Face", - "score": -0.09230717485158696, - "first_commit": "2024-06-01 10:40:37", - "latest_commit": "2024-06-01 15:55:51", + "score": -0.09531772506845614, + "first_commit": "2021-10-26 13:54:17", + "latest_commit": "2022-09-18 19:43:45", "languages": [], "model_or_dataset": "model", - "model_size": 7.33, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": "BertForTokenClassification" }, { - "description": "Overview of bert-japanese-12M The bert-japanese-12M model is a transformer-based model with BERT architecture, which is designed to be used on Japanese text.", - "url": "https://huggingface.co./nptdat/bert-japanese-12M", - "project_name": "bert-japanese-12M", - "downloads": 46, + "description": "alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000", + "project_name": "jmedroberta-base-manbyo-wordpiece-vocab50000", + "downloads": 45, "source": "Hugging Face", - "score": -0.09230717485158696, - "first_commit": "2024-08-16 16:46:49", - "latest_commit": "2024-08-19 02:56:14", + "score": -0.09532744310095513, + "first_commit": "2022-12-22 17:19:15", + "latest_commit": "2023-03-08 01:47:12", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "BertForMaskedLM" }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-small", - "project_name": "ruri-reranker-small", + "description": "COMET-GPT2 ja Finetuned GPT-2 on ATOMIC ja using a causal language modeling (CLM) objective.", + "url": "https://huggingface.co./nlp-waseda/comet-gpt2-small-japanese", + "project_name": "comet-gpt2-small-japanese", "downloads": 45, "source": "Hugging Face", - "score": -0.09231777980212329, - "first_commit": "2024-08-19 12:39:07", - "latest_commit": "2024-09-04 08:50:32", + "score": -0.09532744310095513, + "first_commit": "2022-11-15 05:14:35", + "latest_commit": "2023-02-13 10:26:12", "languages": [], "model_or_dataset": "model", - "model_size": 0.06870000000000001, - "model_architectures": "DistilBertForSequenceClassification" + "model_size": null, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k elyzaさんが公開しているELYZA-japanese-CodeLlama-7b-instructを 日本語のキャリブレーションセットで生成したGPTQモデルになります。", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", - "downloads": 45, + "description": "Llama-3-8B-Instruct-JP-nk2t-v0.2 Model Details: Built with Meta Llama 3", + "url": "https://huggingface.co./nk2t/Llama-3-8B-Instruct-japanese-nk2t-v0.2", + "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.2", + "downloads": 44, "source": "Hugging Face", - "score": -0.09231777980212329, - "first_commit": "2023-11-15 16:33:25", - "latest_commit": "2023-11-16 14:28:39", + "score": 
-0.09533716113345414, + "first_commit": "2024-05-04 04:16:35", + "latest_commit": "2024-05-15 12:56:34", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 8.03, "model_architectures": "LlamaForCausalLM" }, { - "description": "bert-base-japanese-v3-jcommonsenseqa 「大規模言語モデル入門」の第5章で紹介している(多肢選択式質問応答)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jcommonsenseqa", - "project_name": "bert-base-japanese-v3-jcommonsenseqa", - "downloads": 45, + "description": "luke-large-defamation-detection-japanese 日本語誹謗中傷検出器", + "url": "https://huggingface.co./kubota/luke-large-defamation-detection-japanese", + "project_name": "luke-large-defamation-detection-japanese", + "downloads": 44, "source": "Hugging Face", - "score": -0.09231777980212329, - "first_commit": "2023-06-20 07:01:53", - "latest_commit": "2023-07-24 06:49:16", + "score": -0.09533716113345414, + "first_commit": "2023-01-23 06:25:08", + "latest_commit": "2023-02-07 15:49:33", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMultipleChoice" + "model_architectures": "LukeForSequenceClassification" }, { - "description": "[Under Construction]", - "url": "https://huggingface.co./datasets/bclavie/mmarco-japanese-hard-negatives", - "project_name": "mmarco-japanese-hard-negatives", - "downloads": 45, + "description": "AIO with extended answers AIO (AI王) is a Japanese quiz dataset.", + "url": "https://huggingface.co./datasets/sbintuitions/aio-extended-answers", + "project_name": "aio-extended-answers", + "downloads": 44, "source": "Hugging Face", - "score": -0.09231777980212329, - "first_commit": "2023-12-24 13:04:27", - "latest_commit": "2023-12-24 18:52:04", + "score": -0.09533716113345414, + "first_commit": "2024-06-21 08:15:23", + "latest_commit": "2024-07-29 08:26:02", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "cyberagent/calm2-7b-chatの出力を人手でチェック・修正することで作成した日本語Instructionデータセットです。 ", - "url": "https://huggingface.co./datasets/Kendamarron/jimba-instuction-1k-beta", - "project_name": "jimba-instuction-1k-beta", - "downloads": 45, + "description": "Japanese-LLaMA-2-13B-GGUF Japanese-LLaMA-2-13B-GGUFはJapanese-LLaMA-2-13BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-llama-2-13b-gguf", + "project_name": "japanese-llama-2-13b-gguf", + "downloads": 43, "source": "Hugging Face", - "score": -0.09231777980212329, - "first_commit": "2024-02-29 15:23:48", - "latest_commit": "2024-04-25 12:49:28", + "score": -0.09534687916595314, + "first_commit": "2023-12-20 05:37:09", + "latest_commit": "2023-12-26 11:45:15", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 13.3, "model_architectures": null }, { - "description": "deberta-large-japanese-wikipedia-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-luw-upos", - "project_name": "deberta-large-japanese-wikipedia-luw-upos", - "downloads": 44, + "description": "Introduction Who am I: Qishen Ha", + "url": "https://huggingface.co./haqishen/Llama-3-8B-Japanese-Instruct", + "project_name": "Llama-3-8B-Japanese-Instruct", + "downloads": 43, "source": "Hugging Face", - "score": -0.09232838475265963, - "first_commit": "2022-07-06 03:15:12", - "latest_commit": "2024-08-20 17:54:58", + "score": -0.09534687916595314, + "first_commit": "2024-04-23 04:41:19", + "latest_commit": "2024-05-02 
03:36:10", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "project_name": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "downloads": 44, + "description": "karasu-lora-jp-qa-chat karasu fine tuned model by lora method with the original Q&A dataset.", + "url": "https://huggingface.co./aipib/karasu-lora-jp-qa-chat", + "project_name": "karasu-lora-jp-qa-chat", + "downloads": 43, + "source": "Hugging Face", + "score": -0.09534687916595314, + "first_commit": "2024-04-24 02:26:58", + "latest_commit": "2024-06-03 01:02:33", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.1, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "Deepreneur-blue-lizard-gguf Deepreneurさんが公開しているblue-lizardのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Deepreneur-blue-lizard-gguf", + "project_name": "Deepreneur-blue-lizard-gguf", + "downloads": 43, + "source": "Hugging Face", + "score": -0.09534687916595314, + "first_commit": "2024-02-13 15:18:15", + "latest_commit": "2024-02-13 16:26:26", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.74, + "model_architectures": null + }, + { + "description": "更新履歴 2023年5月7日 「oasst1-89k-ja」データセットを追加して対話システムに対応しました。", + "url": "https://huggingface.co./inu-ai/dolly-japanese-gpt-1b", + "project_name": "dolly-japanese-gpt-1b", + "downloads": 42, "source": "Hugging Face", - "score": -0.09232838475265963, - "first_commit": "2023-09-26 06:15:31", - "latest_commit": "2023-09-29 03:09:03", + "score": -0.09535659719845214, + "first_commit": "2023-04-13 22:46:07", + "latest_commit": "2023-08-01 07:55:27", "languages": [], "model_or_dataset": "model", - "model_size": 0.625, + "model_size": 1.33, "model_architectures": "GPT2LMHeadModel" }, { - "description": "オリジナルのサイトと同じものを使用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/ja-vicuna-qa-benchmark", - "project_name": "ja-vicuna-qa-benchmark", - "downloads": 44, + "description": "roberta-base-japanese-char-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-char-luw-upos", + "project_name": "roberta-base-japanese-char-luw-upos", + "downloads": 42, "source": "Hugging Face", - "score": -0.09232838475265963, - "first_commit": "2024-06-25 22:14:55", - "latest_commit": "2024-08-31 12:37:25", + "score": -0.09535659719845214, + "first_commit": "2021-12-28 05:01:56", + "latest_commit": "2024-08-20 18:21:15", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, + "model_architectures": "RobertaForTokenClassification" + }, + { + "description": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1 上記のモデルを訓練し、アダルト用語を認識できるようにしたものです。", + "url": "https://huggingface.co./swdq/Visual-novel-whisper", + "project_name": "Visual-novel-whisper", + "downloads": 42, + "source": "Hugging Face", + "score": -0.09535659719845214, + "first_commit": "2024-07-24 10:09:29", + "latest_commit": "2024-07-24 10:29:47", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.756, + "model_architectures": "WhisperForConditionalGeneration" + }, + { + "description": "Ninja-v1-RP-GGUF 概要 Aratako/Ninja-v1-RPの量子化済みGGUF版です。", + "url": 
"https://huggingface.co./Aratako/Ninja-v1-RP-GGUF", + "project_name": "Ninja-v1-RP-GGUF", + "downloads": 41, + "source": "Hugging Face", + "score": -0.09536631523095114, + "first_commit": "2024-05-20 17:08:50", + "latest_commit": "2024-05-24 15:11:08", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24, "model_architectures": null }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-base", - "project_name": "ruri-reranker-base", - "downloads": 43, + "description": "rinna/nekomata-7b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-7b-instruction.", + "url": "https://huggingface.co./rinna/nekomata-7b-instruction-gguf", + "project_name": "nekomata-7b-instruction-gguf", + "downloads": 41, "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2024-08-20 01:10:40", - "latest_commit": "2024-09-04 08:50:21", + "score": -0.09536631523095114, + "first_commit": "2023-12-19 08:11:08", + "latest_commit": "2024-07-20 08:38:34", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": 7.72, + "model_architectures": null }, { - "description": "bert-japanese_finetuned-sentiment-analysis This model was trained from scratch on the Japanese Sentiment Polarity Dictionary dataset.", - "url": "https://huggingface.co./minutillamolinara/bert-japanese_finetuned-sentiment-analysis", - "project_name": "bert-japanese_finetuned-sentiment-analysis", - "downloads": 43, + "description": "bert-base-japanese-jsnli This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on the JSNLI dataset.", + "url": "https://huggingface.co./Formzu/bert-base-japanese-jsnli", + "project_name": "bert-base-japanese-jsnli", + "downloads": 40, "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2023-03-31 02:28:09", - "latest_commit": "2023-03-31 13:13:37", + "score": -0.09537603326345015, + "first_commit": "2022-10-14 07:50:13", + "latest_commit": "2022-10-18 12:13:20", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "BertForSequenceClassification" }, { - "description": "whisper-large-v2-mix-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-mix-jp to the CTranslate2 model format.", - "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-mix-jp", - "project_name": "faster-whisper-large-v2-mix-jp", - "downloads": 43, + "description": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf lightblueさんが公開しているKarasu-Mixtral-8x22B-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/lightblue-Karasu-Mixtral-8x22B-v0.1-gguf", + "project_name": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf", + "downloads": 40, "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2023-07-07 05:53:52", - "latest_commit": "2023-07-07 17:56:03", + "score": -0.09537603326345015, + "first_commit": "2024-05-07 12:53:56", + "latest_commit": "2024-05-07 18:07:43", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 141.0, "model_architectures": null }, { - "description": "Model trained on 800,000 Japanese sentences after reducing oshizo/japanese-e5-mistral-7b_slerp to 8 layers.", - "url": "https://huggingface.co./oshizo/japanese-e5-mistral-1.9b", - "project_name": "japanese-e5-mistral-1.9b", - "downloads": 43, + "description": 
"studio-ousia/luke-japanese-baseに対して次の変更を加えたモデルです。 ", + "url": "https://huggingface.co./uzabase/luke-japanese-wordpiece-base", + "project_name": "luke-japanese-wordpiece-base", + "downloads": 40, "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2024-02-02 12:39:11", - "latest_commit": "2024-02-03 00:28:28", + "score": -0.09537603326345015, + "first_commit": "2023-08-10 06:04:58", + "latest_commit": "2023-11-28 13:35:07", "languages": [], "model_or_dataset": "model", - "model_size": 1.88, - "model_architectures": "MistralForEmbedding" + "model_size": null, + "model_architectures": "LukeForMaskedLM" }, { - "description": "alpaca-guanaco-japanese-gpt-1b 1.3Bパラメータの日本語GPTモデルを使用した対話AIです。", - "url": "https://huggingface.co./inu-ai/alpaca-guanaco-japanese-gpt-1b", - "project_name": "alpaca-guanaco-japanese-gpt-1b", - "downloads": 43, + "description": "bert-japanese-ner このモデルは日本語の固有表現抽出タスクを目的として、京都大学 黒橋・褚・村脇研究室が公開しているBERT日本語Pretrainedモデルをベースにストックマーク株式会社が公開しているner-wikipedia-datasetでファインチューニングしたものです。 ", + "url": "https://huggingface.co./ken11/bert-japanese-ner", + "project_name": "bert-japanese-ner", + "downloads": 40, "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2023-04-12 00:18:29", - "latest_commit": "2023-04-13 10:25:48", + "score": -0.09537603326345015, + "first_commit": "2021-11-13 16:28:23", + "latest_commit": "2021-11-14 02:34:01", "languages": [], "model_or_dataset": "model", - "model_size": 1.33, - "model_architectures": "GPT2LMHeadModel" + "model_size": null, + "model_architectures": "BertForTokenClassification" }, { - "description": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", - "project_name": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", - "downloads": 43, - "source": "Hugging Face", - "score": -0.09233898970319596, - "first_commit": "2023-09-26 06:15:51", - "latest_commit": "2023-09-27 23:56:05", + "description": "Summary This is a text classifier for assigning a JLPT level.", + "url": "https://huggingface.co./bennexx/cl-tohoku-bert-base-japanese-v3-jlpt-classifier", + "project_name": "cl-tohoku-bert-base-japanese-v3-jlpt-classifier", + "downloads": 40, + "source": "Hugging Face", + "score": -0.09537603326345015, + "first_commit": "2024-01-19 00:32:15", + "latest_commit": "2024-07-10 13:41:08", "languages": [], "model_or_dataset": "model", - "model_size": 0.861, - "model_architectures": "GPTNeoXForCausalLM" + "model_size": null, + "model_architectures": "BertForSequenceClassification" }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-GGUF", - "project_name": "SambaLingo-Japanese-Chat-GGUF", - "downloads": 42, + "description": "以下のデータ源からランダムに抽出した日本語のテキストをもとに、RAG形式のQ&Aを自動生成したものです。 Wikibooks Wikipedia 判例データ instruction datasetとしてではなく、事前学習での利用を想定しています(質疑応答をするための訓練)。 一部の計算には東京工業大学のスーパーコンピュータTSUBAME4.0を利用しました。", + "url": "https://huggingface.co./datasets/kanhatakeyama/CreativeCommons-RAG-QA-Mixtral8x22b", + "project_name": "CreativeCommons-RAG-QA-Mixtral8x22b", + "downloads": 40, "source": "Hugging Face", - "score": -0.0923495946537323, - "first_commit": "2024-03-07 06:38:01", - "latest_commit": "2024-03-07 06:48:27", + "score": -0.09537603326345015, + "first_commit": "2024-07-03 07:54:49", + "latest_commit": "2024-07-12 
06:43:18", "languages": [], - "model_or_dataset": "model", - "model_size": 6.95, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "Japanese to Korean translator Japanese to Korean translator model based on EncoderDecoderModel(bert-japanese+kogpt2)", - "url": "https://huggingface.co./sappho192/aihub-ja-ko-translator", - "project_name": "aihub-ja-ko-translator", - "downloads": 42, + "description": "deberta-large-japanese-wikipedia-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-luw-upos", + "project_name": "deberta-large-japanese-wikipedia-luw-upos", + "downloads": 39, "source": "Hugging Face", - "score": -0.0923495946537323, - "first_commit": "2024-02-05 00:51:47", - "latest_commit": "2024-06-28 06:38:39", + "score": -0.09538575129594914, + "first_commit": "2022-07-06 03:15:12", + "latest_commit": "2024-08-20 17:54:58", "languages": [], "model_or_dataset": "model", - "model_size": 0.265, - "model_architectures": "EncoderDecoderModel" + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "rinna/japanese-data2vec-audio-base Overview This is a Japanese data2vec Audio Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-data2vec-audio-base", - "project_name": "japanese-data2vec-audio-base", - "downloads": 41, + "description": "This is a model for named entity recognition of Japanese medical documents.", + "url": "https://huggingface.co./sociocom/MedNERN-CR-JA", + "project_name": "MedNERN-CR-JA", + "downloads": 39, "source": "Hugging Face", - "score": -0.09236019960426863, - "first_commit": "2024-03-05 10:32:32", - "latest_commit": "2024-07-22 08:12:56", + "score": -0.09538575129594914, + "first_commit": "2023-04-13 08:25:56", + "latest_commit": "2024-02-26 13:53:06", "languages": [], "model_or_dataset": "model", - "model_size": 0.0932, - "model_architectures": "Data2VecAudioModel" + "model_size": 0.11, + "model_architectures": "BertForTokenClassification" }, { "description": "nlp-waseda/bigbird-base-japanese Model description This is a Japanese BigBird base model pretrained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", "url": "https://huggingface.co./nlp-waseda/bigbird-base-japanese", "project_name": "bigbird-base-japanese", - "downloads": 41, + "downloads": 39, "source": "Hugging Face", - "score": -0.09236019960426863, + "score": -0.09538575129594914, "first_commit": "2023-06-03 12:51:12", "latest_commit": "2023-06-20 10:49:17", "languages": [], @@ -10271,70 +10367,98 @@ "model_architectures": "BigBirdForMaskedLM" }, { - "description": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "downloads": 41, + "description": "umiyuki様の Japanese-Chat-Umievo-itr004-7b をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Japanese-Chat-Umievo-itr004-7b-GGUF", + "project_name": "Japanese-Chat-Umievo-itr004-7b-GGUF", + "downloads": 39, "source": "Hugging Face", - "score": -0.09236019960426863, - "first_commit": "2023-09-26 06:14:25", - "latest_commit": "2023-09-27 01:23:34", + "score": -0.09538575129594914, + "first_commit": "2024-05-13 16:28:41", + "latest_commit": "2024-05-13 23:33:49", "languages": [], "model_or_dataset": "model", - 
"model_size": 0.487, - "model_architectures": "GPT2LMHeadModel" + "model_size": 7.24, + "model_architectures": null }, { - "description": "Wikipedia日本語版データセット(izumi-lab/wikipedia-ja-20230720)", - "url": "https://huggingface.co./datasets/shi3z/Japanese_Wikipedia_Conversation", - "project_name": "Japanese_Wikipedia_Conversation", - "downloads": 41, + "description": "Model Card for Model ID", + "url": "https://huggingface.co./Respair/Japanese_Phoneme_to_Grapheme_LLM", + "project_name": "Japanese_Phoneme_to_Grapheme_LLM", + "downloads": 39, "source": "Hugging Face", - "score": -0.09236019960426863, - "first_commit": "2023-11-10 07:36:40", - "latest_commit": "2023-11-10 22:46:29", + "score": -0.09538575129594914, + "first_commit": "2024-09-06 23:01:09", + "latest_commit": "2024-09-09 23:16:12", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 1.54, + "model_architectures": "Qwen2Model" }, { - "description": "つくよみちゃんデータセットを用いて calm-2-7b-chat をファインチューニングしたモデルです。", - "url": "https://huggingface.co./offtoung/tsukuyomi-chan-calm2-7b", - "project_name": "tsukuyomi-chan-calm2-7b", + "description": "TinySlime-1.1B-Chat-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", + "url": "https://huggingface.co./2121-8/TinySlime-1.1B-Chat-v1.0", + "project_name": "TinySlime-1.1B-Chat-v1.0", "downloads": 39, "source": "Hugging Face", - "score": -0.0923814095053413, - "first_commit": "2023-12-21 08:46:37", - "latest_commit": "2023-12-27 04:07:20", + "score": -0.09538575129594914, + "first_commit": "2024-07-02 03:34:30", + "latest_commit": "2024-07-02 08:53:11", "languages": [], "model_or_dataset": "model", - "model_size": 7.01, + "model_size": 1.1, "model_architectures": "LlamaForCausalLM" }, { - "description": "rinna/nekomata-7b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-7b-instruction.", - "url": "https://huggingface.co./rinna/nekomata-7b-instruction-gguf", - "project_name": "nekomata-7b-instruction-gguf", - "downloads": 39, + "description": "line-corporation/japanese-large-lm-1.7b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-1.7b-instruction-sftのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", + "project_name": "line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", + "downloads": 38, "source": "Hugging Face", - "score": -0.0923814095053413, - "first_commit": "2023-12-19 08:11:08", - "latest_commit": "2024-07-20 08:38:34", + "score": -0.09539546932844815, + "first_commit": "2023-09-03 22:30:23", + "latest_commit": "2024-03-24 05:54:56", "languages": [], "model_or_dataset": "model", - "model_size": 7.72, + "model_size": null, "model_architectures": null }, { - "description": "Dataset Details Dataset Type:Japanese LLaVA v1.5", - "url": "https://huggingface.co./datasets/turing-motors/LLaVA-v1.5-Instruct-620K-JA", - "project_name": "LLaVA-v1.5-Instruct-620K-JA", - "downloads": 39, + "description": "Introduction Who am I: Qishen Ha", + "url": "https://huggingface.co./haqishen/h2o-Llama-3-8B-Japanese-Instruct", + "project_name": "h2o-Llama-3-8B-Japanese-Instruct", + "downloads": 38, "source": "Hugging Face", - "score": -0.0923814095053413, - "first_commit": "2024-04-10 05:04:58", - "latest_commit": "2024-04-12 09:18:42", + "score": -0.09539546932844815, + "first_commit": "2024-04-24 07:48:45", + "latest_commit": "2024-06-24 08:57:49", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03, + 
"model_architectures": "LlamaForCausalLM" + }, + { + "description": "roberta-large-japanese-aozora-char Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora-char", + "project_name": "roberta-large-japanese-aozora-char", + "downloads": 38, + "source": "Hugging Face", + "score": -0.09539546932844815, + "first_commit": "2021-12-30 14:19:53", + "latest_commit": "2022-06-22 10:22:43", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "RobertaForMaskedLM" + }, + { + "description": "JSNLI Version 1.1 のデータセットのうち、フィルタリング後の訓練セット (train_w_filtering)", + "url": "https://huggingface.co./datasets/llm-book/jsnli", + "project_name": "jsnli", + "downloads": 38, + "source": "Hugging Face", + "score": -0.09539546932844815, + "first_commit": "2023-06-19 12:31:46", + "latest_commit": "2023-10-25 15:22:46", "languages": [], "model_or_dataset": "dataset", "model_size": null, @@ -10344,9 +10468,9 @@ "description": "This dataset is the data that corrected the translation errors and untranslated data of the Japanese data in MBZUAI/multilingual-llava-bench-in-the-wild.", "url": "https://huggingface.co./datasets/toshi456/llava-bench-in-the-wild-ja", "project_name": "llava-bench-in-the-wild-ja", - "downloads": 39, + "downloads": 38, "source": "Hugging Face", - "score": -0.0923814095053413, + "score": -0.09539546932844815, "first_commit": "2024-03-06 21:56:53", "latest_commit": "2024-04-01 15:15:57", "languages": [], @@ -10355,550 +10479,634 @@ "model_architectures": null }, { - "description": "Sarashina2-7B Instruct sarashina2-7Bを会話できるようにフルファインチューニングしたものです。", - "url": "https://huggingface.co./alfredplpl/sarashina2-7b-it", - "project_name": "sarashina2-7b-it", + "description": "cyberagent/calm2-7b-chatの出力を人手でチェック・修正することで作成した日本語Instructionデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/jimba-instuction-1k-beta", + "project_name": "jimba-instuction-1k-beta", "downloads": 38, "source": "Hugging Face", - "score": -0.09239201445587764, - "first_commit": "2024-06-12 02:24:28", - "latest_commit": "2024-06-12 03:00:35", + "score": -0.09539546932844815, + "first_commit": "2024-02-29 15:23:48", + "latest_commit": "2024-04-25 12:49:28", "languages": [], - "model_or_dataset": "model", - "model_size": 7.32, - "model_architectures": "LlamaForCausalLM" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "zenz-v1 zenz-v1はGPT-2アーキテクチャに基づくかな漢字変換タスクに特化した言語モデルです。", - "url": "https://huggingface.co./Miwa-Keita/zenz-v1", - "project_name": "zenz-v1", + "description": "Dataset overview This dataset identifies whether a GitHub repository description pertains to Japanese natural language processing (NLP).", + "url": "https://huggingface.co./datasets/taishi-i/awesome-japanese-nlp-classification-dataset", + "project_name": "awesome-japanese-nlp-classification-dataset", "downloads": 38, "source": "Hugging Face", - "score": -0.09239201445587764, - "first_commit": "2024-05-12 15:48:46", - "latest_commit": "2024-05-13 16:34:02", + "score": -0.09539546932844815, + "first_commit": "2023-09-09 06:37:36", + "latest_commit": "2023-09-09 20:09:04", "languages": [], - "model_or_dataset": "model", - "model_size": 0.09509999999999999, - "model_architectures": "GPT2LMHeadModel" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "One more step before getting this model.", - "url": "https://huggingface.co./rinna/japanese-stable-diffusion", - 
"project_name": "japanese-stable-diffusion", - "downloads": 38, + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-large", + "project_name": "ruri-reranker-stage1-large", + "downloads": 37, "source": "Hugging Face", - "score": -0.09239201445587764, - "first_commit": null, - "latest_commit": null, + "score": -0.09540518736094715, + "first_commit": "2024-08-19 23:48:54", + "latest_commit": "2024-09-04 08:54:05", "languages": [], "model_or_dataset": "model", + "model_size": 0.337, + "model_architectures": "BertForSequenceClassification" + }, + { + "description": "Dataset Summary JMultiWOZ is a large-scale Japanese multi-domain task-oriented dialogue dataset.", + "url": "https://huggingface.co./datasets/nu-dialogue/jmultiwoz", + "project_name": "jmultiwoz", + "downloads": 37, + "source": "Hugging Face", + "score": -0.09540518736094715, + "first_commit": "2024-02-29 15:38:29", + "latest_commit": "2024-03-13 02:15:37", + "languages": [], + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "以下のデータ源からランダムに抽出した日本語のテキストをもとに、RAG形式のQ&Aを自動生成したものです。 Wikibooks Wikipedia 判例データ instruction datasetとしてではなく、事前学習での利用を想定しています(質疑応答をするための訓練)。 一部の計算には東京工業大学のスーパーコンピュータTSUBAME4.0を利用しました。", - "url": "https://huggingface.co./datasets/kanhatakeyama/CreativeCommons-RAG-QA-Mixtral8x22b", - "project_name": "CreativeCommons-RAG-QA-Mixtral8x22b", - "downloads": 38, + "description": "Dataset Summary SNOW T15:The simplified corpus for the Japanese language.", + "url": "https://huggingface.co./datasets/SNOW-NLP/snow_simplified_japanese_corpus", + "project_name": "snow_simplified_japanese_corpus", + "downloads": 37, "source": "Hugging Face", - "score": -0.09239201445587764, - "first_commit": "2024-07-03 07:54:49", - "latest_commit": "2024-07-12 06:43:18", + "score": -0.09540518736094715, + "first_commit": "2022-01-25 16:36:23", + "latest_commit": "2024-01-18 11:16:01", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "ELECTRA base Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-base-japanese-discriminator", - "project_name": "electra-base-japanese-discriminator", - "downloads": 37, + "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59363 Validation Metrics Loss: 0.12651239335536957 Accuracy: 0.9532079853817648 Precision: 0.9729688278823665 Recall: 0.9744633462616643 AUC: 0.9717333684823413 F1: 0.9737155136027014 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", + "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59363", + "project_name": "autonlp-japanese-sentiment-59363", + "downloads": 36, "source": "Hugging Face", - "score": -0.09240261940641396, - "first_commit": "2021-11-15 17:39:41", - "latest_commit": "2022-12-09 00:43:19", + "score": -0.09541490539344616, + "first_commit": "2021-04-21 11:28:24", + "latest_commit": "2021-05-18 22:56:15", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForPreTraining" + "model_architectures": "BertForSequenceClassification" + }, + { + 
"description": "ltgbert-base-japanese-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/ltgbert-base-japanese-ud-goeswith", + "project_name": "ltgbert-base-japanese-ud-goeswith", + "downloads": 36, + "source": "Hugging Face", + "score": -0.09541490539344616, + "first_commit": "2024-09-13 16:29:53", + "latest_commit": "2024-09-14 07:34:21", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LtgbertForTokenClassification" + }, + { + "description": "nlp-waseda/gpt2-small-japanese This model is Japanese GPT-2 pretrained on Japanese Wikipedia and CC-100.", + "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese", + "project_name": "gpt2-small-japanese", + "downloads": 35, + "source": "Hugging Face", + "score": -0.09542462342594515, + "first_commit": "2022-03-30 03:34:11", + "latest_commit": "2022-03-30 04:28:17", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "GPT2LMHeadModel" + }, + { + "description": "deberta-base-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora", + "project_name": "deberta-base-japanese-aozora", + "downloads": 35, + "source": "Hugging Face", + "score": -0.09542462342594515, + "first_commit": "2022-05-24 04:30:28", + "latest_commit": "2023-01-08 08:41:04", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "This is a BERT Base model for emotion analysis in Japanese additionally fine-tuned for emotion detection and classification.", - "url": "https://huggingface.co./alter-wang/bert-base-japanese-emotion-lily", - "project_name": "bert-base-japanese-emotion-lily", - "downloads": 37, + "description": "deberta-large-japanese-wikipedia Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia", + "project_name": "deberta-large-japanese-wikipedia", + "downloads": 35, "source": "Hugging Face", - "score": -0.09240261940641396, - "first_commit": "2024-04-25 06:05:51", - "latest_commit": "2024-06-17 01:44:16", + "score": -0.09542462342594515, + "first_commit": "2022-07-05 22:01:16", + "latest_commit": "2023-02-27 10:15:35", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": null, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "This is a model for named entity recognition of Japanese medical documents.", - "url": "https://huggingface.co./sociocom/MedNERN-CR-JA", - "project_name": "MedNERN-CR-JA", - "downloads": 37, + "description": "deberta-base-japanese-wikipedia-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-ud-goeswith", + "project_name": "deberta-base-japanese-wikipedia-ud-goeswith", + "downloads": 35, "source": "Hugging Face", - "score": -0.09240261940641396, - "first_commit": "2023-04-13 08:25:56", - "latest_commit": "2024-02-26 13:53:06", + "score": -0.09542462342594515, + "first_commit": "2022-09-18 06:02:55", + "latest_commit": "2024-08-20 19:38:50", "languages": [], "model_or_dataset": "model", - "model_size": 0.11, - "model_architectures": "BertForTokenClassification" + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Miwa-Keita/zenz-v1-checkpoints を optimum 用に ONNX に変換したモデルです。", - "url": 
"https://huggingface.co./p1atdev/zenz-v1-onnx", - "project_name": "zenz-v1-onnx", - "downloads": 36, + "description": "Fine-tuned Japanese Wav2Vec2 model for speech recognition using XLSR-53 large Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using Common Voice, JVS and JSUT.", + "url": "https://huggingface.co./Ivydata/wav2vec2-large-xlsr-53-japanese", + "project_name": "wav2vec2-large-xlsr-53-japanese", + "downloads": 35, "source": "Hugging Face", - "score": -0.09241322435695029, - "first_commit": "2024-06-29 03:03:03", - "latest_commit": "2024-06-29 03:40:34", + "score": -0.09542462342594515, + "first_commit": "2023-05-11 08:47:29", + "latest_commit": "2023-05-12 02:15:39", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "Japanese-LLaMA-2-13B-GGUF Japanese-LLaMA-2-13B-GGUFはJapanese-LLaMA-2-13BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-2-13b-gguf", - "project_name": "japanese-llama-2-13b-gguf", - "downloads": 36, + "description": "Mixtral-8x7B-v0.1-japanese Mixtral-8x7B-v0.1-japaneseはMixtral-8x7B-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-v0.1-japanese", + "project_name": "Mixtral-8x7B-v0.1-japanese", + "downloads": 35, "source": "Hugging Face", - "score": -0.09241322435695029, - "first_commit": "2023-12-20 05:37:09", - "latest_commit": "2023-12-26 11:45:15", + "score": -0.09542462342594515, + "first_commit": "2024-04-16 03:06:14", + "latest_commit": "2024-04-20 09:14:10", "languages": [], "model_or_dataset": "model", - "model_size": 13.3, - "model_architectures": null + "model_size": 46.9, + "model_architectures": "MixtralForCausalLM" }, { - "description": "Dataset Summary SNOW T15:The simplified corpus for the Japanese language.", - "url": "https://huggingface.co./datasets/SNOW-NLP/snow_simplified_japanese_corpus", - "project_name": "snow_simplified_japanese_corpus", - "downloads": 36, + "description": "electra-base-cyberbullying This is an ELECTRA Small model for the Japanese language finetuned for automatic cyberbullying detection.", + "url": "https://huggingface.co./kit-nlp/electra-small-japanese-discriminator-cyberbullying", + "project_name": "electra-small-japanese-discriminator-cyberbullying", + "downloads": 35, "source": "Hugging Face", - "score": -0.09241322435695029, - "first_commit": "2022-01-25 16:36:23", - "latest_commit": "2024-01-18 11:16:01", + "score": -0.09542462342594515, + "first_commit": "2022-09-09 02:43:59", + "latest_commit": "2022-11-01 07:14:15", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "ElectraForSequenceClassification" }, { - "description": "NVIDIA が公開している SteerLM 向けのトライアルデータセット HelpSteerを日本語に自動翻訳したデータセットになります。", - "url": "https://huggingface.co./datasets/kunishou/HelpSteer-35k-ja", - "project_name": "HelpSteer-35k-ja", - "downloads": 36, + "description": "Kurage Kurage is a multipurpose RAG model from Lightblue.", + "url": "https://huggingface.co./lightblue/kurage-ja", + "project_name": "kurage-ja", + "downloads": 34, "source": "Hugging Face", - "score": -0.09241322435695029, - "first_commit": "2024-03-02 16:45:19", - "latest_commit": "2024-03-03 10:10:54", + "score": -0.09543434145844416, + "first_commit": "2024-09-11 03:39:10", + "latest_commit": "2024-09-16 08:12:19", "languages": [], - "model_or_dataset": "dataset", - "model_size": 
null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 7.61, + "model_architectures": "Qwen2ForCausalLM" }, { - "description": "luke-large-defamation-detection-japanese 日本語誹謗中傷検出器", - "url": "https://huggingface.co./kubota/luke-large-defamation-detection-japanese", - "project_name": "luke-large-defamation-detection-japanese", - "downloads": 35, + "description": "bert-base-japanese-v3-jcommonsenseqa 「大規模言語モデル入門」の第5章で紹介している(多肢選択式質問応答)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jcommonsenseqa", + "project_name": "bert-base-japanese-v3-jcommonsenseqa", + "downloads": 34, "source": "Hugging Face", - "score": -0.09242382930748663, - "first_commit": "2023-01-23 06:25:08", - "latest_commit": "2023-02-07 15:49:33", + "score": -0.09543434145844416, + "first_commit": "2023-06-20 07:01:53", + "latest_commit": "2023-07-24 06:49:16", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LukeForSequenceClassification" + "model_architectures": "BertForMultipleChoice" }, { - "description": "This is my conversion of NilanE/ParallelFiction-Ja_En-100k into json which can be read by text-generation-webui when training a model.", - "url": "https://huggingface.co./datasets/mpasila/ParallelFiction-Ja_En-100k-json", - "project_name": "ParallelFiction-Ja_En-100k-json", - "downloads": 35, + "description": "Japanese-LLaMA-2-7B-GGUF Japanese-LLaMA-2-7B-GGUFはJapanese-LLaMA-2-7BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-llama-2-7b-gguf", + "project_name": "japanese-llama-2-7b-gguf", + "downloads": 34, "source": "Hugging Face", - "score": -0.09242382930748663, - "first_commit": "2024-03-26 06:08:27", - "latest_commit": "2024-04-02 04:46:10", + "score": -0.09543434145844416, + "first_commit": "2024-01-22 03:00:02", + "latest_commit": "2024-06-05 02:30:01", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 6.97, "model_architectures": null }, { - "description": "BERT large Japanese (character-level tokenization with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char", - "project_name": "bert-large-japanese-char", + "description": "deberta-large-japanese-unidic Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic", + "project_name": "deberta-large-japanese-unidic", "downloads": 34, "source": "Hugging Face", - "score": -0.09243443425802296, - "first_commit": "2021-03-05 06:36:24", - "latest_commit": "2021-09-23 15:45:39", + "score": -0.09543434145844416, + "first_commit": "2022-06-10 12:49:12", + "latest_commit": "2022-06-19 09:15:35", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "yacis-electra-small-cyberbullying", - "url": "https://huggingface.co./ptaszynski/yacis-electra-small-japanese-cyberbullying", - "project_name": "yacis-electra-small-japanese-cyberbullying", + "description": "roberta-base-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora", + "project_name": "roberta-base-japanese-aozora", "downloads": 34, "source": "Hugging Face", - "score": -0.09243443425802296, - "first_commit": "2022-01-12 03:57:13", - "latest_commit": "2022-01-16 13:51:28", + "score": -0.09543434145844416, + "first_commit": "2021-12-21 00:04:03", + "latest_commit": 
"2022-10-15 14:20:11", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForSequenceClassification" + "model_architectures": "RobertaForMaskedLM" }, { - "description": "Fine-tuned XLSR-53 large model for speech diarization in Japanese phone-call 2 speakers diarization model which was fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using phone-call data CallHome.", - "url": "https://huggingface.co./Ivydata/wav2vec2-large-speech-diarization-jp", - "project_name": "wav2vec2-large-speech-diarization-jp", + "description": "JP Voice-Text Dataset for", + "url": "https://huggingface.co./datasets/deepghs/fgo_voices_jp", + "project_name": "fgo_voices_jp", "downloads": 34, "source": "Hugging Face", - "score": -0.09243443425802296, - "first_commit": "2023-05-08 10:10:43", - "latest_commit": "2023-05-10 00:32:23", + "score": -0.09543434145844416, + "first_commit": "2024-08-28 08:56:04", + "latest_commit": "2024-08-28 09:14:22", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "Wav2Vec2ForAudioFrameClassification" + "model_architectures": null }, { - "description": "deberta-large-japanese-wikipedia-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-head", - "project_name": "deberta-large-japanese-wikipedia-ud-head", + "description": "alpaca-guanaco-japanese-gpt-1b 1.3Bパラメータの日本語GPTモデルを使用した対話AIです。", + "url": "https://huggingface.co./inu-ai/alpaca-guanaco-japanese-gpt-1b", + "project_name": "alpaca-guanaco-japanese-gpt-1b", "downloads": 33, "source": "Hugging Face", - "score": -0.0924450392085593, - "first_commit": "2022-07-06 03:51:14", - "latest_commit": "2024-08-20 19:51:21", + "score": -0.09544405949094317, + "first_commit": "2023-04-12 00:18:29", + "latest_commit": "2023-04-13 10:25:48", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.33, + "model_architectures": "GPT2LMHeadModel" + }, + { + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-base-short", + "project_name": "t5-base-short", + "downloads": 33, + "source": "Hugging Face", + "score": -0.09544405949094317, + "first_commit": "2023-04-26 08:20:52", + "latest_commit": "2023-05-10 10:00:23", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Tanuki-8x8B-dpo-v1.0-GPTQ-8bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8x8B-dpo-v1.0のGPTQ 8bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8x8B-dpo-v1.0-GPTQ-8bit", - "project_name": "Tanuki-8x8B-dpo-v1.0-GPTQ-8bit", + "description": "モデル説明 (model explanation) V1 = MoeDiffusion 1.0 + (HassanBlend 1.5 - VMix03) * 0.2 V2 = MoeDiffusion 0.6 : HassanBlend 1.5 0.2 : VMix03 : 0.2 マージ元のルーツにNAIリークやInsta系モデルが含まれるという噂があるので、NAIリークアンチ・Insta系モデルアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", + "url": "https://huggingface.co./ThePioneer/MoeDiffusionPlusPlus", + "project_name": "MoeDiffusionPlusPlus", "downloads": 33, "source": "Hugging Face", - "score": -0.0924450392085593, - "first_commit": "2024-08-28 02:30:27", - "latest_commit": "2024-09-03 09:26:02", + "score": -0.09544405949094317, + "first_commit": "2023-01-19 13:04:02", + "latest_commit": "2023-01-21 02:05:54", "languages": [], "model_or_dataset": "model", "model_size": null, - 
"model_architectures": "TanukiForCausalLM" + "model_architectures": null }, { - "description": "deberta-small-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-upos", - "project_name": "deberta-small-japanese-upos", + "description": "bert-large-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-unidic-luw-upos", + "project_name": "bert-large-japanese-unidic-luw-upos", "downloads": 33, "source": "Hugging Face", - "score": -0.0924450392085593, - "first_commit": "2022-05-23 23:55:56", - "latest_commit": "2024-07-26 15:38:41", + "score": -0.09544405949094317, + "first_commit": "2022-02-13 01:00:41", + "latest_commit": "2023-11-05 18:44:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "BertForTokenClassification" }, { - "description": "electra-base-cyberbullying This is a BERT Base model for the Japanese language finetuned for automatic cyberbullying detection.", - "url": "https://huggingface.co./kit-nlp/bert-base-japanese-basic-char-v2-cyberbullying", - "project_name": "bert-base-japanese-basic-char-v2-cyberbullying", + "description": "ELECTRA base Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-base-japanese-discriminator", + "project_name": "electra-base-japanese-discriminator", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2022-09-08 09:09:39", - "latest_commit": "2022-11-01 07:20:52", + "score": -0.09545377752344217, + "first_commit": "2021-11-15 17:39:41", + "latest_commit": "2022-12-09 00:43:19", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_architectures": "ElectraForPreTraining" }, { - "description": "Yaki-Dofu-Mix 概要 / Overview Yaki-Dofu-Mixは、アニメ風の画風に特化したマージモデルです。 ", - "url": "https://huggingface.co./Vsukiyaki/Yaki-Dofu-Mix", - "project_name": "Yaki-Dofu-Mix", + "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/bert-small-japanese-fin", + "project_name": "bert-small-japanese-fin", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2023-12-23 09:26:20", - "latest_commit": "2023-12-24 11:07:09", + "score": -0.09545377752344217, + "first_commit": "2021-10-04 13:15:37", + "latest_commit": "2022-12-09 00:41:24", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "BertForMaskedLM" }, { - "description": "mathstral-7B-v0.1-gguf mistralaiさんが公開しているmathstral-7B-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/mathstral-7B-v0.1-gguf", - "project_name": "mathstral-7B-v0.1-gguf", + "description": "ku-nlp/roberta-large-japanese-char-wwm Model description This is a Japanese RoBERTa large model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./ku-nlp/roberta-large-japanese-char-wwm", + "project_name": "roberta-large-japanese-char-wwm", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2024-07-17 17:49:56", - "latest_commit": "2024-07-17 18:54:27", + "score": -0.09545377752344217, + "first_commit": "2022-09-18 08:10:44", + 
"latest_commit": "2023-03-19 01:58:12", "languages": [], "model_or_dataset": "model", - "model_size": 7.25, - "model_architectures": null + "model_size": 0.323, + "model_architectures": "RobertaForMaskedLM" }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-large", - "project_name": "ruri-reranker-stage1-large", + "description": "deberta-v3-base-japanese-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-v3-base-japanese-ud-goeswith", + "project_name": "deberta-v3-base-japanese-ud-goeswith", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2024-08-19 23:48:54", - "latest_commit": "2024-09-04 08:54:05", + "score": -0.09545377752344217, + "first_commit": "2024-05-21 11:42:12", + "latest_commit": "2024-09-12 23:31:12", "languages": [], "model_or_dataset": "model", - "model_size": 0.337, - "model_architectures": "BertForSequenceClassification" + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "The English document is here. ", - "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft", - "project_name": "Watashiha-Llama-2-13B-Ogiri-sft", + "description": "nlp-waseda/gpt2-xl-japanese This is Japanese GPT2 with approximately 1.5B parameters pretrained on Japanese Wikipedia and CC-100", + "url": "https://huggingface.co./nlp-waseda/gpt2-xl-japanese", + "project_name": "gpt2-xl-japanese", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2024-01-19 06:59:08", - "latest_commit": "2024-03-04 05:24:31", + "score": -0.09545377752344217, + "first_commit": "2022-11-30 04:33:31", + "latest_commit": "2023-06-21 04:29:10", "languages": [], "model_or_dataset": "model", - "model_size": 13.1, - "model_architectures": "LlamaForCausalLM" + "model_size": 1.61, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "You agree to the terms of the LICENSE when using this dataset. 
", - "url": "https://huggingface.co./datasets/litagin/ehehe-corpus", - "project_name": "ehehe-corpus", + "description": "deberta-large-japanese-unidic-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-ud-head", + "project_name": "deberta-large-japanese-unidic-ud-head", "downloads": 32, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": null, - "latest_commit": null, + "score": -0.09545377752344217, + "first_commit": "2022-06-19 00:10:56", + "latest_commit": "2023-11-05 17:51:08", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "DebertaV2ForQuestionAnswering" }, { - "description": "Wikipediaを用いた日本語の固有表現抽出データセット GitHub: https://github.com/stockmarkteam/ner-wikipedia-dataset/ LICENSE: CC-BY-SA 3.0 Developed by Stockmark Inc.", - "url": "https://huggingface.co./datasets/stockmark/ner-wikipedia-dataset", - "project_name": "ner-wikipedia-dataset", - "downloads": 32, + "description": "日本語 gpt2 蒸留モデル このモデルはrinna/japanese-gpt2-meduimを教師として蒸留したものです。 ", + "url": "https://huggingface.co./knok/japanese-distilgpt2", + "project_name": "japanese-distilgpt2", + "downloads": 31, "source": "Hugging Face", - "score": -0.09245564415909563, - "first_commit": "2023-09-02 14:38:55", - "latest_commit": "2023-09-02 14:42:18", + "score": -0.09546349555594118, + "first_commit": "2022-04-14 09:32:23", + "latest_commit": "2022-04-15 06:00:51", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "deberta-base-japanese-unidic Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic", - "project_name": "deberta-base-japanese-unidic", + "description": "このモデルはluke-japanese-largeをファインチューニングして、JCommonsenseQA(選択式応答)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-large-commonsenseqa-japanese", + "project_name": "luke-large-commonsenseqa-japanese", "downloads": 31, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2022-06-08 08:05:33", - "latest_commit": "2022-06-18 23:02:31", + "score": -0.09546349555594118, + "first_commit": "2023-02-05 16:17:54", + "latest_commit": "2023-02-05 17:04:47", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": "LukeForMultipleChoice" }, { - "description": "alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000", - "project_name": "jmedroberta-base-manbyo-wordpiece-vocab50000", + "description": "bert-base-japanese-char-extended Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-char-extended", + "project_name": "bert-base-japanese-char-extended", "downloads": 31, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2022-12-22 17:19:15", - "latest_commit": "2023-03-08 01:47:12", + "score": -0.09546349555594118, + "first_commit": "2021-08-26 22:44:12", + "latest_commit": "2022-06-21 07:21:54", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "BertForMaskedLM" }, { - "description": 
"このモデルはdeberta-v2-base-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-ner", - "project_name": "deberta-v2-base-japanese-finetuned-ner", - "downloads": 31, + "description": "deberta-large-japanese-wikipedia-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-goeswith", + "project_name": "deberta-large-japanese-wikipedia-ud-goeswith", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2023-01-20 05:57:37", - "latest_commit": "2023-03-27 08:05:06", + "score": -0.09547321358844017, + "first_commit": "2022-09-18 08:41:06", + "latest_commit": "2023-05-12 01:29:13", "languages": [], "model_or_dataset": "model", - "model_size": 0.112, + "model_size": null, "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Japanese-LLaMA-2-7B-GGUF Japanese-LLaMA-2-7B-GGUFはJapanese-LLaMA-2-7BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-2-7b-gguf", - "project_name": "japanese-llama-2-7b-gguf", - "downloads": 31, + "description": "whisper-large-v2-mix-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-mix-jp to the CTranslate2 model format.", + "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-mix-jp", + "project_name": "faster-whisper-large-v2-mix-jp", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2024-01-22 03:00:02", - "latest_commit": "2024-06-05 02:30:01", + "score": -0.09547321358844017, + "first_commit": "2023-07-07 05:53:52", + "latest_commit": "2023-07-07 17:56:03", "languages": [], "model_or_dataset": "model", - "model_size": 6.97, + "model_size": null, "model_architectures": null }, { - "description": "Mistral-Large-Instruct-2407-gguf mistralaiさんが公開しているMistral-Large-Instruct-2407のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Mistral-Large-Instruct-2407-gguf", - "project_name": "Mistral-Large-Instruct-2407-gguf", - "downloads": 31, + "description": "Tanuki-ZeRo-gguf kanhatakeyamaさんが公開しているTanuki-ZeRoのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Tanuki-ZeRo-gguf", + "project_name": "Tanuki-ZeRo-gguf", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2024-07-24 18:59:58", - "latest_commit": "2024-07-26 12:21:45", + "score": -0.09547321358844017, + "first_commit": "2024-03-30 10:49:02", + "latest_commit": "2024-03-30 17:01:16", "languages": [], "model_or_dataset": "model", - "model_size": 123.0, + "model_size": 13.1, "model_architectures": null }, { - "description": "Japanese-LLaMA-3-8B-Instruct-v2-GGUF Japanese-LLaMA-3-8B-Instruct-v2-GGUFはJapanese-LLaMA-3-8B-Instruct-v2のGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2-gguf", - "project_name": "japanese-llama-3-8b-instruct-v2-gguf", - "downloads": 31, + "description": "roberta-large-japanese-juman-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-juman-ud-goeswith", + "project_name": "roberta-large-japanese-juman-ud-goeswith", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2024-06-10 11:21:01", - "latest_commit": "2024-06-21 06:35:03", + "score": -0.09547321358844017, + "first_commit": "2023-02-21 06:38:32", + "latest_commit": "2024-08-30 14:49:26", "languages": [], "model_or_dataset": 
"model", - "model_size": 8.03, - "model_architectures": null + "model_size": null, + "model_architectures": "RobertaForTokenClassification" }, { - "description": "Deepreneur-blue-lizard Model Description Deepreneur-blue-lizardは、MetaのLlama-2-7bに対して、Wikipediaや書籍等の日本語の学習データを用いて追加事前学習と独自データによるファインチューニングを実施したモデルです。", - "url": "https://huggingface.co./Deepreneur/blue-lizard", - "project_name": "blue-lizard", - "downloads": 31, + "description": "deberta-base-japanese-juman-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith", + "project_name": "deberta-base-japanese-juman-ud-goeswith", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2024-02-05 16:29:48", - "latest_commit": "2024-02-12 14:43:33", + "score": -0.09547321358844017, + "first_commit": "2023-02-05 06:48:32", + "latest_commit": "2023-05-12 01:16:53", "languages": [], "model_or_dataset": "model", - "model_size": 6.74, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Japanese-Alpaca-2-13B-GGUF Japanese-Alpaca-2-13B-GGUFはJapanese-Alpaca-2-13BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-alpaca-2-13b-gguf", - "project_name": "japanese-alpaca-2-13b-gguf", - "downloads": 31, + "description": "bert-large-japanese-char-extended Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-char-extended", + "project_name": "bert-large-japanese-char-extended", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2023-12-20 10:56:08", - "latest_commit": "2023-12-26 11:46:41", + "score": -0.09547321358844017, + "first_commit": "2021-06-04 13:29:34", + "latest_commit": "2024-08-20 17:45:37", "languages": [], "model_or_dataset": "model", - "model_size": 13.3, + "model_size": null, + "model_architectures": "BertForMaskedLM" + }, + { + "description": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、約10000件の日本語instruction tuning用データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", + "project_name": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", + "downloads": 30, + "source": "Hugging Face", + "score": -0.09547321358844017, + "first_commit": "2024-07-05 13:53:45", + "latest_commit": "2024-07-05 13:57:08", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "deberta-large-japanese-wikipedia-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-goeswith", - "project_name": "deberta-large-japanese-wikipedia-ud-goeswith", - "downloads": 31, + "description": "Japanese-Heron-Bench Dataset Description Japanese-Heron-Bench is a benchmark for evaluating Japanese VLMs (Vision-Language Models).", + "url": "https://huggingface.co./datasets/turing-motors/Japanese-Heron-Bench", + "project_name": "Japanese-Heron-Bench", + "downloads": 30, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2022-09-18 08:41:06", - "latest_commit": "2023-05-12 01:29:13", + "score": -0.09547321358844017, + "first_commit": "2024-04-12 01:54:01", + "latest_commit": "2024-04-12 08:59:36", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": 
"DebertaV2ForTokenClassification" + "model_architectures": null }, { - "description": "deberta-base-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora", - "project_name": "deberta-base-japanese-aozora", - "downloads": 31, + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-Umievo-Shizuko-sqlcoder-2x8B", + "project_name": "Llama-3-Umievo-Shizuko-sqlcoder-2x8B", + "downloads": 29, "source": "Hugging Face", - "score": -0.09246624910963197, - "first_commit": "2022-05-24 04:30:28", - "latest_commit": "2023-01-08 08:41:04", + "score": -0.09548293162093918, + "first_commit": "2024-06-09 12:17:00", + "latest_commit": "2024-06-11 07:39:45", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": 13.7, + "model_architectures": "MixtralForCausalLM" }, { - "description": "deberta-base-japanese-juman-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith", - "project_name": "deberta-base-japanese-juman-ud-goeswith", - "downloads": 30, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-LongChat", + "project_name": "Orion-14B-LongChat", + "downloads": 29, "source": "Hugging Face", - "score": -0.0924768540601683, - "first_commit": "2023-02-05 06:48:32", - "latest_commit": "2023-05-12 01:16:53", + "score": -0.09548293162093918, + "first_commit": "2024-01-19 07:15:36", + "latest_commit": "2024-03-26 10:10:34", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "OrionForCausalLM" }, { - "description": "This dataset was created by automatically translating \"OpenAssistant/oasst1\" into Japanese.", - "url": "https://huggingface.co./datasets/kunishou/oasst1-89k-ja", - "project_name": "oasst1-89k-ja", - "downloads": 30, + "description": "つくよみちゃんデータセットを用いて calm-2-7b-chat をファインチューニングしたモデルです。", + "url": "https://huggingface.co./offtoung/tsukuyomi-chan-calm2-7b", + "project_name": "tsukuyomi-chan-calm2-7b", + "downloads": 29, "source": "Hugging Face", - "score": -0.0924768540601683, - "first_commit": "2023-05-06 09:12:30", - "latest_commit": "2024-04-01 17:15:31", + "score": -0.09548293162093918, + "first_commit": "2023-12-21 08:46:37", + "latest_commit": "2023-12-27 04:07:20", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 7.01, + "model_architectures": "LlamaForCausalLM" }, { - "description": "deberta-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-upos", - "project_name": "deberta-large-japanese-upos", + "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-base Fine-tuned openai/whisper-base on Japanese using Common Voice, JVS and JSUT.", + "url": "https://huggingface.co./Ivydata/whisper-base-japanese", + "project_name": "whisper-base-japanese", "downloads": 29, "source": "Hugging Face", - "score": -0.09248745901070464, - "first_commit": "2022-05-27 06:50:55", - "latest_commit": "2024-07-26 16:00:59", + "score": -0.09548293162093918, + "first_commit": "2023-05-17 04:36:41", + "latest_commit": "2023-06-08 00:17:50", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": 
"DebertaV2ForTokenClassification" + "model_architectures": "WhisperForConditionalGeneration" }, { "description": "roberta-base-japanese-juman-ud-goeswith Model Description", @@ -10906,7 +11114,7 @@ "project_name": "roberta-base-japanese-juman-ud-goeswith", "downloads": 29, "source": "Hugging Face", - "score": -0.09248745901070464, + "score": -0.09548293162093918, "first_commit": "2023-02-21 03:43:52", "latest_commit": "2024-08-30 14:46:25", "languages": [], @@ -10915,656 +11123,670 @@ "model_architectures": "RobertaForTokenClassification" }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-LongChat", - "project_name": "Orion-14B-LongChat", - "downloads": 29, + "description": "roberta-small-japanese-aozora-char Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-aozora-char", + "project_name": "roberta-small-japanese-aozora-char", + "downloads": 28, "source": "Hugging Face", - "score": -0.09248745901070464, - "first_commit": "2024-01-19 07:15:36", - "latest_commit": "2024-03-26 10:10:34", + "score": -0.09549264965343818, + "first_commit": "2021-12-23 02:38:26", + "latest_commit": "2021-12-23 11:55:42", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "OrionForCausalLM" + "model_architectures": "RobertaForMaskedLM" }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Chat", - "project_name": "SambaLingo-Japanese-Chat", - "downloads": 29, + "description": "deberta-small-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-aozora", + "project_name": "deberta-small-japanese-aozora", + "downloads": 28, "source": "Hugging Face", - "score": -0.09248745901070464, - "first_commit": "2024-02-15 22:45:08", - "latest_commit": "2024-04-16 22:32:15", + "score": -0.09549264965343818, + "first_commit": "2022-05-23 04:58:53", + "latest_commit": "2023-01-15 15:25:14", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DebertaV2ForMaskedLM" + }, + { + "description": "deberta-large-japanese-wikipedia-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-head", + "project_name": "deberta-large-japanese-wikipedia-ud-head", + "downloads": 28, + "source": "Hugging Face", + "score": -0.09549264965343818, + "first_commit": "2022-07-06 03:51:14", + "latest_commit": "2024-08-20 19:51:21", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DebertaV2ForQuestionAnswering" + }, + { + "description": "roberta-base-japanese-aozora-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora-ud-goeswith", + "project_name": "roberta-base-japanese-aozora-ud-goeswith", + "downloads": 28, + "source": "Hugging Face", + "score": -0.09549264965343818, + "first_commit": "2022-10-15 04:01:29", + "latest_commit": "2024-08-20 18:49:41", "languages": [], "model_or_dataset": "model", - "model_size": 6.95, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "RobertaForTokenClassification" }, { - "description": "deberta-base-japanese-wikipedia-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-ud-head", - 
"project_name": "deberta-base-japanese-wikipedia-ud-head", - "downloads": 29, + "description": "deberta-base-japanese-aozora-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-goeswith", + "project_name": "deberta-base-japanese-aozora-ud-goeswith", + "downloads": 28, "source": "Hugging Face", - "score": -0.09248745901070464, - "first_commit": "2022-06-25 13:03:09", - "latest_commit": "2024-08-20 19:47:27", + "score": -0.09549264965343818, + "first_commit": "2022-10-14 09:43:58", + "latest_commit": "2024-08-20 18:52:19", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "モデル説明 (model explanation) V1 = MoeDiffusion 1.0 + (HassanBlend 1.5 - VMix03) * 0.2 V2 = MoeDiffusion 0.6 : HassanBlend 1.5 0.2 : VMix03 : 0.2 マージ元のルーツにNAIリークやInsta系モデルが含まれるという噂があるので、NAIリークアンチ・Insta系モデルアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", - "url": "https://huggingface.co./ThePioneer/MoeDiffusionPlusPlus", - "project_name": "MoeDiffusionPlusPlus", + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-small-long", + "project_name": "t5-small-long", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2023-01-19 13:04:02", - "latest_commit": "2023-01-21 02:05:54", + "score": -0.09549264965343818, + "first_commit": "2023-04-26 08:26:49", + "latest_commit": "2023-05-10 10:01:29", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "japanese-gpt-1b-PII-masking Model Description japanese-gpt-1b-PII-masking は、 日本語事前学習済み1B GPTモデルをベースとして、日本語の文章から個人情報をマスキングするように学習したモデルです。 ", - "url": "https://huggingface.co./cameltech/japanese-gpt-1b-PII-masking", - "project_name": "japanese-gpt-1b-PII-masking", + "description": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", + "project_name": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-04-05 07:26:29", - "latest_commit": "2024-05-17 11:42:00", + "score": -0.09549264965343818, + "first_commit": "2023-09-26 06:16:23", + "latest_commit": "2023-09-28 00:02:06", "languages": [], "model_or_dataset": "model", - "model_size": 1.3, - "model_architectures": "GPT2LMHeadModel" + "model_size": 1.17, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged Mixtral-8x7B-Instruct-v0.1-japanese-alpha-mergedはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施した学習途中のモデルに対して、差分マージを実施したモデルです。", - "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", - "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", + "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", + "url": "https://huggingface.co./if001/tiny_mixtral_ja", + "project_name": "tiny_mixtral_ja", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-04-16 07:54:14", - 
"latest_commit": "2024-04-20 09:14:59", + "score": -0.09549264965343818, + "first_commit": "2024-01-22 15:02:21", + "latest_commit": "2024-01-23 00:42:05", "languages": [], "model_or_dataset": "model", - "model_size": 46.9, + "model_size": 0.276, "model_architectures": "MixtralForCausalLM" }, { - "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", - "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "description": "HODACHI様の Llama-3-EZO-8b-Common-it をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama-3-EZO-8b-Common-it-GGUF", + "project_name": "Llama-3-EZO-8b-Common-it-GGUF", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-07-26 21:06:54", - "latest_commit": "2024-07-26 21:37:02", + "score": -0.09549264965343818, + "first_commit": "2024-07-15 11:58:12", + "latest_commit": "2024-07-15 20:08:22", "languages": [], "model_or_dataset": "model", - "model_size": 11.0, - "model_architectures": "LlamaForCausalLM" + "model_size": 8.03, + "model_architectures": null }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-small", - "project_name": "ruri-reranker-stage1-small", + "description": "Llama-3-8B-Instruct-JP-nk2t-v0.3 Model Details: Built with Meta Llama 3 llama-3-8bの日本語継続学習モデルにChatVectorを適用し、さらにQLoraでファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./nk2t/Llama-3-8B-Instruct-japanese-nk2t-v0.3", + "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.3", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-08-19 09:44:00", - "latest_commit": "2024-09-04 08:53:02", + "score": -0.09549264965343818, + "first_commit": "2024-05-15 12:24:06", + "latest_commit": "2024-05-22 11:02:28", "languages": [], "model_or_dataset": "model", - "model_size": 0.06870000000000001, - "model_architectures": "DistilBertForSequenceClassification" + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "deberta-base-japanese-unidic-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic-ud-head", - "project_name": "deberta-base-japanese-unidic-ud-head", + "description": "deberta-large-japanese-juman-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-juman-ud-goeswith", + "project_name": "deberta-large-japanese-juman-ud-goeswith", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2022-06-18 10:20:24", - "latest_commit": "2024-08-20 20:09:13", + "score": -0.09549264965343818, + "first_commit": "2023-02-05 13:24:47", + "latest_commit": "2024-08-30 14:27:11", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Kendamarron/jimba-wiki-instruction-calm3 grapevine-AI/CALM3-22B-Chat-GGUFのQ4_K_Mを使った合成instructionデータセットです。 ", - "url": "https://huggingface.co./datasets/Kendamarron/jimba-wiki-instruction-calm3", - "project_name": "jimba-wiki-instruction-calm3", + "description": "electra-base-cyberbullying This is a BERT Base model for the Japanese language finetuned for automatic cyberbullying detection.", + "url": 
"https://huggingface.co./kit-nlp/bert-base-japanese-basic-char-v2-cyberbullying", + "project_name": "bert-base-japanese-basic-char-v2-cyberbullying", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-07-09 22:18:35", - "latest_commit": "2024-07-20 12:57:05", + "score": -0.09549264965343818, + "first_commit": "2022-09-08 09:09:39", + "latest_commit": "2022-11-01 07:20:52", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "BertForSequenceClassification" }, { - "description": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、約10000件の日本語instruction tuning用データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", - "project_name": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", + "description": "タイトルから記事本文を生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-article-generation", + "project_name": "t5-base-japanese-article-generation", "downloads": 28, "source": "Hugging Face", - "score": -0.09249806396124097, - "first_commit": "2024-07-05 13:53:45", - "latest_commit": "2024-07-05 13:57:08", + "score": -0.09549264965343818, + "first_commit": "2021-04-03 13:55:25", + "latest_commit": "2024-04-17 11:39:12", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 0.223, + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Summary This is a text classifier for assigning a JLPT level.", - "url": "https://huggingface.co./bennexx/cl-tohoku-bert-base-japanese-v3-jlpt-classifier", - "project_name": "cl-tohoku-bert-base-japanese-v3-jlpt-classifier", - "downloads": 27, + "description": "日本語ByT5事前学習済みモデル This is a ByT5 (a tokenizer-free extension of the Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/byt5-small-japanese", + "project_name": "byt5-small-japanese", + "downloads": 28, "source": "Hugging Face", - "score": -0.0925086689117773, - "first_commit": "2024-01-19 00:32:15", - "latest_commit": "2024-07-10 13:41:08", + "score": -0.09549264965343818, + "first_commit": "2021-06-04 13:14:22", + "latest_commit": "2021-09-23 18:29:53", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_architectures": "MT5ForConditionGeneration" }, { - "description": "alabnii/jmedroberta-base-sentencepiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece-vocab50000", - "project_name": "jmedroberta-base-sentencepiece-vocab50000", - "downloads": 27, + "description": "Dataset Preprocessing Supported Tasks and Leaderboards Languages 注釈はすべて日本語を主要言語としています。", + "url": "https://huggingface.co./datasets/shunk031/jsnli", + "project_name": "jsnli", + "downloads": 28, "source": "Hugging Face", - "score": -0.0925086689117773, - "first_commit": "2022-12-22 17:22:14", - "latest_commit": "2023-06-27 03:44:17", + "score": -0.09549264965343818, + "first_commit": "2022-12-01 01:31:32", + "latest_commit": "2022-12-12 16:36:58", "languages": [], - "model_or_dataset": "model", - "model_size": 0.124, - "model_architectures": "BertForMaskedLM" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "モデル ベースモデル:microsoft/Phi-3-mini-4k-instruct 学習データセット:llm-jp/hh-rlhf-12k-ja 学習方式:フルパラメータチューニング サンプル import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", trust_remote_code=True, ) model = AutoModelForCausalLM.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", device_map=\"auto\", torch_dtype='auto', trust_remote_code=True, ) text = \"<|user|>\\n与えられた質問に対して英語で思考し、日本語で答えてください。", - "url": "https://huggingface.co./ryota39/Phi-3-mini-4k-instruct-dpo", - "project_name": "Phi-3-mini-4k-instruct-dpo", - "downloads": 27, + "description": "固有表現ラベルはllm-book/ner-wikipedia-datasetと同様のものを採用しており、全部で8種類 (人名、法人名、地名、製品名、政治的組織名、施設名、その他の組織名、イベント名)あります。 ", + "url": "https://huggingface.co./datasets/llm-book/ner-wikinews-dataset", + "project_name": "ner-wikinews-dataset", + "downloads": 28, "source": "Hugging Face", - "score": -0.0925086689117773, - "first_commit": "2024-04-24 16:21:32", - "latest_commit": "2024-05-01 07:41:46", + "score": -0.09549264965343818, + "first_commit": "2023-04-22 14:32:21", + "latest_commit": "2023-12-12 11:22:26", "languages": [], - "model_or_dataset": "model", - "model_size": 3.82, - "model_architectures": "Phi3ForCausalLM" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "Heron BLIP Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v0", - "project_name": "heron-chat-blip-ja-stablelm-base-7b-v0", - "downloads": 27, + "description": "llm-japanese-dataset-vanilla LLM構築用の日本語チャットデータセット izumi-lab/llm-japanese-dataset から,日英翻訳のデータセット等を抜いたものです. 
", + "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset-vanilla", + "project_name": "llm-japanese-dataset-vanilla", + "downloads": 28, "source": "Hugging Face", - "score": -0.0925086689117773, - "first_commit": "2023-09-06 09:31:44", - "latest_commit": "2023-09-07 16:59:14", + "score": -0.09549264965343818, + "first_commit": "2023-05-23 14:45:27", + "latest_commit": "2024-02-17 16:17:18", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "VideoBlipForConditionalGeneration" + "model_architectures": null }, { - "description": "日本語ByT5事前学習済みモデル This is a ByT5 (a tokenizer-free extension of the Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", - "url": "https://huggingface.co./sonoisa/byt5-small-japanese", - "project_name": "byt5-small-japanese", + "description": "roberta-large-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-luw-upos", + "project_name": "roberta-large-japanese-luw-upos", "downloads": 27, "source": "Hugging Face", - "score": -0.0925086689117773, - "first_commit": "2021-06-04 13:14:22", - "latest_commit": "2021-09-23 18:29:53", + "score": -0.09550236768593719, + "first_commit": "2021-12-26 13:51:46", + "latest_commit": "2024-08-20 18:34:07", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MT5ForConditionGeneration" + "model_architectures": "RobertaForTokenClassification" }, { - "description": "deberta-base-japanese-aozora-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-goeswith", - "project_name": "deberta-base-japanese-aozora-ud-goeswith", - "downloads": 26, + "description": "deberta-base-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-luw-upos", + "project_name": "deberta-base-japanese-luw-upos", + "downloads": 27, "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2022-10-14 09:43:58", - "latest_commit": "2024-08-20 18:52:19", + "score": -0.09550236768593719, + "first_commit": "2022-05-24 06:55:47", + "latest_commit": "2024-08-20 19:21:57", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Swallow-8Bは追加の日本語継続事前学習により日本語が大変流暢なLlama-3派生モデルです。", - "url": "https://huggingface.co./aixsatoshi/Meta-Llama-3.1-8B-Instruct-plus-Swallow", - "project_name": "Meta-Llama-3.1-8B-Instruct-plus-Swallow", - "downloads": 26, - "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2024-07-24 03:10:38", - "latest_commit": "2024-07-24 04:03:21", - "languages": [], - "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "Tanuki-8B-dpo-v1.0-GPTQ-4bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGPTQ 4bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GPTQ-4bit", - "project_name": "Tanuki-8B-dpo-v1.0-GPTQ-4bit", - "downloads": 26, + "description": "deberta-large-japanese-aozora-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora-ud-goeswith", + "project_name": "deberta-large-japanese-aozora-ud-goeswith", + "downloads": 27, "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2024-08-27 16:17:17", - 
"latest_commit": "2024-09-03 09:29:10", + "score": -0.09550236768593719, + "first_commit": "2022-10-14 11:35:36", + "latest_commit": "2024-08-20 19:20:44", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "データセットについて オープンソースLLMの出力を人手でチェック・修正したinstructionにSwallow-MXでoutputを生成したデータセットです。 ", - "url": "https://huggingface.co./datasets/Kendamarron/pret-a-porter-instruction-v0.1", - "project_name": "pret-a-porter-instruction-v0.1", - "downloads": 26, + "description": "Japanese-Alpaca-2-13B-GGUF Japanese-Alpaca-2-13B-GGUFはJapanese-Alpaca-2-13BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-alpaca-2-13b-gguf", + "project_name": "japanese-alpaca-2-13b-gguf", + "downloads": 27, "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2024-03-26 13:08:14", - "latest_commit": "2024-04-01 04:30:44", + "score": -0.09550236768593719, + "first_commit": "2023-12-20 10:56:08", + "latest_commit": "2023-12-26 11:46:41", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 13.3, "model_architectures": null }, { - "description": "固有表現ラベルはllm-book/ner-wikipedia-datasetと同様のものを採用しており、全部で8種類 (人名、法人名、地名、製品名、政治的組織名、施設名、その他の組織名、イベント名)あります。 ", - "url": "https://huggingface.co./datasets/llm-book/ner-wikinews-dataset", - "project_name": "ner-wikinews-dataset", - "downloads": 26, + "description": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-instruct-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", + "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", + "downloads": 27, "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2023-04-22 14:32:21", - "latest_commit": "2023-12-12 11:22:26", + "score": -0.09550236768593719, + "first_commit": "2024-01-16 12:23:01", + "latest_commit": "2024-01-16 12:27:54", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "llm-japanese-dataset-vanilla LLM構築用の日本語チャットデータセット izumi-lab/llm-japanese-dataset から,日英翻訳のデータセット等を抜いたものです. 
", - "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset-vanilla", - "project_name": "llm-japanese-dataset-vanilla", - "downloads": 26, + "description": "rinna/nekomata-14b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-14b-instruction.", + "url": "https://huggingface.co./rinna/nekomata-14b-instruction-gguf", + "project_name": "nekomata-14b-instruction-gguf", + "downloads": 27, "source": "Hugging Face", - "score": -0.09251927386231364, - "first_commit": "2023-05-23 14:45:27", - "latest_commit": "2024-02-17 16:17:18", + "score": -0.09550236768593719, + "first_commit": "2023-12-19 08:12:06", + "latest_commit": "2024-07-20 08:34:05", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, + "model_or_dataset": "model", + "model_size": 14.2, "model_architectures": null }, { - "description": "roberta-base-japanese-aozora-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora-ud-goeswith", - "project_name": "roberta-base-japanese-aozora-ud-goeswith", - "downloads": 25, + "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-ner", + "project_name": "deberta-v2-base-japanese-finetuned-ner", + "downloads": 27, "source": "Hugging Face", - "score": -0.09252987881284996, - "first_commit": "2022-10-15 04:01:29", - "latest_commit": "2024-08-20 18:49:41", + "score": -0.09550236768593719, + "first_commit": "2023-01-20 05:57:37", + "latest_commit": "2023-03-27 08:05:06", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "RobertaForTokenClassification" + "model_size": 0.112, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "bart-base-japanese-news(base-sized model)", - "url": "https://huggingface.co./stockmark/bart-base-japanese-news", - "project_name": "bart-base-japanese-news", - "downloads": 25, + "description": "J-ResearchCorpus Update: 2024/3/16言語処理学会第30回年次大会(NLP2024)を含む、論文 1,343 本のデータを追加 2024/2/25言語処理学会誌「自然言語処理」のうち CC-BY-4.0 で公開されている論文 360 本のデータを追加 概要 CC-BY-* ライセンスで公開されている日本語論文や学会誌等から抜粋した高品質なテキストのデータセットです。", + "url": "https://huggingface.co./datasets/kunishou/J-ResearchCorpus", + "project_name": "J-ResearchCorpus", + "downloads": 27, "source": "Hugging Face", - "score": -0.09252987881284996, - "first_commit": "2023-01-20 04:23:07", - "latest_commit": "2023-12-08 03:39:50", + "score": -0.09550236768593719, + "first_commit": "2024-02-12 14:03:42", + "latest_commit": "2024-03-16 07:55:08", "languages": [], - "model_or_dataset": "model", - "model_size": 0.125, - "model_architectures": "BartForConditionalGeneration" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "GPT-Neo 1.3B pre-trained model for Japanese Model Description GPT2/GPT3 like model trained on Japanese.corpus.", - "url": "https://huggingface.co./yellowback/gpt-neo-japanese-1.3B", - "project_name": "gpt-neo-japanese-1.3B", - "downloads": 24, + "description": "This pre-trained model is work in progress!", + "url": "https://huggingface.co./naclbit/gpt-j-japanese-6.8b", + "project_name": "gpt-j-japanese-6.8b", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2021-12-09 08:09:40", - "latest_commit": "2021-12-09 17:59:05", + "score": -0.09551208571843618, + "first_commit": "2021-10-17 08:02:54", + "latest_commit": "2021-11-10 15:28:57", 
"languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPTNeoForCausalLM" + "model_architectures": "GPTJForCausalLM" }, { - "description": "deberta-large-japanese-unidic Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic", - "project_name": "deberta-large-japanese-unidic", - "downloads": 24, + "description": "t5-base-japanese-web-8k (with Byte-fallback, 8K) Description megagonlabs/t5-base-japanese-web-8k is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", + "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web-8k", + "project_name": "t5-base-japanese-web-8k", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2022-06-10 12:49:12", - "latest_commit": "2022-06-19 09:15:35", + "score": -0.09551208571843618, + "first_commit": "2021-09-06 10:13:42", + "latest_commit": "2023-07-04 07:05:38", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "t5-base-xlsum-ja", - "url": "https://huggingface.co./p1atdev/t5-base-xlsum-ja", - "project_name": "t5-base-xlsum-ja", - "downloads": 24, + "description": "yuyuyui-chatbot", + "url": "https://huggingface.co./ushikado/yuyuyui-chatbot", + "project_name": "yuyuyui-chatbot", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2023-10-06 03:18:28", - "latest_commit": "2023-11-20 09:25:16", + "score": -0.09551208571843618, + "first_commit": "2021-05-04 14:52:12", + "latest_commit": "2021-05-23 13:27:10", "languages": [], "model_or_dataset": "model", - "model_size": 0.248, - "model_architectures": "T5ForConditionalGeneration" + "model_size": null, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "transformer-lm-japanese-0.1b", - "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-0.1b", - "project_name": "transformer-lm-japanese-0.1b", - "downloads": 24, + "description": "deberta-small-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-upos", + "project_name": "deberta-small-japanese-upos", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2023-07-12 02:11:11", - "latest_commit": "2024-06-03 06:17:19", + "score": -0.09551208571843618, + "first_commit": "2022-05-23 23:55:56", + "latest_commit": "2024-07-26 15:38:41", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "TransformerLMForCausalLM" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "deberta-large-japanese-unidic-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-ud-head", - "project_name": "deberta-large-japanese-unidic-ud-head", - "downloads": 24, + "description": "deberta-base-japanese-wikipedia-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-ud-head", + "project_name": "deberta-base-japanese-wikipedia-ud-head", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2022-06-19 00:10:56", - "latest_commit": "2023-11-05 17:51:08", + "score": -0.09551208571843618, + "first_commit": "2022-06-25 13:03:09", + "latest_commit": "2024-08-20 19:47:27", "languages": [], "model_or_dataset": "model", "model_size": 
null, "model_architectures": "DebertaV2ForQuestionAnswering" }, { - "description": "roberta-base-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora", - "project_name": "roberta-base-japanese-aozora", - "downloads": 24, + "description": "deberta-base-japanese-unidic-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic-ud-head", + "project_name": "deberta-base-japanese-unidic-ud-head", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2021-12-21 00:04:03", - "latest_commit": "2022-10-15 14:20:11", + "score": -0.09551208571843618, + "first_commit": "2022-06-18 10:20:24", + "latest_commit": "2024-08-20 20:09:13", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": "DebertaV2ForQuestionAnswering" }, { - "description": "ShareGPT-Processed The RyokoAI/ShareGPT52K dataset, converted to Markdown and labeled with the language used.", - "url": "https://huggingface.co./datasets/zetavg/ShareGPT-Processed", - "project_name": "ShareGPT-Processed", - "downloads": 24, + "description": "bert-base-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-luw-upos", + "project_name": "bert-base-japanese-luw-upos", + "downloads": 26, "source": "Hugging Face", - "score": -0.0925404837633863, - "first_commit": "2023-05-16 19:50:04", - "latest_commit": "2023-05-21 03:50:14", + "score": -0.09551208571843618, + "first_commit": "2021-10-26 13:26:38", + "latest_commit": "2022-09-18 19:43:18", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "BertForTokenClassification" }, { - "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59363 Validation Metrics Loss: 0.12651239335536957 Accuracy: 0.9532079853817648 Precision: 0.9729688278823665 Recall: 0.9744633462616643 AUC: 0.9717333684823413 F1: 0.9737155136027014 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", - "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59363", - "project_name": "autonlp-japanese-sentiment-59363", - "downloads": 23, + "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-small Fine-tuned openai/whisper-small on Japanese using Common Voice, JVS and JSUT.", + "url": "https://huggingface.co./Ivydata/whisper-small-japanese", + "project_name": "whisper-small-japanese", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2021-04-21 11:28:24", - "latest_commit": "2021-05-18 22:56:15", + "score": -0.09552180375093519, + "first_commit": "2023-05-19 10:42:27", + "latest_commit": "2023-05-19 10:50:13", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForSequenceClassification" + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "モデル説明 (model explanation) YaguruMagiku 0.6 : AbyssOrangeMix2_sfw 0.4 マージ元のルーツにNAIリークが含まれるという噂があるので、NAIリークアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", - "url": 
"https://huggingface.co./ThePioneer/MoeDiffusion", - "project_name": "MoeDiffusion", - "downloads": 23, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./nold/Orion-14B-Base-GGUF", + "project_name": "Orion-14B-Base-GGUF", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2023-01-18 11:14:31", - "latest_commit": "2023-01-21 02:10:41", + "score": -0.09552180375093519, + "first_commit": "2024-03-07 14:56:51", + "latest_commit": "2024-03-07 19:33:53", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 14.5, "model_architectures": null }, { - "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-base Fine-tuned openai/whisper-base on Japanese using Common Voice, JVS and JSUT.", - "url": "https://huggingface.co./Ivydata/whisper-base-japanese", - "project_name": "whisper-base-japanese", - "downloads": 23, + "description": "japanese-gpt-1b-PII-masking Model Description japanese-gpt-1b-PII-masking は、 日本語事前学習済み1B GPTモデルをベースとして、日本語の文章から個人情報をマスキングするように学習したモデルです。 ", + "url": "https://huggingface.co./cameltech/japanese-gpt-1b-PII-masking", + "project_name": "japanese-gpt-1b-PII-masking", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2023-05-17 04:36:41", - "latest_commit": "2023-06-08 00:17:50", + "score": -0.09552180375093519, + "first_commit": "2024-04-05 07:26:29", + "latest_commit": "2024-05-17 11:42:00", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "WhisperForConditionalGeneration" + "model_size": 1.3, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-Umievo-Shizuko-sqlcoder-2x8B", - "project_name": "Llama-3-Umievo-Shizuko-sqlcoder-2x8B", - "downloads": 23, + "description": "Overview This model is based on rinna's [rinna/llama-3-youko-8b], fine-tuned using LoRA on a small number of parallel sentences from English to Japanese.", + "url": "https://huggingface.co./lyu-boxuan/llama-3-youko-8b-En-Ja-MT-LoRA", + "project_name": "llama-3-youko-8b-En-Ja-MT-LoRA", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2024-06-09 12:17:00", - "latest_commit": "2024-06-11 07:39:45", + "score": -0.09552180375093519, + "first_commit": "2024-05-10 14:33:57", + "latest_commit": "2024-05-21 14:54:46", "languages": [], "model_or_dataset": "model", - "model_size": 13.7, - "model_architectures": "MixtralForCausalLM" + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "deberta-base-japanese-wikipedia-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-ud-goeswith", - "project_name": "deberta-base-japanese-wikipedia-ud-goeswith", - "downloads": 23, + "description": "Deepreneur-blue-lizard Model Description Deepreneur-blue-lizardは、MetaのLlama-2-7bに対して、Wikipediaや書籍等の日本語の学習データを用いて追加事前学習と独自データによるファインチューニングを実施したモデルです。", + "url": "https://huggingface.co./Deepreneur/blue-lizard", + "project_name": "blue-lizard", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2022-09-18 06:02:55", - "latest_commit": "2024-08-20 19:38:50", + "score": -0.09552180375093519, + "first_commit": "2024-02-05 16:29:48", + "latest_commit": "2024-02-12 14:43:33", "languages": [], "model_or_dataset": "model", - "model_size": 
null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_size": 6.74, + "model_architectures": "LlamaForCausalLM" }, { - "description": "deberta-large-japanese-aozora-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora-ud-head", - "project_name": "deberta-large-japanese-aozora-ud-head", - "downloads": 23, + "description": "transformer-lm-japanese-0.1b", + "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-0.1b", + "project_name": "transformer-lm-japanese-0.1b", + "downloads": 25, "source": "Hugging Face", - "score": -0.09255108871392263, - "first_commit": "2022-06-17 15:00:25", - "latest_commit": "2023-03-04 20:17:12", + "score": -0.09552180375093519, + "first_commit": "2023-07-12 02:11:11", + "latest_commit": "2024-06-03 06:17:19", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + "model_architectures": "TransformerLMForCausalLM" }, { - "description": "名言推論モデル", - "url": "https://huggingface.co./Momerio/meigen_generate_Japanese", - "project_name": "meigen_generate_Japanese", - "downloads": 22, + "description": "deberta-base-japanese-unidic Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic", + "project_name": "deberta-base-japanese-unidic", + "downloads": 25, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2021-10-13 15:30:14", - "latest_commit": "2021-10-26 01:19:59", + "score": -0.09552180375093519, + "first_commit": "2022-06-08 08:05:33", + "latest_commit": "2022-06-18 23:02:31", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "deberta-large-japanese-wikipedia Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia", - "project_name": "deberta-large-japanese-wikipedia", - "downloads": 22, + "description": "japanese-soseki-gpt2-1b", + "url": "https://huggingface.co./jweb/japanese-soseki-gpt2-1b", + "project_name": "japanese-soseki-gpt2-1b", + "downloads": 25, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2022-07-05 22:01:16", - "latest_commit": "2023-02-27 10:15:35", + "score": -0.09552180375093519, + "first_commit": "2022-03-03 04:53:15", + "latest_commit": "2023-03-27 12:09:04", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "japanese-sexual-moderation-v2は、studio-ousia/luke-japanese-large-liteをファインチューニングしたモデルです。", - "url": "https://huggingface.co./oshizo/japanese-sexual-moderation-v2", - "project_name": "japanese-sexual-moderation-v2", - "downloads": 22, + "description": "bert-base-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-unidic-luw-upos", + "project_name": "bert-base-japanese-unidic-luw-upos", + "downloads": 25, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2024-01-03 04:58:17", - "latest_commit": "2024-01-03 07:09:05", + "score": -0.09552180375093519, + "first_commit": "2022-02-13 01:00:01", + "latest_commit": "2023-11-05 18:44:10", "languages": [], "model_or_dataset": "model", - "model_size": 0.41400000000000003, - "model_architectures": "LukeForSequenceClassification" + "model_size": null, + "model_architectures": 
"BertForTokenClassification" }, { - "description": "rinna/nekomata-14b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-14b-instruction.", - "url": "https://huggingface.co./rinna/nekomata-14b-instruction-gguf", - "project_name": "nekomata-14b-instruction-gguf", - "downloads": 22, + "description": "JEMHopQA JEMHopQA (Japanese Explainable Multi-hop Question Answering) is a Japanese multi-hop QA dataset that can evaluate internal reasoning.", + "url": "https://huggingface.co./datasets/sbintuitions/JEMHopQA", + "project_name": "JEMHopQA", + "downloads": 25, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2023-12-19 08:12:06", - "latest_commit": "2024-07-20 08:34:05", + "score": -0.09552180375093519, + "first_commit": "2024-06-25 16:26:47", + "latest_commit": "2024-09-13 05:38:43", "languages": [], - "model_or_dataset": "model", - "model_size": 14.2, + "model_or_dataset": "dataset", + "model_size": null, "model_architectures": null }, { - "description": "llm-jp-13b-instruct-lora-jaster-v1.0", - "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-v1.0", - "project_name": "llm-jp-13b-instruct-lora-jaster-v1.0", - "downloads": 22, + "description": "Heron BLIP Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1", + "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1", + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2023-10-18 18:53:58", - "latest_commit": "2023-10-20 08:41:20", + "score": -0.09553152178343419, + "first_commit": "2024-02-20 11:32:57", + "latest_commit": "2024-02-27 13:57:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "VideoBlipForConditionalGeneration" }, { - "description": "nlp-waseda/gpt2-xl-japanese This is Japanese GPT2 with approximately 1.5B parameters pretrained on Japanese Wikipedia and CC-100", - "url": "https://huggingface.co./nlp-waseda/gpt2-xl-japanese", - "project_name": "gpt2-xl-japanese", - "downloads": 22, + "description": "Japanese GPT2 Lyric Model Model description", + "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-medium", + "project_name": "gpt2-japanese-lyric-medium", + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2022-11-30 04:33:31", - "latest_commit": "2023-06-21 04:29:10", + "score": -0.09553152178343419, + "first_commit": "2022-07-08 13:28:12", + "latest_commit": "2023-10-21 14:53:57", "languages": [], "model_or_dataset": "model", - "model_size": 1.61, + "model_size": 0.361, "model_architectures": "GPT2LMHeadModel" }, { - "description": "bert-large-japanese-unidic-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-unidic-luw-upos", - "project_name": "bert-large-japanese-unidic-luw-upos", - "downloads": 22, + "description": "Barba Barba is a multilingual natural language inference model for textual entailment and zero-shot text classification, available as an end-to-end service through TensorFlow Serving.", + "url": "https://huggingface.co./hyperonym/barba", + "project_name": "barba", + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2022-02-13 01:00:41", - "latest_commit": "2023-11-05 18:44:20", + "score": -0.09553152178343419, + "first_commit": "2023-04-29 06:27:12", + "latest_commit": "2023-04-29 13:45:12", "languages": [], "model_or_dataset": 
"model", "model_size": null, - "model_architectures": "BertForTokenClassification" + "model_architectures": "XLMRobertaForSequenceClassification" }, { - "description": "jpn-heb source group: Japanese target group:", - "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-he", - "project_name": "opus-mt-ja-he", - "downloads": 22, + "description": "roberta-base-japanese-aozora-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora-ud-head", + "project_name": "roberta-base-japanese-aozora-ud-head", + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2020-08-19 00:28:58", - "latest_commit": "2023-08-16 11:59:12", + "score": -0.09553152178343419, + "first_commit": "2022-06-21 05:21:38", + "latest_commit": "2024-08-20 19:52:34", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MarianMTModel" + "model_architectures": "RobertaForQuestionAnswering" }, { - "description": "Heron GIT Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-git-ja-stablelm-base-7b-v1", - "project_name": "heron-chat-git-ja-stablelm-base-7b-v1", - "downloads": 22, + "description": "deberta-large-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-luw-upos", + "project_name": "deberta-large-japanese-unidic-luw-upos", + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, - "first_commit": "2024-03-29 09:09:32", - "latest_commit": "2024-05-02 07:55:57", + "score": -0.09553152178343419, + "first_commit": "2022-06-10 12:53:45", + "latest_commit": "2024-08-20 20:16:18", "languages": [], "model_or_dataset": "model", - "model_size": 7.32, - "model_architectures": "GitJapaneseStableLMAlphaForCausalLM" + "model_size": null, + "model_architectures": "DebertaV2ForTokenClassification" }, { "description": "Dataset Summary From the official README.md: CAMERA (CyberAgent Multimodal Evaluation for Ad Text GeneRAtion) is the Japanese ad text generation dataset.", "url": "https://huggingface.co./datasets/creative-graphic-design/CAMERA", "project_name": "CAMERA", - "downloads": 22, + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, + "score": -0.09553152178343419, "first_commit": "2023-03-17 23:02:32", "latest_commit": "2023-03-17 23:49:35", "languages": [], @@ -11576,9 +11798,9 @@ "description": "Dataset Description This is the Japanese Translation version of sciq.", "url": "https://huggingface.co./datasets/izumi-lab/sciq-ja-mbartm2m", "project_name": "sciq-ja-mbartm2m", - "downloads": 22, + "downloads": 24, "source": "Hugging Face", - "score": -0.09256169366445897, + "score": -0.09553152178343419, "first_commit": "2023-05-19 02:03:47", "latest_commit": "2023-05-19 03:54:18", "languages": [], @@ -11587,40 +11809,110 @@ "model_architectures": null }, { - "description": "jpn-msa source group: Japanese target group: Malay (macrolanguage) OPUS readme: jpn-msa model: transformer-align source language(s): jpn jpn_Hani jpn_Hira jpn_Kana target language(s): ind", - "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-ms", - "project_name": "opus-mt-ja-ms", - "downloads": 21, + "description": "Heron BLIP Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", + "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", + "downloads": 23, "source": "Hugging Face", - "score": -0.0925722986149953, 
- "first_commit": "2020-08-19 00:29:11", - "latest_commit": "2023-08-16 11:59:16", + "score": -0.0955412398159332, + "first_commit": "2024-02-27 13:48:02", + "latest_commit": "2024-02-27 13:59:23", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MarianMTModel" + "model_architectures": "VideoBlipForConditionalGeneration" }, { - "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/bert-small-japanese-fin", - "project_name": "bert-small-japanese-fin", - "downloads": 21, + "description": "Google's mt5-base fine-tuned in Japanese to summarize patent claims in a limited Pharmaceutical domain. ", + "url": "https://huggingface.co./kz/mt5base-finetuned-patentsum-japanese-small", + "project_name": "mt5base-finetuned-patentsum-japanese-small", + "downloads": 23, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": "2021-10-04 13:15:37", - "latest_commit": "2022-12-09 00:41:24", + "score": -0.0955412398159332, + "first_commit": "2021-04-10 00:31:15", + "latest_commit": "2022-05-19 06:50:32", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": "MT5ForConditionalGeneration" + }, + { + "description": "roberta-large-japanese-aozora-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora-ud-head", + "project_name": "roberta-large-japanese-aozora-ud-head", + "downloads": 23, + "source": "Hugging Face", + "score": -0.0955412398159332, + "first_commit": "2022-06-22 00:49:08", + "latest_commit": "2024-08-20 19:54:48", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "RobertaForQuestionAnswering" + }, + { + "description": "モデル説明 (model explanation) YaguruMagiku 0.6 : AbyssOrangeMix2_sfw 0.4 マージ元のルーツにNAIリークが含まれるという噂があるので、NAIリークアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", + "url": "https://huggingface.co./ThePioneer/MoeDiffusion", + "project_name": "MoeDiffusion", + "downloads": 23, + "source": "Hugging Face", + "score": -0.0955412398159332, + "first_commit": "2023-01-18 11:14:31", + "latest_commit": "2023-01-21 02:10:41", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": null + }, + { + "description": "日本語でtrainingしたllama2 model size: 417.12M trainingは以下のscript参照https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")", + "url": "https://huggingface.co./if001/llama2_ja_small", + "project_name": "llama2_ja_small", + "downloads": 23, + "source": "Hugging Face", + "score": -0.0955412398159332, + "first_commit": "2023-10-11 09:11:41", + "latest_commit": "2023-10-14 13:50:54", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "range3/cc100-ja This dataset consists of parquet files from the cc100 dataset with only the Japanese language extracted and sharded.", + "url": "https://huggingface.co./datasets/range3/cc100-ja", + "project_name": "cc100-ja", + "downloads": 23, + "source": "Hugging Face", + "score": -0.0955412398159332, + "first_commit": "2023-02-04 05:10:34", + "latest_commit": 
"2023-02-04 05:43:32", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "roberta-small-japanese-char-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-char-luw-upos", + "project_name": "roberta-small-japanese-char-luw-upos", + "downloads": 22, + "source": "Hugging Face", + "score": -0.09555095784843219, + "first_commit": "2021-12-23 02:47:23", + "latest_commit": "2024-08-20 18:36:17", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "RobertaForTokenClassification" }, { "description": "deberta-base-japanese-upos Model Description", "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-upos", "project_name": "deberta-base-japanese-upos", - "downloads": 21, + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, + "score": -0.09555095784843219, "first_commit": "2022-05-24 08:12:05", "latest_commit": "2024-07-26 15:59:24", "languages": [], @@ -11629,434 +11921,490 @@ "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "roberta-large-japanese-juman-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-juman-ud-goeswith", - "project_name": "roberta-large-japanese-juman-ud-goeswith", - "downloads": 21, + "description": "bart-base-japanese This model is converted from the original Japanese BART Pretrained model released by Kyoto University.", + "url": "https://huggingface.co./Formzu/bart-base-japanese", + "project_name": "bart-base-japanese", + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": "2023-02-21 06:38:32", - "latest_commit": "2024-08-30 14:49:26", + "score": -0.09555095784843219, + "first_commit": "2022-10-31 06:52:38", + "latest_commit": "2022-11-07 11:13:39", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForTokenClassification" + "model_architectures": "MBartForConditionalGeneration" }, { - "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", - "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-base", - "project_name": "ruri-reranker-stage1-base", - "downloads": 21, + "description": "Model Card for Model ID Fine tunned ASR model from distil-whisper/distil-large-v2.", + "url": "https://huggingface.co./spow12/Visual-novel-transcriptor", + "project_name": "Visual-novel-transcriptor", + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": "2024-08-19 16:14:12", - "latest_commit": "2024-09-04 08:52:18", + "score": -0.09555095784843219, + "first_commit": "2024-04-15 01:43:08", + "latest_commit": "2024-08-12 12:39:52", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": 0.756, + "model_architectures": "WhisperForConditionalGeneration" }, { - "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/30876467ad5a8a81821f", - "url": "https://huggingface.co./sonoisa/t5-qiita-title-generation", - "project_name": "t5-qiita-title-generation", - "downloads": 21, + "description": "zenz-v1 zenz-v1はGPT-2アーキテクチャに基づくかな漢字変換タスクに特化した言語モデルです。", + "url": "https://huggingface.co./Miwa-Keita/zenz-v1", + "project_name": "zenz-v1", + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": 
"2021-10-17 14:46:56", - "latest_commit": "2022-02-21 13:39:01", + "score": -0.09555095784843219, + "first_commit": "2024-05-12 15:48:46", + "latest_commit": "2024-05-13 16:34:02", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.09509999999999999, + "model_architectures": "GPT2LMHeadModel" + }, + { + "description": "Tanuki-8B-dpo-v1.0-4k-AWQ 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0-4kのAWQ 4bit量子化モデルです。", + "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-4k-AWQ", + "project_name": "Tanuki-8B-dpo-v1.0-4k-AWQ", + "downloads": 22, + "source": "Hugging Face", + "score": -0.09555095784843219, + "first_commit": "2024-08-27 05:48:42", + "latest_commit": "2024-09-03 09:28:33", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.47, + "model_architectures": "LlamaForCausalLM" + }, + { + "description": "transformer-lm-japanese-1.0b This is a JAX/Flax-based transformer language model trained on a Japanese dataset.", + "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-1.0b", + "project_name": "transformer-lm-japanese-1.0b", + "downloads": 22, + "source": "Hugging Face", + "score": -0.09555095784843219, + "first_commit": "2024-07-25 04:27:53", + "latest_commit": "2024-09-06 12:44:00", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": "TransformerLMForCausalLM" }, { - "description": "roberta-large-japanese-char-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-char-luw-upos", - "project_name": "roberta-large-japanese-char-luw-upos", - "downloads": 21, + "description": "roberta-small-hi-char-mlm Model Description", + "url": "https://huggingface.co./nakamura196/roberta-small-hi-char-mlm", + "project_name": "roberta-small-hi-char-mlm", + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": "2021-12-30 15:56:46", - "latest_commit": "2022-09-18 19:44:49", + "score": -0.09555095784843219, + "first_commit": "2022-07-14 20:34:59", + "latest_commit": "2022-07-22 09:10:42", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "RobertaForMaskedLM" + }, + { + "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-1b on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", + "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-1b-japanese-hiragana-katakana", + "project_name": "wav2vec2-xls-r-1b-japanese-hiragana-katakana", + "downloads": 22, + "source": "Hugging Face", + "score": -0.09555095784843219, + "first_commit": "2022-02-04 11:27:09", + "latest_commit": "2022-03-24 11:56:32", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForTokenClassification" + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "Japanese-Heron-Bench Dataset Description Japanese-Heron-Bench is a benchmark for evaluating Japanese VLMs (Vision-Language Models).", - "url": "https://huggingface.co./datasets/turing-motors/Japanese-Heron-Bench", - "project_name": "Japanese-Heron-Bench", - "downloads": 21, + "description": "Sakura_dataset 商用利用可能な超小規模高品質日本語データセット。 ", + "url": "https://huggingface.co./datasets/saldra/sakura_japanese_dataset", + "project_name": "sakura_japanese_dataset", + "downloads": 22, "source": "Hugging Face", - "score": -0.0925722986149953, - "first_commit": "2024-04-12 01:54:01", - "latest_commit": 
"2024-04-12 08:59:36", + "score": -0.09555095784843219, + "first_commit": "2023-06-07 05:44:23", + "latest_commit": "2023-06-08 11:31:06", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "ELECTRA base Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-base-japanese-generator", - "project_name": "electra-base-japanese-generator", - "downloads": 20, + "description": "For more information, see website below!", + "url": "https://huggingface.co./datasets/Hoshikuzu/JParaCrawl", + "project_name": "JParaCrawl", + "downloads": 22, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2021-11-15 17:23:50", - "latest_commit": "2023-10-21 13:21:16", + "score": -0.09555095784843219, + "first_commit": "2024-08-24 15:07:12", + "latest_commit": "2024-08-25 13:15:52", "languages": [], - "model_or_dataset": "model", - "model_size": 0.035500000000000004, - "model_architectures": "ElectraForMaskedLM" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "yuyuyui-chatbot", - "url": "https://huggingface.co./ushikado/yuyuyui-chatbot", - "project_name": "yuyuyui-chatbot", - "downloads": 20, + "description": "deberta-large-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-upos", + "project_name": "deberta-large-japanese-upos", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2021-05-04 14:52:12", - "latest_commit": "2021-05-23 13:27:10", + "score": -0.0955606758809312, + "first_commit": "2022-05-27 06:50:55", + "latest_commit": "2024-07-26 16:00:59", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Japanese GPT2 Lyric Model Model description", - "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-medium", - "project_name": "gpt2-japanese-lyric-medium", - "downloads": 20, + "description": "AnzuMixSeries VAEの内臓はないぞ!と言わせないぞ!!!! 
", + "url": "https://huggingface.co./natsusakiyomi/AnzuMix", + "project_name": "AnzuMix", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2022-07-08 13:28:12", - "latest_commit": "2023-10-21 14:53:57", + "score": -0.0955606758809312, + "first_commit": "2023-07-30 13:10:10", + "latest_commit": "2023-11-15 12:39:10", "languages": [], "model_or_dataset": "model", - "model_size": 0.361, - "model_architectures": "GPT2LMHeadModel" + "model_size": null, + "model_architectures": null }, { - "description": "deberta-large-japanese-juman-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-juman-ud-goeswith", - "project_name": "deberta-large-japanese-juman-ud-goeswith", - "downloads": 20, + "description": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-base-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", + "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2023-02-05 13:24:47", - "latest_commit": "2024-08-30 14:27:11", + "score": -0.0955606758809312, + "first_commit": "2024-01-17 04:41:20", + "latest_commit": "2024-01-17 04:46:18", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "MistralForCausalLM" }, { - "description": "bert-base-japanese-v3-bpr-passage-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRのパッセージエンコーダです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-passage-aio", - "project_name": "bert-base-japanese-v3-bpr-passage-aio", - "downloads": 20, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base-Int4", + "project_name": "Orion-14B-Base-Int4", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2023-06-06 08:22:28", - "latest_commit": "2023-07-24 07:14:59", + "score": -0.0955606758809312, + "first_commit": "2024-01-18 09:50:31", + "latest_commit": "2024-03-26 09:55:37", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "BertModel" + "model_size": 2.69, + "model_architectures": "OrionForCausalLM" }, { - "description": "transformer-lm-japanese-1.0b This is a JAX/Flax-based transformer language model trained on a Japanese dataset.", - "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-1.0b", - "project_name": "transformer-lm-japanese-1.0b", - "downloads": 20, + "description": "deberta-large-japanese-aozora-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora-ud-head", + "project_name": "deberta-large-japanese-aozora-ud-head", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2024-07-25 04:27:53", - "latest_commit": "2024-09-06 12:44:00", + "score": -0.0955606758809312, + "first_commit": "2022-06-17 15:00:25", + "latest_commit": "2023-03-04 20:17:12", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "TransformerLMForCausalLM" + "model_architectures": 
"DebertaV2ForQuestionAnswering" }, { - "description": "Style-Bert-VITS2 Japanese Only Sakura Miko こちらは「さくらみこ」の音声データセットに基づいて学習されたVITS-TTSモデルです。 ", - "url": "https://huggingface.co./Lycoris53/style-bert-vits2-sakura-miko", - "project_name": "style-bert-vits2-sakura-miko", - "downloads": 20, + "description": "Japanese-Law-Translation Dataset Summary", + "url": "https://huggingface.co./datasets/Hoshikuzu/Japanese-Law-Translation", + "project_name": "Japanese-Law-Translation", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2024-05-27 14:58:38", - "latest_commit": "2024-05-28 03:02:14", + "score": -0.0955606758809312, + "first_commit": "2024-08-24 14:43:16", + "latest_commit": "2024-08-25 13:26:11", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-short", - "project_name": "t5-base-short", - "downloads": 20, + "description": "Kaidan Nihonbunka: A Journey Through Hyakumonogatari's Ghostly Tales Welcome to the Kaidan Nihonbunka Dataset About Name kaidan Nihonbunka translates to 怪談日本文化 in Japanese: 怪談 (Kwaidan): Ghost story or supernatural tale.", + "url": "https://huggingface.co./datasets/mohamed-khalil/KaidanNihonbunka", + "project_name": "KaidanNihonbunka", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2023-04-26 08:20:52", - "latest_commit": "2023-05-10 10:00:23", + "score": -0.0955606758809312, + "first_commit": "2024-04-15 15:43:41", + "latest_commit": "2024-04-15 16:03:13", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": null }, { - "description": "JAINU-Model (T5 fine-tuned model) JAINU is a Japanese - Ainu language machine translation model. 
", - "url": "https://huggingface.co./astremo/JAINU", - "project_name": "JAINU", - "downloads": 20, + "description": "For more information, see website below!", + "url": "https://huggingface.co./datasets/Hoshikuzu/Tanaka-corpus", + "project_name": "Tanaka-corpus", + "downloads": 21, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2022-04-30 13:57:31", - "latest_commit": "2022-05-22 05:51:12", + "score": -0.0955606758809312, + "first_commit": "2024-08-24 14:51:15", + "latest_commit": "2024-08-25 13:20:34", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "T5ForConditionalGeneration" + "model_architectures": null }, { - "description": "TinySlime-1.1B-Chat-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", - "url": "https://huggingface.co./2121-8/TinySlime-1.1B-Chat-v1.0", - "project_name": "TinySlime-1.1B-Chat-v1.0", + "description": "bert-large-japanese-wikipedia-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-wikipedia-ud-head", + "project_name": "bert-large-japanese-wikipedia-ud-head", "downloads": 20, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2024-07-02 03:34:30", - "latest_commit": "2024-07-02 08:53:11", + "score": -0.0955703939134302, + "first_commit": "2022-06-21 07:38:19", + "latest_commit": "2024-08-20 19:45:52", "languages": [], "model_or_dataset": "model", - "model_size": 1.1, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "BertForQuestionAnswering" }, { - "description": ", 2023) was trained on.", - "url": "https://huggingface.co./datasets/zan/lima-ja", - "project_name": "lima-ja", + "description": "roberta-large-japanese-aozora-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora-ud-goeswith", + "project_name": "roberta-large-japanese-aozora-ud-goeswith", "downloads": 20, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2023-07-08 07:35:34", - "latest_commit": "2023-07-08 13:39:45", + "score": -0.0955703939134302, + "first_commit": "2022-10-15 04:15:39", + "latest_commit": "2024-08-20 18:51:15", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "RobertaForTokenClassification" }, { - "description": "For more information, see website below!", - "url": "https://huggingface.co./datasets/Hoshikuzu/JParaCrawl", - "project_name": "JParaCrawl", + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-small-medium", + "project_name": "t5-small-medium", "downloads": 20, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2024-08-24 15:07:12", - "latest_commit": "2024-08-25 13:15:52", + "score": -0.0955703939134302, + "first_commit": "2023-04-26 08:26:19", + "latest_commit": "2023-05-10 10:01:16", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "Dataset Summary JMultiWOZ is a large-scale Japanese multi-domain task-oriented dialogue dataset.", - "url": "https://huggingface.co./datasets/nu-dialogue/jmultiwoz", - "project_name": "jmultiwoz", + "description": "Model trained on 800,000 Japanese sentences after reducing oshizo/japanese-e5-mistral-7b_slerp to 8 layers.", + 
"url": "https://huggingface.co./oshizo/japanese-e5-mistral-1.9b", + "project_name": "japanese-e5-mistral-1.9b", "downloads": 20, "source": "Hugging Face", - "score": -0.09258290356553164, - "first_commit": "2024-02-29 15:38:29", - "latest_commit": "2024-03-13 02:15:37", + "score": -0.0955703939134302, + "first_commit": "2024-02-02 12:39:11", + "latest_commit": "2024-02-03 00:28:28", "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null + "model_or_dataset": "model", + "model_size": 1.88, + "model_architectures": "MistralForEmbedding" }, { - "description": "gpt2-small-japanese-ud-causal Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-small-japanese-ud-causal", - "project_name": "gpt2-small-japanese-ud-causal", - "downloads": 19, + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-small", + "project_name": "ruri-reranker-stage1-small", + "downloads": 20, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-22 13:33:02", - "latest_commit": "2024-08-25 17:54:09", + "score": -0.0955703939134302, + "first_commit": "2024-08-19 09:44:00", + "latest_commit": "2024-09-04 08:53:02", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPT2ForTokenClassification" + "model_size": 0.06870000000000001, + "model_architectures": "DistilBertForSequenceClassification" }, { - "description": "Genji-JP 6B Please check our blog post for more details, samples, evaluations and more: Blogpost Model Description Genji-JP 6B is a model finetuned on our Japanese storytelling dataset based on EleutherAI's GPT-J 6B model.", - "url": "https://huggingface.co./NovelAI/genji-jp", - "project_name": "genji-jp", - "downloads": 19, + "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "downloads": 20, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2021-11-03 15:07:47", - "latest_commit": "2022-08-09 17:36:02", + "score": -0.0955703939134302, + "first_commit": "2024-07-26 21:06:54", + "latest_commit": "2024-07-26 21:37:02", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPTJForCausalLM" + "model_size": 11.0, + "model_architectures": "LlamaForCausalLM" }, { - "description": "spekulatius マージしているとたまに出てくる「目的の意図とは違うのだけどなんだか消すにはもったいないモデル」をおすそ分けするシリーズです。 ", - "url": "https://huggingface.co./Lasorco/spekulatius", - "project_name": "spekulatius", - "downloads": 19, + "description": "Llama-3-Umievo-itr014-Shizuko-8b このモデルは日本語に対応しているLlama-3ベースの4つのモデルを進化的アルゴリズムで進化的マージしたものです。", + "url": "https://huggingface.co./umiyuki/Llama-3-Umievo-itr014-Shizuko-8b", + "project_name": "Llama-3-Umievo-itr014-Shizuko-8b", + "downloads": 20, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2023-10-24 13:56:21", - "latest_commit": "2023-10-26 04:21:35", + "score": -0.0955703939134302, + "first_commit": "2024-06-08 05:25:05", + "latest_commit": "2024-06-08 07:47:59", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": null + "model_size": 8.03, + "model_architectures": "LlamaForCausalLM" }, { - "description": "XML-RoBERTa-NER-Japanese This model is a fine-tuned version of 
xlm-roberta-base on the Wikipedia Japanese NER dataset from Stockmark Inc.", - "url": "https://huggingface.co./ithattieu/XML-RoBERTa-NER-Japanese", - "project_name": "XML-RoBERTa-NER-Japanese", - "downloads": 19, + "description": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged Mixtral-8x7B-Instruct-v0.1-japanese-alpha-mergedはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施した学習途中のモデルに対して、差分マージを実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", + "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", + "downloads": 20, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-17 08:18:04", - "latest_commit": "2024-08-18 04:03:33", + "score": -0.0955703939134302, + "first_commit": "2024-04-16 07:54:14", + "latest_commit": "2024-04-20 09:14:59", "languages": [], "model_or_dataset": "model", - "model_size": 0.277, - "model_architectures": "RobertaForTokenClassification" + "model_size": 46.9, + "model_architectures": "MixtralForCausalLM" + }, + { + "description": "オリジナルのサイトと同じものを使用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/ja-vicuna-qa-benchmark", + "project_name": "ja-vicuna-qa-benchmark", + "downloads": 20, + "source": "Hugging Face", + "score": -0.0955703939134302, + "first_commit": "2024-06-25 22:14:55", + "latest_commit": "2024-08-31 12:37:25", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "BERT for Sentiment Analysis of Japanese Twitter", - "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter-sentiment-mixed-label", - "project_name": "bert-for-japanese-twitter-sentiment-mixed-label", - "downloads": 19, + "description": "Dataset Summary 53,640 Japanese tweets with annotation if a tweet is related to COVID-19 or not.", + "url": "https://huggingface.co./datasets/community-datasets/covid_tweets_japanese", + "project_name": "covid_tweets_japanese", + "downloads": 20, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-09 11:38:05", - "latest_commit": "2024-08-09 12:10:35", + "score": -0.0955703939134302, + "first_commit": "2022-01-25 16:35:12", + "latest_commit": "2024-06-24 11:21:23", "languages": [], - "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", - "url": "https://huggingface.co./hibikaze/tiny_mixtral_ja_with_tokenizer", - "project_name": "tiny_mixtral_ja_with_tokenizer", + "description": "Example ESPnet2 TTS model kan-bayashi/jsut_fastspeech2 ♻", + "url": "https://huggingface.co./espnet/kan-bayashi_jsut_fastspeech2", + "project_name": "kan-bayashi_jsut_fastspeech2", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-07-20 05:30:59", - "latest_commit": "2024-07-20 05:33:38", + "score": -0.0955801119459292, + "first_commit": "2021-07-03 14:45:57", + "latest_commit": "2021-07-03 10:46:00", "languages": [], "model_or_dataset": "model", - "model_size": 0.276, - "model_architectures": "MixtralForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Model Card for Japanese character-level GPT-2 Large Model description", - 
"url": "https://huggingface.co./ku-nlp/gpt2-large-japanese-char", - "project_name": "gpt2-large-japanese-char", + "description": "bert-base-sudachitra-v11", + "url": "https://huggingface.co./hiroshi-matsuda-rit/bert-base-sudachitra-v11", + "project_name": "bert-base-sudachitra-v11", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2023-12-27 11:18:45", - "latest_commit": "2023-12-27 12:07:30", + "score": -0.0955801119459292, + "first_commit": "2023-05-06 11:00:39", + "latest_commit": "2024-01-14 16:29:56", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": null }, { - "description": "Wav2Vec2-XLS-R-300M-Japanese-Hiragana Fine-tuned facebook/wav2vec2-xls-r-300m on Japanese Hiragana characters using the Common Voice and JSUT.", - "url": "https://huggingface.co./slplab/wav2vec2-xls-r-300m-japanese-hiragana", - "project_name": "wav2vec2-xls-r-300m-japanese-hiragana", + "description": "gpt2-small-japanese-ud-causal Model Description", + "url": "https://huggingface.co./KoichiYasuoka/gpt2-small-japanese-ud-causal", + "project_name": "gpt2-small-japanese-ud-causal", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2022-09-16 07:34:58", - "latest_commit": "2022-09-16 11:01:54", + "score": -0.0955801119459292, + "first_commit": "2024-08-22 13:33:02", + "latest_commit": "2024-08-25 17:54:09", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "Wav2Vec2ForCTC" + "model_architectures": "GPT2ForTokenClassification" }, { - "description": "deberta-small-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-aozora", - "project_name": "deberta-small-japanese-aozora", + "description": "nlp-waseda/gpt2-small-japanese-wikipedia This model is Japanese GPT-2 pretrained on Japanese Wikipedia.", + "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese-wikipedia", + "project_name": "gpt2-small-japanese-wikipedia", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2022-05-23 04:58:53", - "latest_commit": "2023-01-15 15:25:14", + "score": -0.0955801119459292, + "first_commit": "2021-12-28 01:22:40", + "latest_commit": "2021-12-28 15:31:38", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "Model Overview: 日本語で質問すると、日本語で回答を得られます。", - "url": "https://huggingface.co./Ryu-m0m/16bit-japanese-finetuned-mistral-7b-v0", - "project_name": "16bit-japanese-finetuned-mistral-7b-v0", + "description": "Japanese-Vietnamese Translated Sentence Pairs.", + "url": "https://huggingface.co./datasets/dichmau/ja_vi_translation", + "project_name": "ja_vi_translation", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-20 14:33:15", - "latest_commit": "2024-08-21 15:31:43", + "score": -0.0955801119459292, + "first_commit": "2024-03-13 17:52:50", + "latest_commit": "2024-04-08 19:35:06", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_architectures": null }, { - "description": "Japanese-Law-Translation Dataset Summary", - "url": "https://huggingface.co./datasets/Hoshikuzu/Japanese-Law-Translation", - "project_name": "Japanese-Law-Translation", 
+ "description": "Wikidata parallel descriptions en-ja Parallel corpus for machine translation generated from wikidata dump (2024-05-06).", + "url": "https://huggingface.co./datasets/Mitsua/wikidata-parallel-descriptions-en-ja", + "project_name": "wikidata-parallel-descriptions-en-ja", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-24 14:43:16", - "latest_commit": "2024-08-25 13:26:11", + "score": -0.0955801119459292, + "first_commit": "2024-05-13 12:02:43", + "latest_commit": "2024-05-17 00:25:10", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "mqaデータセットのquery--passageのペアについて重複を削除したデータセットです。 ", - "url": "https://huggingface.co./datasets/hpprc/mqa-ja", - "project_name": "mqa-ja", + "description": "This dataset is a collection of Korean, Chinese, and Japanese OpenOrca translation datasets.", + "url": "https://huggingface.co./datasets/werty1248/OpenOrca-EnKoZhJa-18k", + "project_name": "OpenOrca-EnKoZhJa-18k", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-04-07 06:23:02", - "latest_commit": "2024-04-07 15:16:42", + "score": -0.0955801119459292, + "first_commit": "2024-08-10 18:54:09", + "latest_commit": "2024-08-10 19:16:35", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "For more information, see website below!", - "url": "https://huggingface.co./datasets/Hoshikuzu/Tanaka-corpus", - "project_name": "Tanaka-corpus", + "description": "データセットについて オープンソースLLMの出力を人手でチェック・修正したinstructionにSwallow-MXでoutputを生成したデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/pret-a-porter-instruction-v0.1", + "project_name": "pret-a-porter-instruction-v0.1", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, - "first_commit": "2024-08-24 14:51:15", - "latest_commit": "2024-08-25 13:20:34", + "score": -0.0955801119459292, + "first_commit": "2024-03-26 13:08:14", + "latest_commit": "2024-04-01 04:30:44", "languages": [], "model_or_dataset": "dataset", "model_size": null, @@ -12068,7 +12416,7 @@ "project_name": "English-Japanese_Parallel_Corpus_Data", "downloads": 19, "source": "Hugging Face", - "score": -0.09259350851606797, + "score": -0.0955801119459292, "first_commit": "2023-11-08 10:47:40", "latest_commit": "2024-08-05 03:14:27", "languages": [], @@ -12077,311 +12425,227 @@ "model_architectures": null }, { - "description": "bert-base-japanese-char-extended Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-char-extended", - "project_name": "bert-base-japanese-char-extended", + "description": "friendly_JA-Model (T5 fine-tuned model) MT model trained using the friendly_JA Corpus attempting to make Japanese easier/more accessible to occidental people by using the Latin/English derived katakana lexicon instead of the standard Sino-Japanese lexicon Examples input output 最適化を応用した機械翻訳モデルは高精度だ オプティマイゼーションを応用したマシントランスレーションモデルは高いアキュラシーだ 彼は架空の世界に住んでいる 彼はイマジナリー世界に住んでいる 新型コロナウイルスに感染してしまった コロナウイルスにかかってしまった 深層学習は難しい ディープラーニングはむずかしい 新たな概念を紹介する 新しいコンセプトを紹介する 津波の警報が流れた ツナミのアラートが流れた 南海トラフの災害は震源地による 南海トラフのディザスターはエピ", + "url": "https://huggingface.co./astremo/friendly_JA", + "project_name": "friendly_JA", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2021-08-26 22:44:12", - "latest_commit": "2022-06-21 07:21:54", + "score": -0.0955898299784282, + 
"first_commit": "2022-01-10 06:31:18", + "latest_commit": "2022-05-22 14:57:21", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": "T5ForConditionalGeneration" }, { - "description": "deberta-large-japanese-unidic-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-luw-upos", - "project_name": "deberta-large-japanese-unidic-luw-upos", + "description": "reazonspeech-espnet-v1 reazonspeech-espnet-v1 is an ESPnet model trained for Japanese automatic speech recognition (ASR).", + "url": "https://huggingface.co./reazon-research/reazonspeech-espnet-v1", + "project_name": "reazonspeech-espnet-v1", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2022-06-10 12:53:45", - "latest_commit": "2024-08-20 20:16:18", + "score": -0.0955898299784282, + "first_commit": "2023-01-13 07:44:37", + "latest_commit": "2023-01-16 16:44:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": null }, { - "description": "GPT2 Japanese base model version 2 Prerequisites transformers==4.19.2 Model architecture This model uses GPT2 base setttings except vocabulary size.", - "url": "https://huggingface.co./ClassCat/gpt2-base-japanese-v2", - "project_name": "gpt2-base-japanese-v2", + "description": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2022-06-04 02:30:34", - "latest_commit": "2022-06-25 15:36:22", + "score": -0.0955898299784282, + "first_commit": "2023-09-26 06:15:16", + "latest_commit": "2023-09-29 03:19:23", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 0.446, "model_architectures": "GPT2LMHeadModel" }, { - "description": "Donut (base-sized model, fine-tuned on visual novel like synthetic dataset ) ビジュアルノベル風画像の合成データセットでnaver-clova-ix/donut-baseを訓練したモデルです。 ", - "url": "https://huggingface.co./oshizo/donut-base-japanese-visual-novel", - "project_name": "donut-base-japanese-visual-novel", + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Plugin", + "project_name": "Orion-14B-Chat-Plugin", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2023-05-03 04:53:49", - "latest_commit": "2023-05-03 09:25:19", + "score": -0.0955898299784282, + "first_commit": "2024-01-16 12:19:45", + "latest_commit": "2024-03-26 10:12:37", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "VisionEncoderDecoderModel" + "model_architectures": "OrionForCausalLM" }, { - "description": "karasu-lora-jp-qa-chat karasu fine tuned model by lora method with the original Q&A dataset.", - "url": "https://huggingface.co./aipib/karasu-lora-jp-qa-chat", - "project_name": "karasu-lora-jp-qa-chat", + "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese This model was trained using SentenceTransformers Cross-Encoder class.", + "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick", + "project_name": 
"bert-base-japanese-v3_nli-jsnli-jnli-jsick", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2024-04-24 02:26:58", - "latest_commit": "2024-06-03 01:02:33", + "score": -0.0955898299784282, + "first_commit": "2024-04-26 05:15:05", + "latest_commit": "2024-04-26 06:02:55", "languages": [], "model_or_dataset": "model", - "model_size": 1.1, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" }, { - "description": "Tanuki-8B-dpo-v1.0-GPTQ-8bit 概要 GENIAC 松尾研 LLM開発プロジェクトで開発されたLLMであるweblab-GENIAC/Tanuki-8B-dpo-v1.0のGPTQ 8bit量子化モデルです。", - "url": "https://huggingface.co./team-hatakeyama-phase2/Tanuki-8B-dpo-v1.0-GPTQ-8bit", - "project_name": "Tanuki-8B-dpo-v1.0-GPTQ-8bit", + "description": "Miwa-Keita/zenz-v1-checkpoints を optimum 用に ONNX に変換したモデルです。", + "url": "https://huggingface.co./p1atdev/zenz-v1-onnx", + "project_name": "zenz-v1-onnx", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2024-08-27 17:32:47", - "latest_commit": "2024-09-03 09:28:59", + "score": -0.0955898299784282, + "first_commit": "2024-06-29 03:03:03", + "latest_commit": "2024-06-29 03:40:34", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "This model is a merged version of qwen-14b-vntl and Qwen1.5-14B-Chat , aiming for the translation of Japanese context into Chinese.", - "url": "https://huggingface.co./GralchemOz/Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", - "project_name": "Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", + "description": "Overview of bert-japanese-12M The bert-japanese-12M model is a transformer-based model with BERT architecture, which is designed to be used on Japanese text.", + "url": "https://huggingface.co./nptdat/bert-japanese-12M", + "project_name": "bert-japanese-12M", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2024-03-03 02:29:43", - "latest_commit": "2024-03-03 03:17:09", + "score": -0.0955898299784282, + "first_commit": "2024-08-16 16:46:49", + "latest_commit": "2024-08-19 02:56:14", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "Qwen2ForCausalLM" - }, - { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-AWQ", - "project_name": "japanese-stablelm-base-beta-70B-AWQ", - "downloads": 18, - "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2023-11-06 11:33:47", - "latest_commit": "2023-11-09 18:16:05", - "languages": [], - "model_or_dataset": "model", - "model_size": 9.68, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "BertForMaskedLM" }, { - "description": "Japanese DialoGPT trained with Aozora (ja) 青空文庫のセリフで学習した日本語のDialoGPT Smallです(en) Japanese DialoGPT Small trained on Aozora Bunko.", - "url": "https://huggingface.co./akiFQC/japanese-dialogpt-small-aozora", - "project_name": "japanese-dialogpt-small-aozora", + "description": "This model learned the proceedings of the Japanese parliament in 2022.", + "url": "https://huggingface.co./ohtaman/falcon-7b-kokkai2022-lora", + "project_name": "falcon-7b-kokkai2022-lora", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2023-02-08 13:22:24", - "latest_commit": "2023-02-09 00:55:31", + 
"score": -0.0955898299784282, + "first_commit": "2023-07-14 20:05:55", + "latest_commit": "2023-09-20 16:36:19", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": null }, { - "description": "roberta-large-japanese-aozora-char Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora-char", - "project_name": "roberta-large-japanese-aozora-char", + "description": "モデル説明 (model explanation) MoeDiffusionPlusPlus 0.7 : DreamShaper 3.3 (full) 0.3。 ", + "url": "https://huggingface.co./ThePioneer/MoeSharpV1", + "project_name": "MoeSharpV1", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2021-12-30 14:19:53", - "latest_commit": "2022-06-22 10:22:43", + "score": -0.0955898299784282, + "first_commit": "2023-01-21 07:30:06", + "latest_commit": "2023-02-03 23:46:40", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": null }, { - "description": "roberta-base-japanese-char-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-char-luw-upos", - "project_name": "roberta-base-japanese-char-luw-upos", + "description": "Model Card for Model ID このモデルはrinna/japanese-gpt-1bをベースモデルとして、 コンテキストからの抽出型QAと、解答を新たなコンテキストでリファインするための学習を行ったモデルです。 ", + "url": "https://huggingface.co./oshizo/qa-refine-japanese-gpt-1b", + "project_name": "qa-refine-japanese-gpt-1b", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2021-12-28 05:01:56", - "latest_commit": "2024-08-20 18:21:15", + "score": -0.0955898299784282, + "first_commit": "2023-01-18 15:43:39", + "latest_commit": "2023-01-19 10:14:36", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForTokenClassification" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "JaWiki WikipediaのHTML形式のダンプファイルから抽出したテキストデータセットです。 ", - "url": "https://huggingface.co./datasets/hpprc/jawiki", - "project_name": "jawiki", + "description": "roberta-base-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-luw-upos", + "project_name": "roberta-base-japanese-luw-upos", "downloads": 18, "source": "Hugging Face", - "score": -0.09260411346660431, - "first_commit": "2024-02-02 06:36:01", - "latest_commit": "2024-02-13 15:19:49", - "languages": [], - "model_or_dataset": "dataset", - "model_size": null, - "model_architectures": null - }, - { - "description": "friendly_JA-Model (T5 fine-tuned model) MT model trained using the friendly_JA Corpus attempting to make Japanese easier/more accessible to occidental people by using the Latin/English derived katakana lexicon instead of the standard Sino-Japanese lexicon Examples input output 最適化を応用した機械翻訳モデルは高精度だ オプティマイゼーションを応用したマシントランスレーションモデルは高いアキュラシーだ 彼は架空の世界に住んでいる 彼はイマジナリー世界に住んでいる 新型コロナウイルスに感染してしまった コロナウイルスにかかってしまった 深層学習は難しい ディープラーニングはむずかしい 新たな概念を紹介する 新しいコンセプトを紹介する 津波の警報が流れた ツナミのアラートが流れた 南海トラフの災害は震源地による 南海トラフのディザスターはエピ", - "url": "https://huggingface.co./astremo/friendly_JA", - "project_name": "friendly_JA", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2022-01-10 06:31:18", - "latest_commit": "2022-05-22 14:57:21", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "T5ForConditionalGeneration" - }, - { - 
"description": "This pre-trained model is work in progress!", - "url": "https://huggingface.co./naclbit/gpt-j-japanese-6.8b", - "project_name": "gpt-j-japanese-6.8b", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2021-10-17 08:02:54", - "latest_commit": "2021-11-10 15:28:57", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPTJForCausalLM" - }, - { - "description": "roberta-large-japanese-aozora-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora-ud-goeswith", - "project_name": "roberta-large-japanese-aozora-ud-goeswith", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2022-10-15 04:15:39", - "latest_commit": "2024-08-20 18:51:15", + "score": -0.0955898299784282, + "first_commit": "2021-12-21 00:41:00", + "latest_commit": "2022-09-18 19:44:22", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "RobertaForTokenClassification" }, { - "description": "deberta-large-japanese-aozora-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora-ud-goeswith", - "project_name": "deberta-large-japanese-aozora-ud-goeswith", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2022-10-14 11:35:36", - "latest_commit": "2024-08-20 19:20:44", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" - }, - { - "description": "Fine-tuned Japanese Wav2Vec2 model for speech recognition using XLSR-53 large Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using Common Voice, JVS and JSUT.", - "url": "https://huggingface.co./Ivydata/wav2vec2-large-xlsr-53-japanese", - "project_name": "wav2vec2-large-xlsr-53-japanese", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2023-05-11 08:47:29", - "latest_commit": "2023-05-12 02:15:39", - "languages": [], - "model_or_dataset": "model", - "model_size": null, - "model_architectures": "Wav2Vec2ForCTC" - }, - { - "description": "About This model is Lightblue's QLoRA finetune of OpenOrca's Open-Orca/OpenOrcaxOpenChat-Preview2-13B model on Japanese fine-tuning datasets.", - "url": "https://huggingface.co./lightblue/openorca_stx", - "project_name": "openorca_stx", - "downloads": 17, + "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", + "url": "https://huggingface.co./cinmodel/electra-small-japanese-generator", + "project_name": "electra-small-japanese-generator", + "downloads": 18, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2023-09-12 09:29:10", - "latest_commit": "2023-10-02 10:25:36", + "score": -0.0955898299784282, + "first_commit": "2020-11-13 06:49:52", + "latest_commit": "2020-12-11 22:26:17", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "ElectraForMaskedLM" }, { - "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese This model was trained using SentenceTransformers Cross-Encoder class.", - "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick", - "project_name": 
"bert-base-japanese-v3_nli-jsnli-jnli-jsick", + "description": "Example ESPnet2 TTS model kan-bayashi/jsut_tacotron2 ♻", + "url": "https://huggingface.co./espnet/kan-bayashi_jsut_tacotron2", + "project_name": "kan-bayashi_jsut_tacotron2", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2024-04-26 05:15:05", - "latest_commit": "2024-04-26 06:02:55", + "score": -0.09559954801092721, + "first_commit": "2021-07-03 14:43:58", + "latest_commit": "2021-07-03 10:44:00", "languages": [], "model_or_dataset": "model", - "model_size": 0.111, - "model_architectures": "BertForSequenceClassification" + "model_size": null, + "model_architectures": null }, { - "description": "gpt2-small-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-small-japanese-upos", - "project_name": "gpt2-small-japanese-upos", + "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-with-auto-jumanpp", + "project_name": "deberta-v2-base-japanese-with-auto-jumanpp", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2024-06-22 22:28:58", - "latest_commit": "2024-07-27 07:49:34", + "score": -0.09559954801092721, + "first_commit": "2023-09-07 06:04:29", + "latest_commit": "2023-11-20 06:00:08", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "GPT2ForTokenClassification" + "model_size": 0.137, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-instruct-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", - "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", + "description": "japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", + "project_name": "japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2024-01-16 12:23:01", - "latest_commit": "2024-01-16 12:27:54", + "score": -0.09559954801092721, + "first_commit": "2023-09-26 06:16:04", + "latest_commit": "2023-09-27 23:54:44", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": 0.771, + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "rinna/nekomata-14b-gguf Overview The model is the GGUF version of rinna/nekomata-14b.", - "url": "https://huggingface.co./rinna/nekomata-14b-gguf", - "project_name": "nekomata-14b-gguf", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-GGUF", + "project_name": "SambaLingo-Japanese-Chat-GGUF", "downloads": 17, "source": "Hugging Face", - "score": 
-0.09261471841714064, - "first_commit": "2023-12-19 08:11:51", - "latest_commit": "2024-07-20 08:29:58", + "score": -0.09559954801092721, + "first_commit": "2024-03-07 06:38:01", + "latest_commit": "2024-03-07 06:48:27", "languages": [], "model_or_dataset": "model", - "model_size": 14.2, + "model_size": 6.95, "model_architectures": null }, { @@ -12390,7 +12654,7 @@ "project_name": "nekomata-7b-gguf", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, + "score": -0.09559954801092721, "first_commit": "2023-12-19 08:10:42", "latest_commit": "2024-07-20 08:36:15", "languages": [], @@ -12399,130 +12663,116 @@ "model_architectures": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-AWQ", - "project_name": "japanese-stablelm-instruct-beta-70B-AWQ", - "downloads": 17, - "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2023-11-02 15:45:23", - "latest_commit": "2023-11-09 18:16:16", - "languages": [], - "model_or_dataset": "model", - "model_size": 9.68, - "model_architectures": "LlamaForCausalLM" - }, - { - "description": "deberta-base-japanese-wikipedia Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia", - "project_name": "deberta-base-japanese-wikipedia", + "description": "spekulatius マージしているとたまに出てくる「目的の意図とは違うのだけどなんだか消すにはもったいないモデル」をおすそ分けするシリーズです。 ", + "url": "https://huggingface.co./Lasorco/spekulatius", + "project_name": "spekulatius", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2022-06-25 03:46:58", - "latest_commit": "2023-01-27 17:51:51", + "score": -0.09559954801092721, + "first_commit": "2023-10-24 13:56:21", + "latest_commit": "2023-10-26 04:21:35", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForMaskedLM" + "model_architectures": null }, { - "description": "deberta-base-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-luw-upos", - "project_name": "deberta-base-japanese-luw-upos", + "description": "line-corporation/japanese-large-lm-3.6b line-corporationさんが公開しているjapanese-large-lm-3.6bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-gguf", + "project_name": "line-corp-japanese-large-lm-3.6b-gguf", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2022-05-24 06:55:47", - "latest_commit": "2024-08-20 19:21:57", + "score": -0.09559954801092721, + "first_commit": "2023-09-02 18:18:41", + "latest_commit": "2023-09-08 02:53:05", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": null }, { - "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-fin-generator", - "project_name": "electra-small-paper-japanese-fin-generator", + "description": "bert-base-japanese-v3-bpr-passage-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRのパッセージエンコーダです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-passage-aio", + "project_name": "bert-base-japanese-v3-bpr-passage-aio", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2021-10-04 13:38:47", - 
"latest_commit": "2023-10-21 13:21:24", + "score": -0.09559954801092721, + "first_commit": "2023-06-06 08:22:28", + "latest_commit": "2023-07-24 07:14:59", "languages": [], "model_or_dataset": "model", - "model_size": 0.00491, - "model_architectures": "ElectraForMaskedLM" + "model_size": null, + "model_architectures": "BertModel" }, { - "description": "Japanese-Vietnamese Translated Sentence Pairs.", - "url": "https://huggingface.co./datasets/dichmau/ja_vi_translation", - "project_name": "ja_vi_translation", + "description": "Synthetic-Japanese-Roleplay-gpt-4o-mini-39.6k-formatted 20240907 データ増量(約19800件→約39600件) 概要 gpt-4o-miniを用いて作成した日本語ロールプレイデータセットであるAratako/Synthetic-Japanese-Roleplay-gpt-4o-mini-39.6kにsystem messageを追加して整形したデータセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-Japanese-Roleplay-gpt-4o-mini-39.6k-formatted", + "project_name": "Synthetic-Japanese-Roleplay-gpt-4o-mini-39.6k-formatted", "downloads": 17, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2024-03-13 17:52:50", - "latest_commit": "2024-04-08 19:35:06", + "score": -0.09559954801092721, + "first_commit": "2024-08-16 16:46:06", + "latest_commit": "2024-09-07 12:34:01", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "This dataset is a collection of Korean, Chinese, and Japanese OpenOrca translation datasets.", - "url": "https://huggingface.co./datasets/werty1248/OpenOrca-EnKoZhJa-18k", - "project_name": "OpenOrca-EnKoZhJa-18k", - "downloads": 17, + "description": "deberta-base-japanese-wikipedia Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia", + "project_name": "deberta-base-japanese-wikipedia", + "downloads": 16, "source": "Hugging Face", - "score": -0.09261471841714064, - "first_commit": "2024-08-10 18:54:09", - "latest_commit": "2024-08-10 19:16:35", + "score": -0.0956092660434262, + "first_commit": "2022-06-25 03:46:58", + "latest_commit": "2023-01-27 17:51:51", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "deberta-base-japanese-unidic-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic-luw-upos", - "project_name": "deberta-base-japanese-unidic-luw-upos", + "description": "NLLB-200 1.3B fine-tuned on Ascendance of a Bookworm", + "url": "https://huggingface.co./thefrigidliquidation/nllb-200-distilled-1.3B-bookworm", + "project_name": "nllb-200-distilled-1.3B-bookworm", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2022-06-08 08:26:25", - "latest_commit": "2024-08-20 20:15:13", + "score": -0.0956092660434262, + "first_commit": "2022-07-27 20:39:08", + "latest_commit": "2024-04-14 18:45:22", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_size": 1.37, + "model_architectures": "M2M100ForConditionalGeneration" }, { - "description": "line-corporation/japanese-large-lm-3.6b line-corporationさんが公開しているjapanese-large-lm-3.6bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-gguf", - "project_name": "line-corp-japanese-large-lm-3.6b-gguf", + "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for Ninja-v1-128k The 
Mistral-7B-based Large Language Model (LLM) is a novel-dataset fine-tuned version of Mistral-7B-v0.1. Ninja-128k has the following changes compared to Mistral-7B-v0.1.",
+      "url": "https://huggingface.co./Local-Novel-LLM-project/Ninja-v1-128k",
+      "project_name": "Ninja-v1-128k",
       "downloads": 16,
       "source": "Hugging Face",
-      "score": -0.09262532336767697,
-      "first_commit": "2023-09-02 18:18:41",
-      "latest_commit": "2023-09-08 02:53:05",
+      "score": -0.0956092660434262,
+      "first_commit": "2024-05-01 02:56:38",
+      "latest_commit": "2024-05-04 04:07:00",
       "languages": [],
       "model_or_dataset": "model",
-      "model_size": null,
-      "model_architectures": null
+      "model_size": 7.24,
+      "model_architectures": "MistralForCausalLM"
     },
     {
-      "description": "日本語でtrainingしたllama2 model size: 417.12M trainingは以下のscript参照https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")",
-      "url": "https://huggingface.co./if001/llama2_ja_small",
-      "project_name": "llama2_ja_small",
+      "description": "A very tiny 33.5M Llama3 model trained on a Macbook Pro with M3 Max for 10 hours.",
+      "url": "https://huggingface.co./frost-beta/Llama3-33.5M-Japanese",
+      "project_name": "Llama3-33.5M-Japanese",
       "downloads": 16,
       "source": "Hugging Face",
-      "score": -0.09262532336767697,
-      "first_commit": "2023-10-11 09:11:41",
-      "latest_commit": "2023-10-14 13:50:54",
+      "score": -0.0956092660434262,
+      "first_commit": "2024-07-16 23:58:54",
+      "latest_commit": "2024-07-17 08:27:07",
       "languages": [],
       "model_or_dataset": "model",
       "model_size": null,
-      "model_architectures": "LlamaForCausalLM"
+      "model_architectures": null
     },
     {
       "description": "gpt2-large-japanese-ud-causal Model Description",
@@ -12530,7 +12780,7 @@
       "project_name": "gpt2-large-japanese-ud-causal",
       "downloads": 16,
       "source": "Hugging Face",
-      "score": -0.09262532336767697,
+      "score": -0.0956092660434262,
       "first_commit": "2024-08-25 00:35:43",
       "latest_commit": "2024-08-29 17:08:21",
       "languages": [],
@@ -12539,606 +12789,676 @@
       "model_architectures": "GPT2ForTokenClassification"
     },
     {
-      "description": "Model Card for Model ID MMedBench and KoreanMedMCQA Instruction Fine-Tuned Multilingual Llama3 8B 4Bit quantized model using QLoRA.",
-      "url": "https://huggingface.co./SpassMedAI/MLMedLlama3",
-      "project_name": "MLMedLlama3",
+      "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したモデルです。 ",
+      "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-v0.1",
+      "project_name": "Tokara-0.5B-v0.1",
       "downloads": 16,
       "source": "Hugging Face",
-      "score": -0.09262532336767697,
-      "first_commit": "2024-08-20 17:08:05",
-      "latest_commit": "2024-08-28 15:39:15",
+      "score": -0.0956092660434262,
+      "first_commit": "2024-05-06 11:39:26",
+      "latest_commit": "2024-05-08 12:44:05",
       "languages": [],
       "model_or_dataset": "model",
-      "model_size": 4.65,
-      "model_architectures": "LlamaForCausalLM"
+      "model_size": 0.464,
+      "model_architectures": "Qwen2ForCausalLM"
     },
     {
-      "description": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1の量子化済みGGUF版です。",
-      "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF",
-      "project_name": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF",
+      "description": "モデル ベースモデル:microsoft/Phi-3-mini-4k-instruct 学習データセット:llm-jp/hh-rlhf-12k-ja 学習方式:フルパラメータチューニング サンプル import torch from 
transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", trust_remote_code=True, ) model = AutoModelForCausalLM.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", device_map=\"auto\", torch_dtype='auto', trust_remote_code=True, ) text = \"<|user|>\\n与えられた質問に対して英語で思考し、日本語で答えてください。", + "url": "https://huggingface.co./ryota39/Phi-3-mini-4k-instruct-dpo", + "project_name": "Phi-3-mini-4k-instruct-dpo", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2024-03-07 13:21:38", - "latest_commit": "2024-03-07 13:47:58", + "score": -0.0956092660434262, + "first_commit": "2024-04-24 16:21:32", + "latest_commit": "2024-05-01 07:41:46", "languages": [], "model_or_dataset": "model", - "model_size": 11.2, - "model_architectures": null + "model_size": 3.82, + "model_architectures": "Phi3ForCausalLM" }, { - "description": "nlp-waseda/gpt2-small-japanese This model is Japanese GPT-2 pretrained on Japanese Wikipedia and CC-100.", - "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese", - "project_name": "gpt2-small-japanese", + "description": "MobileBERT 日本語事前学習済みモデル爆誕!! ", + "url": "https://huggingface.co./ysakuramoto/mobilebert-ja", + "project_name": "mobilebert-ja", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2022-03-30 03:34:11", - "latest_commit": "2022-03-30 04:28:17", + "score": -0.0956092660434262, + "first_commit": "2022-01-23 11:29:39", + "latest_commit": "2022-01-24 05:25:31", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": null }, { - "description": "This is a Japanese sentence-T5 model.", - "url": "https://huggingface.co./sonoisa/sentence-t5-base-ja-mean-tokens", - "project_name": "sentence-t5-base-ja-mean-tokens", + "description": "Dataset Details Dataset Type:Japanese LLaVA v1.5", + "url": "https://huggingface.co./datasets/turing-motors/LLaVA-v1.5-Instruct-620K-JA", + "project_name": "LLaVA-v1.5-Instruct-620K-JA", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2021-12-27 11:57:10", - "latest_commit": "2022-07-31 07:54:13", + "score": -0.0956092660434262, + "first_commit": "2024-04-10 05:04:58", + "latest_commit": "2024-04-12 09:18:42", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "T5Model" + "model_architectures": null }, { - "description": "bert-japanese-ner このモデルは日本語の固有表現抽出タスクを目的として、京都大学 黒橋・褚・村脇研究室が公開しているBERT日本語Pretrainedモデルをベースにストックマーク株式会社が公開しているner-wikipedia-datasetでファインチューニングしたものです。 ", - "url": "https://huggingface.co./ken11/bert-japanese-ner", - "project_name": "bert-japanese-ner", + "description": "Japanese Wikipedia Human Retrieval dataset This is a Japanese question answereing dataset with retrieval on Wikipedia articles by trained human workers.", + "url": "https://huggingface.co./datasets/baobab-trees/wikipedia-human-retrieval-ja", + "project_name": "wikipedia-human-retrieval-ja", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2021-11-13 16:28:23", - "latest_commit": "2021-11-14 02:34:01", + "score": -0.0956092660434262, + "first_commit": "2024-01-15 13:52:30", + "latest_commit": "2024-03-19 04:25:44", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": 
"BertForTokenClassification" + "model_architectures": null }, { - "description": "roberta-small-japanese-aozora-char Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-aozora-char", - "project_name": "roberta-small-japanese-aozora-char", + "description": "Amenokaku-Code-Instruct Update: 2023/12/27データセットに JaxTon , プロになるJava のコードデータ 180 レコードを追加しました。 ", + "url": "https://huggingface.co./datasets/kunishou/amenokaku-code-instruct", + "project_name": "amenokaku-code-instruct", "downloads": 16, "source": "Hugging Face", - "score": -0.09262532336767697, - "first_commit": "2021-12-23 02:38:26", - "latest_commit": "2021-12-23 11:55:42", + "score": -0.0956092660434262, + "first_commit": "2023-10-01 01:04:50", + "latest_commit": "2024-04-01 17:01:54", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": null }, { - "description": "deberta-large-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-luw-upos", - "project_name": "deberta-large-japanese-luw-upos", + "description": "roberta-large-japanese-char-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-char-luw-upos", + "project_name": "roberta-large-japanese-char-luw-upos", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2022-05-26 14:52:32", - "latest_commit": "2023-01-14 23:15:30", + "score": -0.09561898407592521, + "first_commit": "2021-12-30 15:56:46", + "latest_commit": "2022-09-18 19:44:49", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "RobertaForTokenClassification" }, { - "description": "Barba Barba is a multilingual natural language inference model for textual entailment and zero-shot text classification, available as an end-to-end service through TensorFlow Serving.", - "url": "https://huggingface.co./hyperonym/barba", - "project_name": "barba", + "description": "ELECTRA base Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-base-japanese-generator", + "project_name": "electra-base-japanese-generator", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2023-04-29 06:27:12", - "latest_commit": "2023-04-29 13:45:12", + "score": -0.09561898407592521, + "first_commit": "2021-11-15 17:23:50", + "latest_commit": "2023-10-21 13:21:16", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "XLMRobertaForSequenceClassification" + "model_size": 0.035500000000000004, + "model_architectures": "ElectraForMaskedLM" }, { - "description": "こちらでアップロードできないので、civitaiにて先に公開しています。 ", - "url": "https://huggingface.co./sazyou-roukaku/AfterRealXL", - "project_name": "AfterRealXL", + "description": "wav2vec2-live-japanese https://github.com/ttop32/wav2vec2-live-japanese-translatorFine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese hiragana using the common_voice JSUT CSS10", + "url": "https://huggingface.co./ttop324/wav2vec2-live-japanese", + "project_name": "wav2vec2-live-japanese", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2023-09-23 08:43:02", - "latest_commit": "2023-10-01 18:12:09", + "score": -0.09561898407592521, + 
"first_commit": "2021-10-26 14:51:21", + "latest_commit": "2021-10-31 15:34:55", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": null + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", - "url": "https://huggingface.co./youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", - "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", + "description": "ku-accms/bert-base-japanese-ssuw Model description This is a pre-trained Japanese BERT base model for super short unit words (SSUW).", + "url": "https://huggingface.co./ku-accms/bert-base-japanese-ssuw", + "project_name": "bert-base-japanese-ssuw", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2024-06-02 04:49:31", - "latest_commit": "2024-06-02 04:52:45", + "score": -0.09561898407592521, + "first_commit": "2023-04-11 13:57:30", + "latest_commit": "2023-04-12 04:40:42", "languages": [], "model_or_dataset": "model", - "model_size": 70.6, - "model_architectures": null + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "Mixtral-8x7B-Instruct-v0.1-japanese Mixtral-8x7B-Instruct-v0.1-japaneseはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", - "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese", - "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese", + "description": "ベースモデル:cl-tohoku/bert-base-japanese-whole-word-masking データセット:llm-book/wrime-sentiment オプティマイザ: adamw Optunaでハイパーパラメータ探索 学習率スケジュールのタイプ(lr_scheduler_type):", + "url": "https://huggingface.co./A-Funakoshi/bert-base-japanese-v3-wrime-v2", + "project_name": "bert-base-japanese-v3-wrime-v2", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2024-04-17 02:56:15", - "latest_commit": "2024-04-20 09:14:27", + "score": -0.09561898407592521, + "first_commit": "2023-10-27 12:05:45", + "latest_commit": "2023-10-27 12:16:22", "languages": [], "model_or_dataset": "model", - "model_size": 46.9, - "model_architectures": "MixtralForCausalLM" + "model_size": null, + "model_architectures": "BertForSequenceClassification" }, { - "description": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-base-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", - "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", + "description": "LINE DistilBERT Japanese (forked by liwii)", + "url": "https://huggingface.co./liwii/line-distilbert-base-japanese-fork", + "project_name": "line-distilbert-base-japanese-fork", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2024-01-17 04:41:20", - "latest_commit": "2024-01-17 04:46:18", + "score": -0.09561898407592521, + "first_commit": "2023-12-01 08:26:36", + "latest_commit": "2023-12-01 09:16:46", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": "MistralForCausalLM" + "model_size": null, + "model_architectures": "DistilBertForMaskedLM" }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": 
"https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GPTQ", - "project_name": "japanese-stablelm-instruct-beta-70B-GPTQ", + "description": "Style-Bert-VITS2 Japanese Only Sakura Miko こちらは「さくらみこ」の音声データセットに基づいて学習されたVITS-TTSモデルです。 ", + "url": "https://huggingface.co./Lycoris53/style-bert-vits2-sakura-miko", + "project_name": "style-bert-vits2-sakura-miko", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2023-11-02 15:45:24", - "latest_commit": "2023-11-02 20:04:07", + "score": -0.09561898407592521, + "first_commit": "2024-05-27 14:58:38", + "latest_commit": "2024-05-28 03:02:14", "languages": [], "model_or_dataset": "model", - "model_size": 9.1, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": null }, { - "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-with-auto-jumanpp", - "project_name": "deberta-v2-base-japanese-with-auto-jumanpp", + "description": "Vecteus-V2-7B このモデルは、ベクトルマージなどを用い作成された高性能ベースモデルです。 ", + "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-V2-7B", + "project_name": "Vecteus-V2-7B", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2023-09-07 06:04:29", - "latest_commit": "2023-11-20 06:00:08", + "score": -0.09561898407592521, + "first_commit": "2024-06-16 03:51:43", + "latest_commit": "2024-07-06 13:39:41", "languages": [], "model_or_dataset": "model", - "model_size": 0.137, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": 7.24, + "model_architectures": "MistralForCausalLM" }, { - "description": "bart-large-japanese This model is converted from the original Japanese BART Pretrained model released by Kyoto University.", - "url": "https://huggingface.co./Formzu/bart-large-japanese", - "project_name": "bart-large-japanese", + "description": "rinna/nekomata-14b-gguf Overview The model is the GGUF version of rinna/nekomata-14b.", + "url": "https://huggingface.co./rinna/nekomata-14b-gguf", + "project_name": "nekomata-14b-gguf", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2022-10-31 06:53:19", - "latest_commit": "2022-11-07 12:06:32", + "score": -0.09561898407592521, + "first_commit": "2023-12-19 08:11:51", + "latest_commit": "2024-07-20 08:29:58", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MBartForConditionalGeneration" + "model_size": 14.2, + "model_architectures": null }, { - "description": "Japanese Stable Diffusion Pokemon Model Card Stable-Diffusion-Pokemon-ja is a Japanese-specific latent text-to-image diffusion model capable of generating Pokemon images given any text input.", - "url": "https://huggingface.co./svjack/Stable-Diffusion-Pokemon-ja", - "project_name": "Stable-Diffusion-Pokemon-ja", + "description": "A pretrained Japanese TTS model intended for use in VITS-JaPros-WebUI.", + "url": "https://huggingface.co./litagin/vits-japros-pretrained", + "project_name": "vits-japros-pretrained", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2022-10-30 08:19:13", - "latest_commit": "2023-05-16 09:23:49", + "score": -0.09561898407592521, + "first_commit": "2023-09-30 00:16:22", + "latest_commit": "2023-10-11 09:55:47", 
"languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "ku-nlp/roberta-large-japanese-char-wwm Model description This is a Japanese RoBERTa large model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./ku-nlp/roberta-large-japanese-char-wwm", - "project_name": "roberta-large-japanese-char-wwm", + "description": "Japanese BERT-base (Sudachi + Unigram)", + "url": "https://huggingface.co./hitachi-nlp/bert-base-japanese_sudachi-unigram", + "project_name": "bert-base-japanese_sudachi-unigram", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2022-09-18 08:10:44", - "latest_commit": "2023-03-19 01:58:12", + "score": -0.09561898407592521, + "first_commit": "2023-06-14 07:16:29", + "latest_commit": "2023-06-16 01:03:54", "languages": [], "model_or_dataset": "model", - "model_size": 0.323, - "model_architectures": "RobertaForMaskedLM" + "model_size": null, + "model_architectures": "BertForMaskedLM" }, { - "description": "Google's mt5-base fine-tuned in Japanese to summarize patent claims in a limited Pharmaceutical domain. ", - "url": "https://huggingface.co./kz/mt5base-finetuned-patentsum-japanese-small", - "project_name": "mt5base-finetuned-patentsum-japanese-small", + "description": "TakoMT", + "url": "https://huggingface.co./staka/takomt", + "project_name": "takomt", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2021-04-10 00:31:15", - "latest_commit": "2022-05-19 06:50:32", + "score": -0.09561898407592521, + "first_commit": "2022-05-08 03:52:40", + "latest_commit": "2023-08-15 17:32:13", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MT5ForConditionalGeneration" + "model_architectures": "MarianMTModel" }, { - "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-generator", - "project_name": "electra-small-paper-japanese-generator", + "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-fin-generator", + "project_name": "electra-small-paper-japanese-fin-generator", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2021-10-04 13:47:24", - "latest_commit": "2023-10-21 13:21:31", + "score": -0.09561898407592521, + "first_commit": "2021-10-04 13:38:47", + "latest_commit": "2023-10-21 13:21:24", "languages": [], "model_or_dataset": "model", "model_size": 0.00491, "model_architectures": "ElectraForMaskedLM" }, { - "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-generator", - "project_name": "electra-small-japanese-generator", + "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-japanese-fin-generator", + "project_name": "electra-small-japanese-fin-generator", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2021-10-04 13:43:37", - "latest_commit": "2023-10-21 13:21:28", + "score": -0.09561898407592521, + 
"first_commit": "2021-10-04 14:07:16", + "latest_commit": "2023-10-21 13:21:23", "languages": [], "model_or_dataset": "model", "model_size": 0.013800000000000002, "model_architectures": "ElectraForMaskedLM" }, { - "description": "BERT base Japanese model This repository contains a BERT base model trained on Japanese Wikipedia dataset.", - "url": "https://huggingface.co./colorfulscoop/bert-base-ja", - "project_name": "bert-base-ja", + "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", + "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-300m-japanese", + "project_name": "wav2vec2-xls-r-300m-japanese", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2021-07-30 10:11:35", - "latest_commit": "2021-09-23 15:46:05", + "score": -0.09561898407592521, + "first_commit": "2022-01-26 15:43:02", + "latest_commit": "2022-03-23 18:34:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForPreTraining" + "model_architectures": "Wav2Vec2ForCTC" }, { - "description": "JP Voice-Text Dataset for", - "url": "https://huggingface.co./datasets/deepghs/fgo_voices_jp", - "project_name": "fgo_voices_jp", + "description": "GitHub リポジトリ cl-tohoku/quiz-datasets で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/aio-passages", + "project_name": "aio-passages", "downloads": 15, "source": "Hugging Face", - "score": -0.09263592831821331, - "first_commit": "2024-08-28 08:56:04", - "latest_commit": "2024-08-28 09:14:22", + "score": -0.09561898407592521, + "first_commit": "2023-06-06 02:03:34", + "latest_commit": "2023-06-24 05:55:37", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "NLLB-200 1.3B fine-tuned on Ascendance of a Bookworm", - "url": "https://huggingface.co./thefrigidliquidation/nllb-200-distilled-1.3B-bookworm", - "project_name": "nllb-200-distilled-1.3B-bookworm", + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-mC4-Wikipedia", + "project_name": "t5-base-japanese-mC4-Wikipedia", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2022-07-27 20:39:08", - "latest_commit": "2024-04-14 18:45:22", + "score": -0.09562870210842421, + "first_commit": "2021-06-30 12:53:09", + "latest_commit": "2021-09-23 18:29:58", "languages": [], "model_or_dataset": "model", - "model_size": 1.37, - "model_architectures": "M2M100ForConditionalGeneration" + "model_size": null, + "model_architectures": null + }, + { + "description": "japanese-gpt2-medium-unidic This is a medium-sized Japanese GPT-2 model using BERT-like tokenizer.", + "url": "https://huggingface.co./okazaki-lab/japanese-gpt2-medium-unidic", + "project_name": "japanese-gpt2-medium-unidic", + "downloads": 14, + "source": "Hugging Face", + "score": -0.09562870210842421, + "first_commit": "2023-02-27 05:42:22", + "latest_commit": "2023-03-22 06:22:32", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.362, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "(English part follows Japanese one.", - "url": "https://huggingface.co./tohoku-nlp/stable-diffusion-xl-jp-base-1.0", - "project_name": "stable-diffusion-xl-jp-base-1.0", + "description": "◆REV-Mix \"レボリューション\"なモデルです。 ", + "url": "https://huggingface.co./Hemlok/REV-Mix", + "project_name": "REV-Mix", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-11-06 05:02:27", - "latest_commit": "2023-11-06 05:37:01", + "score": -0.09562870210842421, + "first_commit": "2023-08-06 17:04:53", + "latest_commit": "2023-08-26 16:19:02", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-3.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-3.0bpw-h6-exl2", + "description": "Heron BLIP Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v0", + "project_name": "heron-chat-blip-ja-stablelm-base-7b-v0", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-12-07 17:52:29", - "latest_commit": "2023-12-07 18:54:23", + "score": -0.09562870210842421, + "first_commit": "2023-09-06 09:31:44", + "latest_commit": "2023-09-07 16:59:14", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_architectures": "VideoBlipForConditionalGeneration" }, { - "description": "This model was created by merging intfloat/e5-mistral-7b-instruct and stabilityai/japanese-stablelm-base-gamma-7b.", - "url": "https://huggingface.co./oshizo/japanese-e5-mistral-7b_slerp", - "project_name": "japanese-e5-mistral-7b_slerp", + "description": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", + "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2024-01-04 12:33:19", - "latest_commit": "2024-01-05 15:48:24", + "score": -0.09562870210842421, + "first_commit": "2023-09-26 06:14:25", + "latest_commit": "2023-09-27 01:23:34", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": 
"MistralModel" + "model_size": 0.487, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "SpiralAI Spiral-RetNet-3b-base We have conducted pre-training from scratch on the RetNet (https://arxiv.org/abs/2307.08621)", - "url": "https://huggingface.co./Spiral-AI/Spiral-RetNet-3b-base", - "project_name": "Spiral-RetNet-3b-base", + "description": "Model Description", + "url": "https://huggingface.co./knosing/japanese_ner_model", + "project_name": "japanese_ner_model", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2024-04-30 09:33:26", - "latest_commit": "2024-05-01 04:54:26", + "score": -0.09562870210842421, + "first_commit": "2024-05-08 06:15:37", + "latest_commit": "2024-05-08 07:06:22", "languages": [], "model_or_dataset": "model", - "model_size": 2.86, - "model_architectures": "RetNetForCausalLM" + "model_size": 0.111, + "model_architectures": "BertForTokenClassification" }, { - "description": "はじめに GoogleのGemma-2Bを日本語で使えるように継続事前学習を施した、商用利用可能なベースモデルです。 ", - "url": "https://huggingface.co./alfredplpl/suzume-poc", - "project_name": "suzume-poc", + "description": "Oumuamua-7b-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. ", + "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP", + "project_name": "Oumuamua-7b-RP", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2024-03-14 09:51:38", - "latest_commit": "2024-03-17 15:05:20", + "score": -0.09562870210842421, + "first_commit": "2024-06-23 12:30:16", + "latest_commit": "2024-06-23 17:06:53", "languages": [], "model_or_dataset": "model", - "model_size": 2.51, - "model_architectures": "GemmaForCausalLM" + "model_size": 7.33, + "model_architectures": "MistralForCausalLM" }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", + "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", + "url": "https://huggingface.co./masato12/bert-base-japanese-v3-jsts-with-tokenizer", + "project_name": "bert-base-japanese-v3-jsts-with-tokenizer", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-10-28 20:16:15", - "latest_commit": "2023-10-28 15:16:25", + "score": -0.09562870210842421, + "first_commit": "2024-07-21 04:58:46", + "latest_commit": "2024-07-21 18:21:41", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_architectures": "BertForSequenceClassification" }, { - "description": "ku-accms/bert-base-japanese-ssuw Model description This is a pre-trained Japanese BERT base model for super short unit words (SSUW).", - "url": "https://huggingface.co./ku-accms/bert-base-japanese-ssuw", - "project_name": "bert-base-japanese-ssuw", + "description": "Mixtral-8x7B-Instruct-v0.1-japanese Mixtral-8x7B-Instruct-v0.1-japaneseはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese", + "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-04-11 13:57:30", - "latest_commit": "2023-04-12 04:40:42", + "score": -0.09562870210842421, + "first_commit": 
"2024-04-17 02:56:15", + "latest_commit": "2024-04-20 09:14:27", + "languages": [], + "model_or_dataset": "model", + "model_size": 46.9, + "model_architectures": "MixtralForCausalLM" + }, + { + "description": "deberta-base-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic-luw-upos", + "project_name": "deberta-base-japanese-unidic-luw-upos", + "downloads": 14, + "source": "Hugging Face", + "score": -0.09562870210842421, + "first_commit": "2022-06-08 08:26:25", + "latest_commit": "2024-08-20 20:15:13", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "japanese-gpt2-medium-unidic This is a medium-sized Japanese GPT-2 model using BERT-like tokenizer.", - "url": "https://huggingface.co./okazaki-lab/japanese-gpt2-medium-unidic", - "project_name": "japanese-gpt2-medium-unidic", + "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-generator", + "project_name": "electra-small-paper-japanese-generator", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-02-27 05:42:22", - "latest_commit": "2023-03-22 06:22:32", + "score": -0.09562870210842421, + "first_commit": "2021-10-04 13:47:24", + "latest_commit": "2023-10-21 13:21:31", "languages": [], "model_or_dataset": "model", - "model_size": 0.362, - "model_architectures": "GPT2LMHeadModel" + "model_size": 0.00491, + "model_architectures": "ElectraForMaskedLM" }, { - "description": "bart-base-japanese This model is converted from the original Japanese BART Pretrained model released by Kyoto University.", - "url": "https://huggingface.co./Formzu/bart-base-japanese", - "project_name": "bart-base-japanese", + "description": "Heron GIT Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-git-ja-stablelm-base-7b-v1", + "project_name": "heron-chat-git-ja-stablelm-base-7b-v1", + "downloads": 14, + "source": "Hugging Face", + "score": -0.09562870210842421, + "first_commit": "2024-03-29 09:09:32", + "latest_commit": "2024-05-02 07:55:57", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.32, + "model_architectures": "GitJapaneseStableLMAlphaForCausalLM" + }, + { + "description": "Synthetic-JP-EN-Coding-Dataset-Magpie-69k Magpieの手法を様々なモデルに対して適用し作成した、約69000件の日本語・英語のコーディング対話データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-EN-Coding-Dataset-Magpie-69k", + "project_name": "Synthetic-JP-EN-Coding-Dataset-Magpie-69k", + "downloads": 14, + "source": "Hugging Face", + "score": -0.09562870210842421, + "first_commit": "2024-07-11 10:19:45", + "latest_commit": "2024-07-11 12:07:01", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "ShareGPT-Processed The RyokoAI/ShareGPT52K dataset, converted to Markdown and labeled with the language used.", + "url": "https://huggingface.co./datasets/zetavg/ShareGPT-Processed", + "project_name": "ShareGPT-Processed", + "downloads": 14, + "source": "Hugging Face", + "score": -0.09562870210842421, + "first_commit": "2023-05-16 19:50:04", + "latest_commit": "2023-05-21 03:50:14", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + 
"description": "JBLiMP This is the data from \"JBLiMP: Japanese Benchmark of Linguistic Minimal Pairs\" (Someya and Oseki, 2023).", + "url": "https://huggingface.co./datasets/polm-stability/jblimp", + "project_name": "jblimp", "downloads": 14, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2022-10-31 06:52:38", - "latest_commit": "2022-11-07 11:13:39", + "score": -0.09562870210842421, + "first_commit": "2023-05-29 09:31:31", + "latest_commit": "2023-05-29 18:49:16", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "MBartForConditionalGeneration" + "model_architectures": null }, { - "description": "japanese-soseki-gpt2-1b", - "url": "https://huggingface.co./jweb/japanese-soseki-gpt2-1b", - "project_name": "japanese-soseki-gpt2-1b", - "downloads": 14, + "description": "jpn-heb source group: Japanese target group:", + "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-he", + "project_name": "opus-mt-ja-he", + "downloads": 13, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2022-03-03 04:53:15", - "latest_commit": "2023-03-27 12:09:04", + "score": -0.09563842014092322, + "first_commit": "2020-08-19 00:28:58", + "latest_commit": "2023-08-16 11:59:12", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "MarianMTModel" }, { - "description": "roberta-large-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora", - "project_name": "roberta-large-japanese-aozora", - "downloads": 14, + "description": "名言推論モデル", + "url": "https://huggingface.co./Momerio/meigen_generate_Japanese", + "project_name": "meigen_generate_Japanese", + "downloads": 13, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2021-12-26 13:08:52", - "latest_commit": "2022-10-15 14:22:11", + "score": -0.09563842014092322, + "first_commit": "2021-10-13 15:30:14", + "latest_commit": "2021-10-26 01:19:59", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": "GPT2LMHeadModel" }, { - "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/jawiki-paragraphs", - "project_name": "jawiki-paragraphs", - "downloads": 14, + "description": "Example ESPnet2 TTS model kan-bayashi/jsut_transformer_accent_with_pause ♻", + "url": "https://huggingface.co./espnet/kan-bayashi_jsut_transformer_accent_with_pause", + "project_name": "kan-bayashi_jsut_transformer_accent_with_pause", + "downloads": 13, "source": "Hugging Face", - "score": -0.09264653326874964, - "first_commit": "2023-06-03 03:04:05", - "latest_commit": "2023-06-03 03:04:43", + "score": -0.09563842014092322, + "first_commit": "2021-07-03 14:53:36", + "latest_commit": "2021-07-03 11:04:24", "languages": [], - "model_or_dataset": "dataset", + "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "bert-large-japanese-char-extended Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-char-extended", - "project_name": "bert-large-japanese-char-extended", + "description": "Japanese transformer pipeline (bert-base).", + "url": "https://huggingface.co./hiroshi-matsuda-rit/ja_gsd_bert_wwm_unidic_lite", + "project_name": "ja_gsd_bert_wwm_unidic_lite", 
"downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2021-06-04 13:29:34", - "latest_commit": "2024-08-20 17:45:37", + "score": -0.09563842014092322, + "first_commit": "2021-07-08 12:11:06", + "latest_commit": "2021-08-11 20:25:04", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForMaskedLM" + "model_architectures": null }, { - "description": "Unihan LM: Coarse-to-Fine Chinese-Japanese Language Model Pretraining with the Unihan Database Model description Chinese and Japanese share many characters with similar surface morphology.", - "url": "https://huggingface.co./microsoft/unihanlm-base", - "project_name": "unihanlm-base", + "description": "deberta-base-japanese-wikipedia-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-luw-upos", + "project_name": "deberta-base-japanese-wikipedia-luw-upos", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2020-09-27 11:23:02", - "latest_commit": "2021-09-22 11:00:56", + "score": -0.09563842014092322, + "first_commit": "2022-06-25 06:28:11", + "latest_commit": "2024-08-20 17:53:34", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "XLMModel" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "wav2vec2-live-japanese https://github.com/ttop32/wav2vec2-live-japanese-translatorFine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese hiragana using the common_voice JSUT CSS10", - "url": "https://huggingface.co./ttop324/wav2vec2-live-japanese", - "project_name": "wav2vec2-live-japanese", + "description": "roberta-base-japanese-jsnli This model is a fine-tuned version of nlp-waseda/roberta-base-japanese on the JSNLI dataset.", + "url": "https://huggingface.co./Formzu/roberta-base-japanese-jsnli", + "project_name": "roberta-base-japanese-jsnli", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2021-10-26 14:51:21", - "latest_commit": "2021-10-31 15:34:55", + "score": -0.09563842014092322, + "first_commit": "2022-10-14 07:50:47", + "latest_commit": "2022-10-19 11:08:59", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "Wav2Vec2ForCTC" + "model_architectures": "RobertaForSequenceClassification" }, { - "description": "nlp-waseda/roberta-large-japanese-with-auto-jumanpp Model description", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-with-auto-jumanpp", - "project_name": "roberta-large-japanese-with-auto-jumanpp", + "description": "このモデルはdeberta-v2-large-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-large-japanese-finetuned-ner", + "project_name": "deberta-v2-large-japanese-finetuned-ner", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2022-10-15 05:40:40", - "latest_commit": "2022-10-21 15:55:27", + "score": -0.09563842014092322, + "first_commit": "2023-05-10 13:22:23", + "latest_commit": "2023-07-21 14:10:02", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_size": 0.339, + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Model Card for Model ID", - "url": "https://huggingface.co./kkuramitsu/mt5-mini9L", - "project_name": "mt5-mini9L", + "description": 
"llm-jp-13b-instruct-lora-jaster-v1.0", + "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-v1.0", + "project_name": "llm-jp-13b-instruct-lora-jaster-v1.0", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2023-03-30 01:11:17", - "latest_commit": "2023-10-15 10:56:23", + "score": -0.09563842014092322, + "first_commit": "2023-10-18 18:53:58", + "latest_commit": "2023-10-20 08:41:20", "languages": [], "model_or_dataset": "model", - "model_size": 0.08040000000000001, - "model_architectures": "MT5ForConditionalGeneration" + "model_size": null, + "model_architectures": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GPTQ", - "project_name": "japanese-stablelm-base-beta-70B-GPTQ", + "description": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", + "project_name": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2023-11-06 11:33:47", - "latest_commit": "2023-11-06 16:00:08", + "score": -0.09563842014092322, + "first_commit": "2024-03-03 12:51:40", + "latest_commit": "2024-03-03 13:39:01", "languages": [], "model_or_dataset": "model", - "model_size": 9.1, - "model_architectures": "LlamaForCausalLM" + "model_size": 21.5, + "model_architectures": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-5.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-5.0bpw-h6-exl2", + "description": "mathstral-7B-v0.1-gguf mistralaiさんが公開しているmathstral-7B-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/mathstral-7B-v0.1-gguf", + "project_name": "mathstral-7B-v0.1-gguf", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2023-12-07 18:07:21", - "latest_commit": "2023-12-07 18:54:27", + "score": -0.09563842014092322, + "first_commit": "2024-07-17 17:49:56", + "latest_commit": "2024-07-17 18:54:27", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_size": 7.25, + "model_architectures": null }, { - "description": "Model Card for Model ID Fine tunned ASR model from distil-whisper/distil-large-v2.", - "url": "https://huggingface.co./spow12/Visual-novel-transcriptor", - "project_name": "Visual-novel-transcriptor", + "description": "Model Card for Model ID MMedBench and KoreanMedMCQA Instruction Fine-Tuned Multilingual Llama3 8B 4Bit quantized model using QLoRA.", + "url": "https://huggingface.co./SpassMedAI/MLMedLlama3", + "project_name": "MLMedLlama3", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2024-04-15 01:43:08", - "latest_commit": "2024-08-12 12:39:52", + "score": -0.09563842014092322, + "first_commit": "2024-08-20 17:08:05", + "latest_commit": "2024-08-28 15:39:15", "languages": [], "model_or_dataset": "model", - "model_size": 0.756, - "model_architectures": "WhisperForConditionalGeneration" + "model_size": 4.65, + "model_architectures": "LlamaForCausalLM" }, { "description": "gpt2-medium-japanese-ud-causal Model Description", @@ -13146,7 +13466,7 @@ "project_name": "gpt2-medium-japanese-ud-causal", "downloads": 13, "source": "Hugging Face", - "score": 
-0.09265713821928598, + "score": -0.09563842014092322, "first_commit": "2024-08-22 23:01:26", "latest_commit": "2024-08-25 17:55:41", "languages": [], @@ -13155,410 +13475,452 @@ "model_architectures": "GPT2ForTokenClassification" }, { - "description": "gpt2-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-large-japanese-upos", - "project_name": "gpt2-large-japanese-upos", + "description": "rinna-gpt-neox-small-japanese-ud-causal Model Description", + "url": "https://huggingface.co./KoichiYasuoka/rinna-gpt-neox-small-japanese-ud-causal", + "project_name": "rinna-gpt-neox-small-japanese-ud-causal", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2024-06-22 22:44:06", - "latest_commit": "2024-07-27 07:49:47", + "score": -0.09563842014092322, + "first_commit": "2024-09-08 01:53:03", + "latest_commit": "2024-09-12 22:30:27", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2ForTokenClassification" + "model_architectures": "GPTNeoXForTokenClassification" }, { - "description": "Assistance のGGUF版 Our Models for GGUF Vecteus-GGUF Ninja-v1-GGUF Ninja-v1-NSFW-GGUF Ninja-v1-128k-GGUF Ninja-v1-NSFW-128k-GGUF", - "url": "https://huggingface.co./Local-Novel-LLM-project/Assistance-GGUF", - "project_name": "Assistance-GGUF", + "description": "INPUT: Japanese name in ROMAJI FORM OUTPUT:", + "url": "https://huggingface.co./tarudesu/gendec-with-distilmbert", + "project_name": "gendec-with-distilmbert", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2024-05-03 04:03:47", - "latest_commit": "2024-05-03 04:30:45", + "score": -0.09563842014092322, + "first_commit": "2023-11-14 02:12:39", + "latest_commit": "2024-03-23 16:49:33", "languages": [], "model_or_dataset": "model", - "model_size": 7.24, - "model_architectures": null + "model_size": null, + "model_architectures": "DistilBertForSequenceClassification" }, { - "description": "DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", - "url": "https://huggingface.co./DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", - "project_name": "alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GPTQ", + "project_name": "japanese-stablelm-instruct-beta-70B-GPTQ", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2024-04-20 08:49:14", - "latest_commit": "2024-04-20 08:49:19", + "score": -0.09563842014092322, + "first_commit": "2023-11-02 15:45:24", + "latest_commit": "2023-11-02 20:04:07", "languages": [], "model_or_dataset": "model", - "model_size": 1.39, - "model_architectures": null + "model_size": 9.1, + "model_architectures": "LlamaForCausalLM" }, { - "description": "deberta-base-japanese-wikipedia-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia-luw-upos", - "project_name": "deberta-base-japanese-wikipedia-luw-upos", + "description": "Aerner LM-v1 事前学習から全部日本語で学習させたモデルです。 ", + "url": "https://huggingface.co./aerner/lm-v1", + "project_name": "lm-v1", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2022-06-25 06:28:11", - "latest_commit": "2024-08-20 17:53:34", + "score": -0.09563842014092322, + "first_commit": "2023-05-25 12:35:32", + "latest_commit": "2023-05-25 13:35:34", "languages": [], 
"model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "LlamaForCausalLM" }, { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-mC4-Wikipedia", - "project_name": "t5-base-japanese-mC4-Wikipedia", + "description": "A Japanese dataset generated with an opensource elyza/ELYZA-japanese-Llama-2-13b-instruct model.", + "url": "https://huggingface.co./datasets/iam-ajaymeena/Self-Instruct-Japanese-Elzya-13B", + "project_name": "Self-Instruct-Japanese-Elzya-13B", "downloads": 13, "source": "Hugging Face", - "score": -0.09265713821928598, - "first_commit": "2021-06-30 12:53:09", - "latest_commit": "2021-09-23 18:29:58", + "score": -0.09563842014092322, + "first_commit": "2024-06-15 08:11:55", + "latest_commit": "2024-06-16 09:12:56", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "RakutenAI-7B-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/RakutenAI-7B-upos", - "project_name": "RakutenAI-7B-upos", - "downloads": 12, + "description": "⚠", + "url": "https://huggingface.co./datasets/hotchpotch/jaqket_v1_qa_wikija_context", + "project_name": "jaqket_v1_qa_wikija_context", + "downloads": 13, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-07-11 06:42:57", - "latest_commit": "2024-08-20 17:20:57", + "score": -0.09563842014092322, + "first_commit": "2023-12-07 08:36:30", + "latest_commit": "2024-02-25 06:14:45", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "MistralForTokenClassification" + "model_architectures": null }, { - "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-1b on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", - "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-1b-japanese-hiragana-katakana", - "project_name": "wav2vec2-xls-r-1b-japanese-hiragana-katakana", + "description": "alpaca_jp_python alpaca_jp_pythonは、 Stanford Alpacaの手法 mistralai/Mixtral-8x22B-Instruct-v0.1 で作った合成データ(Synthetic data)です。", + "url": "https://huggingface.co./datasets/HachiML/alpaca_jp_python", + "project_name": "alpaca_jp_python", + "downloads": 13, + "source": "Hugging Face", + "score": -0.09563842014092322, + "first_commit": "2024-05-16 02:02:09", + "latest_commit": "2024-05-20 01:44:32", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null + }, + { + "description": "bert-base-irony", + "url": "https://huggingface.co./kit-nlp/bert-base-japanese-basic-char-v2-irony", + "project_name": "bert-base-japanese-basic-char-v2-irony", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2022-02-04 11:27:09", - "latest_commit": "2022-03-24 11:56:32", + "score": -0.09564813817342221, + "first_commit": "2022-11-07 07:33:23", + "latest_commit": "2022-11-08 00:10:26", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "Wav2Vec2ForCTC" + "model_architectures": "BertForSequenceClassification" }, { - "description": "Example ESPnet2 TTS model kan-bayashi/jsut_transformer_accent_with_pause ♻", - "url": "https://huggingface.co./espnet/kan-bayashi_jsut_transformer_accent_with_pause", - "project_name": 
"kan-bayashi_jsut_transformer_accent_with_pause", + "description": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", + "project_name": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2021-07-03 14:53:36", - "latest_commit": "2021-07-03 11:04:24", + "score": -0.09564813817342221, + "first_commit": "2024-03-07 13:21:38", + "latest_commit": "2024-03-07 13:47:58", "languages": [], "model_or_dataset": "model", - "model_size": null, + "model_size": 11.2, "model_architectures": null }, { - "description": "ELECTRA small Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-discriminator", - "project_name": "electra-small-japanese-discriminator", + "description": "XML-RoBERTa-NER-Japanese This model is a fine-tuned version of xlm-roberta-base on the Wikipedia Japanese NER dataset from Stockmark Inc.", + "url": "https://huggingface.co./ithattieu/XML-RoBERTa-NER-Japanese", + "project_name": "XML-RoBERTa-NER-Japanese", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2021-10-04 13:42:57", - "latest_commit": "2022-12-09 00:41:39", + "score": -0.09564813817342221, + "first_commit": "2024-08-17 08:18:04", + "latest_commit": "2024-08-18 04:03:33", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "ElectraForPreTraining" + "model_size": 0.277, + "model_architectures": "RobertaForTokenClassification" }, { - "description": "nlp-waseda/gpt2-small-japanese-wikipedia This model is Japanese GPT-2 pretrained on Japanese Wikipedia.", - "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese-wikipedia", - "project_name": "gpt2-small-japanese-wikipedia", + "description": "rinna-gpt2-medium-japanese-ud-causal Model Description", + "url": "https://huggingface.co./KoichiYasuoka/rinna-gpt2-medium-japanese-ud-causal", + "project_name": "rinna-gpt2-medium-japanese-ud-causal", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2021-12-28 01:22:40", - "latest_commit": "2021-12-28 15:31:38", + "score": -0.09564813817342221, + "first_commit": "2024-09-07 07:54:18", + "latest_commit": "2024-09-12 22:28:53", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "GPT2LMHeadModel" + "model_architectures": "GPT2ForTokenClassification" }, { - "description": "このモデルはdeberta-v2-tiny-japaneseをファインチューニングしてQAタスクに用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-tiny-japanese-finetuned-QA", - "project_name": "deberta-v2-tiny-japanese-finetuned-QA", + "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", + "url": "https://huggingface.co./AbeShinzo0708/Japanese-Starling-ChatV-7B-exl2", + "project_name": "Japanese-Starling-ChatV-7B-exl2", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-05-11 10:34:38", - "latest_commit": "2023-05-11 10:38:32", + "score": -0.09564813817342221, + "first_commit": "2024-04-22 09:34:13", + "latest_commit": "2024-04-22 09:39:09", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForQuestionAnswering" + 
"model_architectures": "MistralForCausalLM" }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-8.0bpw-h8-exl2", - "project_name": "shisa-7b-v1-8.0bpw-h8-exl2", + "description": "ebisuke/liz-nojaloli-ja License MIT Licenseベースとしてrinna/japanese-gpt-neox-3.6bを使用しています。 ", + "url": "https://huggingface.co./ebisuke/liz-nojaloli-ja", + "project_name": "liz-nojaloli-ja", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-12-07 18:22:23", - "latest_commit": "2023-12-07 18:54:33", + "score": -0.09564813817342221, + "first_commit": "2023-05-23 16:59:22", + "latest_commit": "2023-05-30 16:01:20", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_architectures": "GPTNeoXForCausalLM" }, { - "description": "Overview This model is based on rinna's [rinna/llama-3-youko-8b], fine-tuned using LoRA on a small number of parallel sentences from English to Japanese.", - "url": "https://huggingface.co./lyu-boxuan/llama-3-youko-8b-En-Ja-MT-LoRA", - "project_name": "llama-3-youko-8b-En-Ja-MT-LoRA", + "description": "Japanese DialoGPT trained with Aozora (ja) 青空文庫のセリフで学習した日本語のDialoGPT Smallです(en) Japanese DialoGPT Small trained on Aozora Bunko.", + "url": "https://huggingface.co./akiFQC/japanese-dialogpt-small-aozora", + "project_name": "japanese-dialogpt-small-aozora", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-05-10 14:33:57", - "latest_commit": "2024-05-21 14:54:46", + "score": -0.09564813817342221, + "first_commit": "2023-02-08 13:22:24", + "latest_commit": "2023-02-09 00:55:31", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": null, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", - "project_name": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", + "description": "nlp-waseda/roberta-large-japanese-with-auto-jumanpp Model description", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-with-auto-jumanpp", + "project_name": "roberta-large-japanese-with-auto-jumanpp", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-03-03 12:51:40", - "latest_commit": "2024-03-03 13:39:01", + "score": -0.09564813817342221, + "first_commit": "2022-10-15 05:40:40", + "latest_commit": "2022-10-21 15:55:27", "languages": [], "model_or_dataset": "model", - "model_size": 21.5, - "model_architectures": null + "model_size": null, + "model_architectures": "RobertaForMaskedLM" }, { - "description": "COMET-GPT2 ja v2 Finetuned GPT-2 xl on the large version of ATOMIC ja using a causal language modeling (CLM) objective.", - "url": "https://huggingface.co./nlp-waseda/comet-gpt2-xl-japanese", - "project_name": "comet-gpt2-xl-japanese", + "description": "GPT2 Japanese base model version 2 Prerequisites transformers==4.19.2 Model architecture This model uses GPT2 base setttings except vocabulary size.", + "url": "https://huggingface.co./ClassCat/gpt2-base-japanese-v2", + "project_name": "gpt2-base-japanese-v2", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-09-26 13:37:52", - "latest_commit": "2024-03-11 04:16:02", 
+ "score": -0.09564813817342221, + "first_commit": "2022-06-04 02:30:34", + "latest_commit": "2022-06-25 15:36:22", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": "GPT2LMHeadModel" }, { - "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./G-Root/deberta-v2-base-japanese", - "project_name": "deberta-v2-base-japanese", + "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", + "url": "https://huggingface.co./yohida/yoshida_gpt", + "project_name": "yoshida_gpt", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-09-14 10:06:00", - "latest_commit": "2023-09-14 17:24:52", + "score": -0.09564813817342221, + "first_commit": "2022-02-04 10:03:54", + "latest_commit": "2022-02-04 10:13:45", "languages": [], "model_or_dataset": "model", - "model_size": 0.137, - "model_architectures": "DebertaV2ForMaskedLM" + "model_size": null, + "model_architectures": "GPT2LMHeadModel" }, { - "description": "このモデルはdeberta-v2-large-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-large-japanese-finetuned-ner", - "project_name": "deberta-v2-large-japanese-finetuned-ner", + "description": "This is a Japanese sentence-T5 model.", + "url": "https://huggingface.co./sonoisa/sentence-t5-base-ja-mean-tokens", + "project_name": "sentence-t5-base-ja-mean-tokens", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-05-10 13:22:23", - "latest_commit": "2023-07-21 14:10:02", + "score": -0.09564813817342221, + "first_commit": "2021-12-27 11:57:10", + "latest_commit": "2022-07-31 07:54:13", "languages": [], "model_or_dataset": "model", - "model_size": 0.339, - "model_architectures": "DebertaV2ForTokenClassification" + "model_size": null, + "model_architectures": "T5Model" }, { - "description": "deberta-small-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-luw-upos", - "project_name": "deberta-small-japanese-luw-upos", + "description": "doc2query/msmarco-japanese-mt5-base-v1 This is a doc2query model based on mT5 (also known as docT5query).", + "url": "https://huggingface.co./doc2query/msmarco-japanese-mt5-base-v1", + "project_name": "msmarco-japanese-mt5-base-v1", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2022-05-24 03:52:45", - "latest_commit": "2024-08-20 17:28:44", + "score": -0.09564813817342221, + "first_commit": "2022-04-29 12:05:21", + "latest_commit": "2022-04-29 14:05:37", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "DebertaV2ForTokenClassification" + "model_architectures": "MT5ForConditionalGeneration" }, { - "description": "roberta-small-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-aozora", - "project_name": "roberta-small-japanese-aozora", + "description": "Danbooru2023:", + "url": "https://huggingface.co./datasets/nyanko7/danbooru2023", + "project_name": "danbooru2023", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2021-11-02 12:54:50", - "latest_commit": "2021-11-03 23:44:50", + "score": -0.09564813817342221, + 
"first_commit": "2024-01-07 19:51:58", + "latest_commit": "2024-05-22 18:43:24", "languages": [], - "model_or_dataset": "model", + "model_or_dataset": "dataset", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": null }, { - "description": "Amenokaku-Code-Instruct Update: 2023/12/27データセットに JaxTon , プロになるJava のコードデータ 180 レコードを追加しました。 ", - "url": "https://huggingface.co./datasets/kunishou/amenokaku-code-instruct", - "project_name": "amenokaku-code-instruct", + "description": "Yandere2023:", + "url": "https://huggingface.co./datasets/nyanko7/yandere2023", + "project_name": "yandere2023", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2023-10-01 01:04:50", - "latest_commit": "2024-04-01 17:01:54", + "score": -0.09564813817342221, + "first_commit": "2024-01-07 10:31:53", + "latest_commit": "2024-05-06 08:22:23", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Introduction This is a LLM-filtered set of the first 1M rows from ntt's JParaCrawl v3 large English-Japanese parallel corpus.", - "url": "https://huggingface.co./datasets/Verah/JParaCrawl-Filtered-English-Japanese-Parallel-Corpus", - "project_name": "JParaCrawl-Filtered-English-Japanese-Parallel-Corpus", + "description": "Kendamarron/jimba-wiki-instruction-calm3 grapevine-AI/CALM3-22B-Chat-GGUFのQ4_K_Mを使った合成instructionデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/jimba-wiki-instruction-calm3", + "project_name": "jimba-wiki-instruction-calm3", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-03-01 06:17:09", - "latest_commit": "2024-03-07 21:20:21", + "score": -0.09564813817342221, + "first_commit": "2024-07-09 22:18:35", + "latest_commit": "2024-07-20 12:57:05", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "Synthetic-JP-EN-Coding-Dataset-Magpie-69k Magpieの手法を様々なモデルに対して適用し作成した、約69000件の日本語・英語のコーディング対話データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-EN-Coding-Dataset-Magpie-69k", - "project_name": "Synthetic-JP-EN-Coding-Dataset-Magpie-69k", + "description": "A slightly modified version of the parsing and chunking method for singletongue/wikipedia-utils.", + "url": "https://huggingface.co./datasets/oshizo/japanese-wikipedia-paragraphs", + "project_name": "japanese-wikipedia-paragraphs", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-07-11 10:19:45", - "latest_commit": "2024-07-11 12:07:01", + "score": -0.09564813817342221, + "first_commit": "2023-12-09 11:14:53", + "latest_commit": "2023-12-09 14:09:30", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "English - Japanese pairs taken from https://tatoeba.org/en/downloads and then deduplicated.", - "url": "https://huggingface.co./datasets/Verah/tatoeba_dedupe_en-jp_2024-March-01", - "project_name": "tatoeba_dedupe_en-jp_2024-March-01", + "description": "Allganize RAG Leaderboard とは Allganize RAG Leaderboard は、5つの業種ドメイン(金融、情報通信、製造、公共、流通・小売)において、日本語のRAGの性能評価を実施したものです。", + "url": "https://huggingface.co./datasets/allganize/RAG-Evaluation-Dataset-JA", + "project_name": "RAG-Evaluation-Dataset-JA", "downloads": 12, "source": "Hugging Face", - "score": -0.0926677431698223, - "first_commit": "2024-03-05 13:46:13", - "latest_commit": "2024-03-06 08:34:02", + 
"score": -0.09564813817342221, + "first_commit": "2024-09-03 09:00:27", + "latest_commit": "2024-09-13 00:53:44", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit The Model mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit was converted to MLX format from cyberagent/Llama-3.1-70B-Japanese-Instruct-2407 using mlx-lm version 0.16.1.", - "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-8bit", + "description": "「LLM-jp-3 172B beta1」利用規約 この利用規約(以下「本規約」といいます)は、大学共同利用機関法人 情報・システム研究機構 国立情報学研究所(以下「提供者」といいます)による開発の成果物として公開する大規模言語モデル「LLM-jp-3 172B beta1」(以下「本プログラム」といいます)の利用に関する条件を定めるものです。", + "url": "https://huggingface.co./llm-jp/llm-jp-3-172b-beta1", + "project_name": "llm-jp-3-172b-beta1", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-07-26 13:05:01", - "latest_commit": "2024-07-26 14:05:31", + "score": -0.09565785620592122, + "first_commit": null, + "latest_commit": null, "languages": [], "model_or_dataset": "model", - "model_size": 19.8, - "model_architectures": "LlamaForCausalLM" + "model_size": 172.0, + "model_architectures": null }, { - "description": "bert-base-japanese-unidic-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-unidic-luw-upos", - "project_name": "bert-base-japanese-unidic-luw-upos", + "description": "BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./hiroshi-matsuda-rit/bert-base-japanese-basic-char-v2", + "project_name": "bert-base-japanese-basic-char-v2", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2022-02-13 01:00:01", - "latest_commit": "2023-11-05 18:44:10", + "score": -0.09565785620592122, + "first_commit": "2021-08-04 11:01:49", + "latest_commit": "2021-09-23 16:49:50", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForTokenClassification" + "model_architectures": "BertForMaskedLM" }, { - "description": "bert-base-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-luw-upos", - "project_name": "bert-base-japanese-luw-upos", + "description": "Unihan LM: Coarse-to-Fine Chinese-Japanese Language Model Pretraining with the Unihan Database Model description Chinese and Japanese share many characters with similar surface morphology.", + "url": "https://huggingface.co./microsoft/unihanlm-base", + "project_name": "unihanlm-base", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2021-10-26 13:26:38", - "latest_commit": "2022-09-18 19:43:18", + "score": -0.09565785620592122, + "first_commit": "2020-09-27 11:23:02", + "latest_commit": "2021-09-22 11:00:56", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "BertForTokenClassification" + "model_architectures": "XLMModel" }, { - "description": "transformers-ud-japanese-electra-ginza (sudachitra-wordpiece, mC4 Japanese)", - "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza", - "project_name": "transformers-ud-japanese-electra-base-ginza", + "description": "deberta-large-japanese-luw-upos Model Description", + "url": 
"https://huggingface.co./KoichiYasuoka/deberta-large-japanese-luw-upos", + "project_name": "deberta-large-japanese-luw-upos", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2021-08-23 09:54:23", - "latest_commit": "2021-09-22 11:00:17", + "score": -0.09565785620592122, + "first_commit": "2022-05-26 14:52:32", + "latest_commit": "2023-01-14 23:15:30", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "ElectraForPreTraining" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "nlp-waseda/roberta-large-japanese-seq512-with-auto-jumanpp Model description", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512-with-auto-jumanpp", - "project_name": "roberta-large-japanese-seq512-with-auto-jumanpp", + "description": "deberta-small-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-luw-upos", + "project_name": "deberta-small-japanese-luw-upos", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2022-10-15 06:04:06", - "latest_commit": "2022-10-21 15:56:38", + "score": -0.09565785620592122, + "first_commit": "2022-05-24 03:52:45", + "latest_commit": "2024-08-20 17:28:44", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "RobertaForMaskedLM" + "model_architectures": "DebertaV2ForTokenClassification" }, { - "description": "Aerner LM-v1 事前学習から全部日本語で学習させたモデルです。 ", - "url": "https://huggingface.co./aerner/lm-v1", - "project_name": "lm-v1", + "description": "ESを書くAI Japanese GPT-2 modelをファインチューニングしました ファインチューニングには、内定者の二万件以上のESを用いました。 ", + "url": "https://huggingface.co./huranokuma/es", + "project_name": "es", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-05-25 12:35:32", - "latest_commit": "2023-05-25 13:35:34", + "score": -0.09565785620592122, + "first_commit": "2022-08-01 14:59:47", + "latest_commit": "2022-08-14 05:47:18", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "GPT2LMHeadModel" + }, + { + "description": "Wav2Vec2-XLS-R-300M-Japanese-Hiragana Fine-tuned facebook/wav2vec2-xls-r-300m on Japanese Hiragana characters using the Common Voice and JSUT.", + "url": "https://huggingface.co./slplab/wav2vec2-xls-r-300m-japanese-hiragana", + "project_name": "wav2vec2-xls-r-300m-japanese-hiragana", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2022-09-16 07:34:58", + "latest_commit": "2022-09-16 11:01:54", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "Wav2Vec2ForCTC" + }, + { + "description": "このモデルはdeberta-v2-tiny-japaneseをファインチューニングしてQAタスクに用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-tiny-japanese-finetuned-QA", + "project_name": "deberta-v2-tiny-japanese-finetuned-QA", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2023-05-11 10:34:38", + "latest_commit": "2023-05-11 10:38:32", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "DebertaV2ForQuestionAnswering" }, { "description": "whisper-large-v2-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-jp to the CTranslate2 model format.", @@ -13566,7 +13928,7 @@ 
"project_name": "faster-whisper-large-v2-jp", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, + "score": -0.09565785620592122, "first_commit": "2023-07-07 06:16:06", "latest_commit": "2023-07-07 18:09:09", "languages": [], @@ -13575,184 +13937,228 @@ "model_architectures": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-6.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-6.0bpw-h6-exl2", + "description": "Heron GIT Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-git-ja-stablelm-base-7b-v0", + "project_name": "heron-chat-git-ja-stablelm-base-7b-v0", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-12-07 18:14:46", - "latest_commit": "2023-12-07 18:58:23", + "score": -0.09565785620592122, + "first_commit": "2023-09-06 09:19:59", + "latest_commit": "2023-09-11 16:55:23", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_architectures": "GitJapaneseStableLMAlphaForCausalLM" }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-4.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-4.0bpw-h6-exl2", + "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./G-Root/deberta-v2-base-japanese", + "project_name": "deberta-v2-base-japanese", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-12-07 17:59:51", - "latest_commit": "2023-12-07 18:54:26", + "score": -0.09565785620592122, + "first_commit": "2023-09-14 10:06:00", + "latest_commit": "2023-09-14 17:24:52", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_size": 0.137, + "model_architectures": "DebertaV2ForMaskedLM" }, { - "description": "This repository contains a model trained (QLoRA-SFT)", - "url": "https://huggingface.co./taoki/phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", - "project_name": "phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", + "description": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", + "project_name": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-05-29 15:11:10", - "latest_commit": "2024-05-31 11:28:45", + "score": -0.09565785620592122, + "first_commit": "2023-09-26 06:15:51", + "latest_commit": "2023-09-27 23:56:05", "languages": [], "model_or_dataset": "model", - "model_size": 3.82, + "model_size": 0.861, + "model_architectures": "GPTNeoXForCausalLM" + }, + { + "description": "DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "url": "https://huggingface.co./DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "project_name": "alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2024-04-20 08:49:14", + "latest_commit": "2024-04-20 08:49:19", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.39, 
"model_architectures": null }, { - "description": "Llama-3-Umievo-itr014-Shizuko-8b このモデルは日本語��対応しているLlama-3ベースの4つのモデルを進化的アルゴリズムで進化的マージしたものです。", - "url": "https://huggingface.co./umiyuki/Llama-3-Umievo-itr014-Shizuko-8b", - "project_name": "Llama-3-Umievo-itr014-Shizuko-8b", + "description": "Ruri-Reranker: Japanese General Reranker Usage Direct Usage (Sentence Transformers)", + "url": "https://huggingface.co./cl-nagoya/ruri-reranker-stage1-base", + "project_name": "ruri-reranker-stage1-base", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-06-08 05:25:05", - "latest_commit": "2024-06-08 07:47:59", + "score": -0.09565785620592122, + "first_commit": "2024-08-19 16:14:12", + "latest_commit": "2024-09-04 08:52:18", "languages": [], "model_or_dataset": "model", - "model_size": 8.03, - "model_architectures": "LlamaForCausalLM" + "model_size": 0.111, + "model_architectures": "BertForSequenceClassification" }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", - "project_name": "SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", + "description": "HPLT Bert for Japanese This is one of the encoder-only monolingual language models trained as a first release by the HPLT project.", + "url": "https://huggingface.co./HPLT/hplt_bert_base_ja", + "project_name": "hplt_bert_base_ja", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-03-07 06:50:19", - "latest_commit": "2024-03-07 06:51:42", + "score": -0.09565785620592122, + "first_commit": "2024-04-22 01:23:46", + "latest_commit": "2024-07-11 11:36:10", "languages": [], "model_or_dataset": "model", "model_size": null, - "model_architectures": "LlamaForCausalLM" + "model_architectures": "LtgbertForMaskedLM" }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", + "description": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha Mixtral-8x7B-Instruct-v0.1-japanese-alphaはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施した学習途中のモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese-alpha", + "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-10-28 20:43:59", - "latest_commit": "2023-10-28 15:44:20", + "score": -0.09565785620592122, + "first_commit": "2024-04-16 07:52:55", + "latest_commit": "2024-04-20 09:14:43", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_size": 46.9, + "model_architectures": "MixtralForCausalLM" }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", + "description": "はじめに GoogleのGemma-2Bを日本語で使えるように継続事前学習を施した、商用利用可能なベースモデルです。 ", + "url": "https://huggingface.co./alfredplpl/suzume-poc", + "project_name": "suzume-poc", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-10-28 20:29:59", - 
"latest_commit": "2023-10-28 15:30:13", + "score": -0.09565785620592122, + "first_commit": "2024-03-14 09:51:38", + "latest_commit": "2024-03-17 15:05:20", "languages": [], "model_or_dataset": "model", - "model_size": null, - "model_architectures": "MistralForCausalLM" + "model_size": 2.51, + "model_architectures": "GemmaForCausalLM" }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GPTQ", - "project_name": "japanese-stablelm-instruct-gamma-7B-GPTQ", + "description": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1 English description here 概要 Llama-2ベースの学習済み日本語モデルであるelyza/ELYZA-japanese-Llama-2-13bと、そのinstruction tuningモデルであるelyza/ELYZA-japanese-Llama-2-13b-instruct を、mergekitを使ってMoEを行い作成したモデルです。 ", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1", + "project_name": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2023-10-28 19:03:17", - "latest_commit": "2023-10-28 20:24:40", + "score": -0.09565785620592122, + "first_commit": "2024-03-03 09:25:37", + "latest_commit": "2024-03-19 02:34:53", "languages": [], "model_or_dataset": "model", - "model_size": 1.2, - "model_architectures": "MistralForCausalLM" + "model_size": 21.5, + "model_architectures": "MixtralForCausalLM" }, { - "description": "Example ESPnet2 TTS model kan-bayashi/jsut_fastspeech ♻", - "url": "https://huggingface.co./espnet/kan-bayashi_jsut_fastspeech", - "project_name": "kan-bayashi_jsut_fastspeech", + "description": "This model is a voice clone of myself created specifically for Style Bert VITS2.", + "url": "https://huggingface.co./ThePioneer/MyVoiceClone-Style-Bert-VITS2", + "project_name": "MyVoiceClone-Style-Bert-VITS2", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2021-07-03 14:44:06", - "latest_commit": "2021-07-03 10:44:10", + "score": -0.09565785620592122, + "first_commit": "2024-02-29 19:34:12", + "latest_commit": "2024-03-04 10:43:27", "languages": [], "model_or_dataset": "model", "model_size": null, "model_architectures": null }, { - "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したモデルです。 ", - "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-v0.1", - "project_name": "Tokara-0.5B-v0.1", + "description": "This model was created by merging intfloat/e5-mistral-7b-instruct and stabilityai/japanese-stablelm-base-gamma-7b.", + "url": "https://huggingface.co./oshizo/japanese-e5-mistral-7b_slerp", + "project_name": "japanese-e5-mistral-7b_slerp", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-05-06 11:39:26", - "latest_commit": "2024-05-08 12:44:05", + "score": -0.09565785620592122, + "first_commit": "2024-01-04 12:33:19", + "latest_commit": "2024-01-05 15:48:24", "languages": [], "model_or_dataset": "model", - "model_size": 0.464, - "model_architectures": "Qwen2ForCausalLM" + "model_size": 7.24, + "model_architectures": "MistralModel" }, { - "description": "A Japanese dataset generated with an opensource elyza/ELYZA-japanese-Llama-2-13b-instruct model.", - "url": "https://huggingface.co./datasets/iam-ajaymeena/Self-Instruct-Japanese-Elzya-13B", - "project_name": "Self-Instruct-Japanese-Elzya-13B", + "description": "MPT-7B-inst このモデルは、MosaicMLのllm-foundryリポジトリを使用してmosaicml/mpt-7b-instructをファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./Jumtra/mpt-7b-inst", + "project_name": 
"mpt-7b-inst", "downloads": 11, "source": "Hugging Face", - "score": -0.09267834812035865, - "first_commit": "2024-06-15 08:11:55", - "latest_commit": "2024-06-16 09:12:56", + "score": -0.09565785620592122, + "first_commit": "2023-05-24 14:22:33", + "latest_commit": "2023-06-26 01:09:06", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "MPTForCausalLM" + }, + { + "description": "モデル概要 このモデルは、 Twitter/twhin-bert-large をSNS上のコメントに人手で攻撃性評価を行ったデータセットでFine-tuningすることで作成しました", + "url": "https://huggingface.co./TomokiFujihara/twhin-bert-large-japanese-offensiveness-estimation", + "project_name": "twhin-bert-large-japanese-offensiveness-estimation", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2024-03-24 10:28:39", + "latest_commit": "2024-03-24 16:46:53", + "languages": [], + "model_or_dataset": "model", + "model_size": null, + "model_architectures": "OffensivenessEstimationModel" + }, + { + "description": "Overview This dataset is of conversations extracted from Aozora Bunko (青空文庫), which collects public-domain books in Japan, using a simple heuristic approach.", + "url": "https://huggingface.co./datasets/globis-university/aozorabunko-chats", + "project_name": "aozorabunko-chats", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2023-08-04 00:11:23", + "latest_commit": "2023-10-27 13:26:00", "languages": [], "model_or_dataset": "dataset", "model_size": null, "model_architectures": null }, { - "description": "External dictionary importer for Yomichan.", - "url": "https://github.com/FooSoft/yomichan-import", - "project_name": "yomichan-import", - "stargazers_count": 82, - "source": "GitHub", - "score": -0.0936077613058479, - "first_commit": "2016-07-26 20:24:33", - "latest_commit": "2023-02-25 12:43:03", - "languages": [ - "Go" - ], - "model_or_dataset": null + "description": "For the English version, please click here. 
", + "url": "https://huggingface.co./datasets/sakusakumura/databricks-dolly-15k-ja-scored", + "project_name": "databricks-dolly-15k-ja-scored", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2023-06-27 09:14:41", + "latest_commit": "2023-06-27 09:18:39", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { - "description": "日英変換・英語略語展開のための IME 追加辞書 orange_book 日本語から英語への和英変換や英語略語の展開を Google 日本語入力や ATOK などで可能にする IME 拡張辞書", - "url": "https://github.com/peaceiris/google-ime-dictionary", - "project_name": "google-ime-dictionary", - "stargazers_count": 82, - "source": "GitHub", - "score": -0.0936077613058479, - "first_commit": "2018-09-13 01:54:32", - "latest_commit": "2023-01-16 10:47:31", + "description": "Introduction This is a LLM-filtered set of the first 1M rows from ntt's JParaCrawl v3 large English-Japanese parallel corpus.", + "url": "https://huggingface.co./datasets/Verah/JParaCrawl-Filtered-English-Japanese-Parallel-Corpus", + "project_name": "JParaCrawl-Filtered-English-Japanese-Parallel-Corpus", + "downloads": 11, + "source": "Hugging Face", + "score": -0.09565785620592122, + "first_commit": "2024-03-01 06:17:09", + "latest_commit": "2024-03-07 21:20:21", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null, + "model_architectures": null }, { "description": "databricks/dolly-v2-12b の学習データに使用されたdatabricks-dolly-15k.jsonl を日本語に翻訳したデータセットになります。", @@ -13760,7 +14166,7 @@ "project_name": "databricks-dolly-15k-ja", "stargazers_count": 81, "source": "GitHub", - "score": -0.09655412580540652, + "score": -0.09592338449612926, "first_commit": "2023-04-14 23:43:27", "latest_commit": "2023-07-26 00:08:32", "languages": [], @@ -13772,7 +14178,7 @@ "project_name": "cihai", "stargazers_count": 80, "source": "GitHub", - "score": -0.09950049030496515, + "score": -0.09888143697577269, "first_commit": "2013-12-03 09:42:52", "latest_commit": "2024-08-10 13:13:53", "languages": [ @@ -13780,13 +14186,29 @@ ], "model_or_dataset": null }, + { + "description": "日本語文字変換ライブラリ (javascript)", + "url": "https://github.com/kazuhikoarase/jaconv", + "project_name": "jaconv", + "stargazers_count": 80, + "source": "GitHub", + "score": -0.09888143697577269, + "first_commit": "2016-10-22 05:22:02", + "latest_commit": "2024-01-20 05:35:13", + "languages": [ + "TypeScript", + "JavaScript", + "Java" + ], + "model_or_dataset": null + }, { "description": "Neologism dictionary based on the language resources on the Web for mecab-unidic", "url": "https://github.com/neologd/mecab-unidic-neologd", "project_name": "mecab-unidic-neologd", "stargazers_count": 80, "source": "GitHub", - "score": -0.09950049030496515, + "score": -0.09888143697577269, "first_commit": "2015-03-19 10:52:02", "latest_commit": "2020-09-14 19:58:39", "languages": [], @@ -13798,7 +14220,7 @@ "project_name": "nihongo", "stargazers_count": 78, "source": "GitHub", - "score": -0.1053932193040824, + "score": -0.10479754193505952, "first_commit": "2013-09-03 00:22:50", "latest_commit": "2024-02-07 18:36:24", "languages": [ @@ -13807,13 +14229,27 @@ ], "model_or_dataset": null }, + { + "description": "Japanese SKK input method library", + "url": "https://github.com/ueno/libskk", + "project_name": "libskk", + "stargazers_count": 78, + "source": "GitHub", + "score": -0.10479754193505952, + "first_commit": "2011-10-05 18:18:07", + "latest_commit": "2024-09-02 12:09:00", + "languages": [ + "C" + ], + 
"model_or_dataset": "dataset" + }, { "description": "A large parallel corpus of English and Japanese", "url": "https://github.com/rpryzant/JESC", "project_name": "JESC", "stargazers_count": 78, "source": "GitHub", - "score": -0.1053932193040824, + "score": -0.10479754193505952, "first_commit": "2017-10-25 07:41:35", "latest_commit": "2017-10-31 21:08:56", "languages": [ @@ -13827,7 +14263,7 @@ "project_name": "goya", "stargazers_count": 77, "source": "GitHub", - "score": -0.10833958380364102, + "score": -0.10775559441470295, "first_commit": "2021-09-08 19:51:11", "latest_commit": "2021-12-30 19:44:15", "languages": [ @@ -13843,7 +14279,7 @@ "project_name": "SudachiTra", "stargazers_count": 77, "source": "GitHub", - "score": -0.10833958380364102, + "score": -0.10775559441470295, "first_commit": "2021-06-22 19:48:29", "latest_commit": "2023-12-15 08:13:45", "languages": [ @@ -13857,7 +14293,7 @@ "project_name": "emoji-ja", "stargazers_count": 77, "source": "GitHub", - "score": -0.10833958380364102, + "score": -0.10775559441470295, "first_commit": "2018-08-24 08:25:08", "latest_commit": "2023-05-09 14:57:44", "languages": [ @@ -13871,7 +14307,7 @@ "project_name": "KWDLC", "stargazers_count": 75, "source": "GitHub", - "score": -0.11423231280275825, + "score": -0.1136716993739898, "first_commit": "2015-05-20 19:13:17", "latest_commit": "2023-12-18 14:13:14", "languages": [ @@ -13885,7 +14321,7 @@ "project_name": "llm-japanese-dataset", "stargazers_count": 75, "source": "GitHub", - "score": -0.11423231280275825, + "score": -0.1136716993739898, "first_commit": "2023-04-19 14:34:02", "latest_commit": "2024-01-23 09:37:30", "languages": [ @@ -13899,7 +14335,7 @@ "project_name": "jrte-corpus", "stargazers_count": 75, "source": "GitHub", - "score": -0.11423231280275825, + "score": -0.1136716993739898, "first_commit": "2020-10-15 14:59:37", "latest_commit": "2023-06-23 14:06:26", "languages": [ @@ -13913,7 +14349,7 @@ "project_name": "JMTrans", "stargazers_count": 74, "source": "GitHub", - "score": -0.11717867730231687, + "score": -0.11662975185363322, "first_commit": "2020-08-19 23:30:03", "latest_commit": "2021-01-16 21:44:37", "languages": [ @@ -13928,7 +14364,7 @@ "project_name": "Dialog", "stargazers_count": 73, "source": "GitHub", - "score": -0.1201250418018755, + "score": -0.11958780433327663, "first_commit": "2019-09-12 13:05:51", "latest_commit": "2020-10-01 17:25:08", "languages": [ @@ -13943,7 +14379,7 @@ "project_name": "jmdict-yomitan", "stargazers_count": 73, "source": "GitHub", - "score": -0.1201250418018755, + "score": -0.11958780433327663, "first_commit": "2023-09-24 20:01:40", "latest_commit": "2024-07-30 10:44:53", "languages": [ @@ -13957,7 +14393,7 @@ "project_name": "nlp100v2020", "stargazers_count": 73, "source": "GitHub", - "score": -0.1201250418018755, + "score": -0.11958780433327663, "first_commit": "2020-04-07 20:23:08", "latest_commit": "2023-11-05 23:36:07", "languages": [ @@ -13971,7 +14407,7 @@ "project_name": "Laboro-BERT-Japanese", "stargazers_count": 72, "source": "GitHub", - "score": -0.12307140630143412, + "score": -0.12254585681292006, "first_commit": "2020-03-31 12:05:07", "latest_commit": "2022-05-12 17:06:31", "languages": [ @@ -13985,7 +14421,7 @@ "project_name": "resembla", "stargazers_count": 71, "source": "GitHub", - "score": -0.12601777080099275, + "score": -0.1255039092925635, "first_commit": "2017-07-24 17:07:39", "latest_commit": "2019-01-27 00:08:52", "languages": [ @@ -14001,7 +14437,7 @@ "project_name": "unidic-py", "stargazers_count": 71, "source": 
"GitHub", - "score": -0.12601777080099275, + "score": -0.1255039092925635, "first_commit": "2020-01-05 16:19:49", "latest_commit": "2023-06-16 20:50:30", "languages": [ @@ -14015,7 +14451,7 @@ "project_name": "wikipedia-utils", "stargazers_count": 71, "source": "GitHub", - "score": -0.12601777080099275, + "score": -0.1255039092925635, "first_commit": "2022-01-09 16:42:14", "latest_commit": "2024-04-10 08:41:09", "languages": [ @@ -14029,7 +14465,7 @@ "project_name": "TinySegmenterMaker", "stargazers_count": 69, "source": "GitHub", - "score": -0.13191049980011, + "score": -0.13142001425185032, "first_commit": "2012-11-15 22:24:06", "latest_commit": "2022-09-30 13:41:19", "languages": [ @@ -14051,7 +14487,7 @@ "project_name": "aozorabunko_text", "stargazers_count": 69, "source": "GitHub", - "score": -0.13191049980011, + "score": -0.13142001425185032, "first_commit": "2019-02-11 03:06:07", "latest_commit": "2023-03-22 01:21:29", "languages": [ @@ -14065,7 +14501,7 @@ "project_name": "5ch-analysis", "stargazers_count": 67, "source": "GitHub", - "score": -0.1378032287992272, + "score": -0.13733611921113717, "first_commit": "2018-11-11 16:58:44", "latest_commit": "2018-11-11 23:37:16", "languages": [ @@ -14079,7 +14515,7 @@ "project_name": "wana_kana_rust", "stargazers_count": 67, "source": "GitHub", - "score": -0.1378032287992272, + "score": -0.13733611921113717, "first_commit": "2018-02-02 18:39:03", "latest_commit": "2023-01-19 21:51:26", "languages": [ @@ -14094,7 +14530,7 @@ "project_name": "japanese-clip", "stargazers_count": 66, "source": "GitHub", - "score": -0.14074959329878584, + "score": -0.1402941716907806, "first_commit": "2022-04-25 17:19:28", "latest_commit": "2022-07-19 18:20:52", "languages": [ @@ -14108,7 +14544,7 @@ "project_name": "dic-nico-intersection-pixiv", "stargazers_count": 66, "source": "GitHub", - "score": -0.14074959329878584, + "score": -0.1402941716907806, "first_commit": "2017-03-09 08:44:44", "latest_commit": "2024-04-19 13:29:16", "languages": [ @@ -14122,7 +14558,7 @@ "project_name": "mouse_over_dictionary", "stargazers_count": 66, "source": "GitHub", - "score": -0.14074959329878584, + "score": -0.1402941716907806, "first_commit": "2020-01-09 20:26:17", "latest_commit": "2020-01-24 08:57:39", "languages": [ @@ -14136,7 +14572,7 @@ "project_name": "BSD", "stargazers_count": 66, "source": "GitHub", - "score": -0.14074959329878584, + "score": -0.1402941716907806, "first_commit": "2020-07-25 01:04:11", "latest_commit": "2021-11-10 21:33:34", "languages": [], @@ -14148,7 +14584,7 @@ "project_name": "neural_ime", "stargazers_count": 65, "source": "GitHub", - "score": -0.14369595779834446, + "score": -0.14325222417042402, "first_commit": "2016-10-31 15:23:42", "latest_commit": "2016-12-27 21:10:30", "languages": [ @@ -14162,7 +14598,7 @@ "project_name": "cskk", "stargazers_count": 65, "source": "GitHub", - "score": -0.14369595779834446, + "score": -0.14325222417042402, "first_commit": "2018-06-17 15:36:26", "latest_commit": "2024-03-10 13:45:41", "languages": [ @@ -14177,7 +14613,7 @@ "project_name": "ibus-hiragana", "stargazers_count": 64, "source": "GitHub", - "score": -0.14664232229790308, + "score": -0.14621027665006744, "first_commit": "2017-04-28 03:50:59", "latest_commit": "2024-08-15 04:09:53", "languages": [ @@ -14191,7 +14627,7 @@ "project_name": "pdmocrdataset-part1", "stargazers_count": 64, "source": "GitHub", - "score": -0.14664232229790308, + "score": -0.14621027665006744, "first_commit": "2022-04-20 11:55:33", "latest_commit": "2024-06-26 16:10:44", 
"languages": [], @@ -14203,7 +14639,7 @@ "project_name": "ja_sentence_segmenter", "stargazers_count": 63, "source": "GitHub", - "score": -0.1495886867974617, + "score": -0.14916832912971087, "first_commit": "2019-12-15 13:50:07", "latest_commit": "2023-04-03 13:09:20", "languages": [ @@ -14217,7 +14653,7 @@ "project_name": "daaja", "stargazers_count": 63, "source": "GitHub", - "score": -0.1495886867974617, + "score": -0.14916832912971087, "first_commit": "2022-02-12 20:22:34", "latest_commit": "2023-02-16 19:39:30", "languages": [ @@ -14232,7 +14668,7 @@ "project_name": "nlp-recipes-ja", "stargazers_count": 63, "source": "GitHub", - "score": -0.1495886867974617, + "score": -0.14916832912971087, "first_commit": "2020-08-01 09:09:07", "latest_commit": "2021-04-11 08:07:45", "languages": [ @@ -14247,7 +14683,7 @@ "project_name": "spacy_tutorial", "stargazers_count": 63, "source": "GitHub", - "score": -0.1495886867974617, + "score": -0.14916832912971087, "first_commit": "2019-12-29 04:28:30", "latest_commit": "2020-01-24 20:02:24", "languages": [ @@ -14261,7 +14697,7 @@ "project_name": "gptuber-by-langchain", "stargazers_count": 62, "source": "GitHub", - "score": -0.15253505129702033, + "score": -0.1521263816093543, "first_commit": "2023-01-07 00:37:20", "latest_commit": "2023-01-07 00:37:20", "languages": [ @@ -14275,7 +14711,7 @@ "project_name": "gptuber-by-langchain", "stargazers_count": 62, "source": "GitHub", - "score": -0.15253505129702033, + "score": -0.1521263816093543, "first_commit": "2023-01-07 00:37:20", "latest_commit": "2023-01-07 00:37:20", "languages": [ @@ -14289,7 +14725,7 @@ "project_name": "fugumt", "stargazers_count": 61, "source": "GitHub", - "score": -0.15548141579657895, + "score": -0.15508443408899772, "first_commit": "2021-01-02 20:35:49", "latest_commit": "2021-02-28 11:46:52", "languages": [ @@ -14303,7 +14739,7 @@ "project_name": "jakaroma", "stargazers_count": 61, "source": "GitHub", - "score": -0.15548141579657895, + "score": -0.15508443408899772, "first_commit": "2016-04-11 18:21:38", "latest_commit": "2021-03-30 23:21:16", "languages": [ @@ -14317,7 +14753,7 @@ "project_name": "jageocoder", "stargazers_count": 60, "source": "GitHub", - "score": -0.15842778029613758, + "score": -0.15804248656864114, "first_commit": "2021-02-20 17:31:56", "latest_commit": "2024-07-03 06:01:58", "languages": [ @@ -14331,7 +14767,7 @@ "project_name": "llm-leaderboard", "stargazers_count": 60, "source": "GitHub", - "score": -0.15842778029613758, + "score": -0.15804248656864114, "first_commit": "2023-06-27 15:09:25", "latest_commit": "2024-08-07 00:38:16", "languages": [ @@ -14345,7 +14781,7 @@ "project_name": "chatgpt-slackbot", "stargazers_count": 60, "source": "GitHub", - "score": -0.15842778029613758, + "score": -0.15804248656864114, "first_commit": "2022-12-06 22:50:09", "latest_commit": "2024-07-22 18:50:41", "languages": [ @@ -14359,7 +14795,7 @@ "project_name": "IOB2Corpus", "stargazers_count": 60, "source": "GitHub", - "score": -0.15842778029613758, + "score": -0.15804248656864114, "first_commit": "2016-01-29 09:21:25", "latest_commit": "2020-02-25 09:34:11", "languages": [], @@ -14371,7 +14807,7 @@ "project_name": "japanese", "stargazers_count": 60, "source": "GitHub", - "score": -0.15842778029613758, + "score": -0.15804248656864114, "first_commit": "2018-09-13 21:10:10", "latest_commit": "2018-09-13 22:02:23", "languages": [], @@ -14383,7 +14819,7 @@ "project_name": "japanese-numerals-to-number", "stargazers_count": 58, "source": "GitHub", - "score": -0.16432050929525482, 
+ "score": -0.163958591527928, "first_commit": "2017-02-25 22:53:18", "latest_commit": "2023-02-17 01:34:12", "languages": [ @@ -14397,7 +14833,7 @@ "project_name": "simple-simcse-ja", "stargazers_count": 57, "source": "GitHub", - "score": -0.16726687379481345, + "score": -0.16691664400757142, "first_commit": "2022-11-11 19:05:53", "latest_commit": "2023-10-31 14:18:17", "languages": [ @@ -14411,7 +14847,7 @@ "project_name": "mecab-rs", "stargazers_count": 55, "source": "GitHub", - "score": -0.1731596027939307, + "score": -0.17283274896685824, "first_commit": "2015-04-19 09:30:14", "latest_commit": "2023-09-03 22:03:49", "languages": [ @@ -14425,7 +14861,7 @@ "project_name": "YouyakuMan", "stargazers_count": 53, "source": "GitHub", - "score": -0.17905233179304791, + "score": -0.1787488539261451, "first_commit": "2019-10-29 17:43:01", "latest_commit": "2020-09-02 13:37:05", "languages": [ @@ -14439,7 +14875,7 @@ "project_name": "dvorakjp-romantable", "stargazers_count": 53, "source": "GitHub", - "score": -0.17905233179304791, + "score": -0.1787488539261451, "first_commit": "2015-10-11 16:49:41", "latest_commit": "2024-06-24 12:24:34", "languages": [ @@ -14453,7 +14889,7 @@ "project_name": "mozcdict-ext", "stargazers_count": 52, "source": "GitHub", - "score": -0.18199869629260654, + "score": -0.18170690640578852, "first_commit": "2023-01-12 18:13:26", "latest_commit": "2024-07-13 17:37:43", "languages": [ @@ -14467,7 +14903,7 @@ "project_name": "japanese-lm-fin-harness", "stargazers_count": 51, "source": "GitHub", - "score": -0.18494506079216516, + "score": -0.18466495888543194, "first_commit": "2023-09-28 10:48:05", "latest_commit": "2024-08-07 13:01:57", "languages": [ @@ -14481,7 +14917,7 @@ "project_name": "kakasi-java", "stargazers_count": 51, "source": "GitHub", - "score": -0.18494506079216516, + "score": -0.18466495888543194, "first_commit": "2012-01-18 17:33:15", "latest_commit": "2016-04-13 15:56:34", "languages": [ @@ -14495,7 +14931,7 @@ "project_name": "chikkarpy", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2021-05-24 17:10:56", "latest_commit": "2022-02-07 15:11:36", "languages": [ @@ -14509,7 +14945,7 @@ "project_name": "Retrieval-based-Voice-Conversion-WebUI-JP-localization", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2023-03-27 17:59:11", "latest_commit": "2023-04-11 11:08:47", "languages": [ @@ -14524,7 +14960,7 @@ "project_name": "lindera-tantivy", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2020-02-19 10:37:34", "latest_commit": "2023-12-02 21:03:48", "languages": [ @@ -14538,7 +14974,7 @@ "project_name": "kuroshiro-analyzer-kuromoji", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2018-03-09 17:41:37", "latest_commit": "2018-08-05 12:41:55", "languages": [ @@ -14552,7 +14988,7 @@ "project_name": "yomichan-jlpt-vocab", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2021-09-01 18:36:57", "latest_commit": "2023-04-06 22:29:12", "languages": [ @@ -14566,7 +15002,7 @@ "project_name": "rohan4600", "stargazers_count": 50, "source": "GitHub", - "score": -0.18789142529172378, + "score": -0.18762301136507537, "first_commit": "2021-07-31 23:43:43", "latest_commit": "2023-02-01 
12:51:29", "languages": [], @@ -14578,7 +15014,7 @@ "project_name": "fasttext-vs-word2vec-on-twitter-data", "stargazers_count": 49, "source": "GitHub", - "score": -0.1908377897912824, + "score": -0.1905810638447188, "first_commit": "2017-03-30 23:10:04", "latest_commit": "2017-08-23 10:53:09", "languages": [ @@ -14592,7 +15028,7 @@ "project_name": "embedrank", "stargazers_count": 48, "source": "GitHub", - "score": -0.19378415429084103, + "score": -0.19353911632436221, "first_commit": "2019-02-01 11:40:50", "latest_commit": "2019-03-19 09:05:41", "languages": [ @@ -14606,7 +15042,7 @@ "project_name": "japanese-llama-experiment", "stargazers_count": 48, "source": "GitHub", - "score": -0.19378415429084103, + "score": -0.19353911632436221, "first_commit": "2023-06-28 17:43:53", "latest_commit": "2024-03-10 23:31:45", "languages": [ @@ -14622,7 +15058,7 @@ "project_name": "esupar", "stargazers_count": 47, "source": "GitHub", - "score": -0.19673051879039966, + "score": -0.19649716880400564, "first_commit": "2021-09-18 07:28:30", "latest_commit": "2024-08-15 20:37:47", "languages": [ @@ -14637,7 +15073,7 @@ "project_name": "japanese-llm-ranking", "stargazers_count": 47, "source": "GitHub", - "score": -0.19673051879039966, + "score": -0.19649716880400564, "first_commit": "2023-06-28 18:31:52", "latest_commit": "2024-03-04 18:17:06", "languages": [ @@ -14652,7 +15088,7 @@ "project_name": "tinysegmenter", "stargazers_count": 47, "source": "GitHub", - "score": -0.19673051879039966, + "score": -0.19649716880400564, "first_commit": "2014-07-04 17:23:23", "latest_commit": "2015-11-03 21:49:19", "languages": [ @@ -14666,7 +15102,7 @@ "project_name": "jslingua", "stargazers_count": 47, "source": "GitHub", - "score": -0.19673051879039966, + "score": -0.19649716880400564, "first_commit": "2016-03-22 10:52:37", "latest_commit": "2023-10-19 22:01:23", "languages": [ @@ -14681,7 +15117,7 @@ "project_name": "LLaVA-JP", "stargazers_count": 46, "source": "GitHub", - "score": -0.19967688328995828, + "score": -0.19945522128364906, "first_commit": "2023-12-01 12:26:17", "latest_commit": "2024-06-05 23:42:35", "languages": [ @@ -14695,7 +15131,7 @@ "project_name": "language-pretraining", "stargazers_count": 46, "source": "GitHub", - "score": -0.19967688328995828, + "score": -0.19945522128364906, "first_commit": "2021-07-07 12:07:22", "latest_commit": "2023-05-19 23:15:30", "languages": [ @@ -14709,7 +15145,7 @@ "project_name": "azure-search-openai-demo", "stargazers_count": 46, "source": "GitHub", - "score": -0.19967688328995828, + "score": -0.19945522128364906, "first_commit": "2023-02-08 13:00:55", "latest_commit": "2023-12-07 18:16:07", "languages": [ @@ -14725,7 +15161,7 @@ "project_name": "jawiki-kana-kanji-dict", "stargazers_count": 46, "source": "GitHub", - "score": -0.19967688328995828, + "score": -0.19945522128364906, "first_commit": "2020-08-23 02:36:22", "latest_commit": "2024-08-09 14:54:51", "languages": [ @@ -14739,7 +15175,7 @@ "project_name": "google-ime-user-dictionary-ja-en", "stargazers_count": 45, "source": "GitHub", - "score": -0.2026232477895169, + "score": -0.2024132737632925, "first_commit": "2009-12-11 10:24:53", "latest_commit": "2016-12-23 19:44:09", "languages": [], @@ -14751,7 +15187,7 @@ "project_name": "LINE-DistilBERT-Japanese", "stargazers_count": 44, "source": "GitHub", - "score": -0.20556961228907553, + "score": -0.20537132624293591, "first_commit": "2023-03-09 18:50:06", "latest_commit": "2023-03-22 15:09:22", "languages": [], @@ -14763,7 +15199,7 @@ "project_name": "JSICK", 
"stargazers_count": 44, "source": "GitHub", - "score": -0.20556961228907553, + "score": -0.20537132624293591, "first_commit": "2021-05-24 18:12:15", "latest_commit": "2023-05-31 17:48:45", "languages": [], @@ -14775,7 +15211,7 @@ "project_name": "Convert-Numbers-to-Japanese", "stargazers_count": 43, "source": "GitHub", - "score": -0.20851597678863415, + "score": -0.20832937872257934, "first_commit": "2017-03-24 12:30:39", "latest_commit": "2020-11-26 16:37:30", "languages": [ @@ -14789,7 +15225,7 @@ "project_name": "japanese-daily-dialogue", "stargazers_count": 43, "source": "GitHub", - "score": -0.20851597678863415, + "score": -0.20832937872257934, "first_commit": "2023-03-15 16:53:41", "latest_commit": "2023-03-17 18:53:28", "languages": [], @@ -14801,7 +15237,7 @@ "project_name": "namaco", "stargazers_count": 41, "source": "GitHub", - "score": -0.21440870578775137, + "score": -0.21424548368186616, "first_commit": "2017-10-11 09:53:23", "latest_commit": "2018-02-09 06:27:36", "languages": [ @@ -14816,7 +15252,7 @@ "project_name": "llm-jp-sft", "stargazers_count": 41, "source": "GitHub", - "score": -0.21440870578775137, + "score": -0.21424548368186616, "first_commit": "2023-09-22 14:30:09", "latest_commit": "2024-06-13 13:17:38", "languages": [ @@ -14830,7 +15266,7 @@ "project_name": "node-romaji-name", "stargazers_count": 41, "source": "GitHub", - "score": -0.21440870578775137, + "score": -0.21424548368186616, "first_commit": "2013-08-24 10:50:11", "latest_commit": "2023-12-27 13:27:03", "languages": [ @@ -14844,7 +15280,7 @@ "project_name": "japanese-toolkit", "stargazers_count": 41, "source": "GitHub", - "score": -0.21440870578775137, + "score": -0.21424548368186616, "first_commit": "2020-07-09 05:58:18", "latest_commit": "2023-01-08 23:53:43", "languages": [ @@ -14859,7 +15295,7 @@ "project_name": "make-meidai-dialogue", "stargazers_count": 40, "source": "GitHub", - "score": -0.21735507028731, + "score": -0.21720353616150959, "first_commit": "2016-11-10 13:45:38", "latest_commit": "2017-09-29 07:53:24", "languages": [ @@ -14873,7 +15309,7 @@ "project_name": "vits-japros-webui", "stargazers_count": 40, "source": "GitHub", - "score": -0.21735507028731, + "score": -0.21720353616150959, "first_commit": "2023-09-30 17:09:14", "latest_commit": "2024-01-05 22:42:43", "languages": [ @@ -14889,7 +15325,7 @@ "project_name": "bert-japanese-aozora", "stargazers_count": 40, "source": "GitHub", - "score": -0.21735507028731, + "score": -0.21720353616150959, "first_commit": "2020-03-08 10:20:43", "latest_commit": "2020-08-08 12:06:20", "languages": [], @@ -14901,7 +15337,7 @@ "project_name": "koniwa", "stargazers_count": 40, "source": "GitHub", - "score": -0.21735507028731, + "score": -0.21720353616150959, "first_commit": "2021-10-29 21:19:06", "latest_commit": "2024-08-02 10:48:21", "languages": [ @@ -14916,7 +15352,7 @@ "project_name": "DocumentClassificationUsingBERT-Japanese", "stargazers_count": 40, "source": "GitHub", - "score": -0.21735507028731, + "score": -0.21720353616150959, "first_commit": "2019-12-16 00:55:48", "latest_commit": "2021-01-29 10:59:18", "languages": [ @@ -14931,7 +15367,7 @@ "project_name": "Japanese-BPEEncoder", "stargazers_count": 39, "source": "GitHub", - "score": -0.22030143478686862, + "score": -0.220161588641153, "first_commit": "2020-10-03 12:21:03", "latest_commit": "2021-09-12 09:58:42", "languages": [ @@ -14945,7 +15381,7 @@ "project_name": "kanjigrid", "stargazers_count": 39, "source": "GitHub", - "score": -0.22030143478686862, + "score": -0.220161588641153, 
"first_commit": "2018-10-26 15:47:29", "latest_commit": "2018-11-19 14:14:00", "languages": [ @@ -14959,7 +15395,7 @@ "project_name": "t5-japanese", "stargazers_count": 39, "source": "GitHub", - "score": -0.22030143478686862, + "score": -0.220161588641153, "first_commit": "2021-08-25 09:55:16", "latest_commit": "2021-09-07 14:11:02", "languages": [ @@ -14973,7 +15409,7 @@ "project_name": "emoticon", "stargazers_count": 39, "source": "GitHub", - "score": -0.22030143478686862, + "score": -0.220161588641153, "first_commit": "2013-12-29 17:16:16", "latest_commit": "2020-05-07 13:36:42", "languages": [ @@ -14987,7 +15423,7 @@ "project_name": "llm-jp-corpus", "stargazers_count": 39, "source": "GitHub", - "score": -0.22030143478686862, + "score": -0.220161588641153, "first_commit": "2023-06-14 13:21:33", "latest_commit": "2023-10-23 10:04:18", "languages": [ @@ -15001,7 +15437,7 @@ "project_name": "xvector_jtubespeech", "stargazers_count": 38, "source": "GitHub", - "score": -0.22324779928642724, + "score": -0.22311964112079644, "first_commit": "2022-03-08 11:00:20", "latest_commit": "2023-11-05 14:48:26", "languages": [ @@ -15015,7 +15451,7 @@ "project_name": "IgakuQA", "stargazers_count": 38, "source": "GitHub", - "score": -0.22324779928642724, + "score": -0.22311964112079644, "first_commit": "2023-03-31 10:29:20", "latest_commit": "2023-03-31 10:29:20", "languages": [ @@ -15029,7 +15465,7 @@ "project_name": "Japanese-BPEEncoder_V2", "stargazers_count": 37, "source": "GitHub", - "score": -0.22619416378598586, + "score": -0.22607769360043986, "first_commit": "2021-08-23 19:09:11", "latest_commit": "2023-01-15 12:43:44", "languages": [ @@ -15043,7 +15479,7 @@ "project_name": "Mykytea-python", "stargazers_count": 36, "source": "GitHub", - "score": -0.2291405282855445, + "score": -0.22903574608008329, "first_commit": "2011-07-15 11:45:10", "latest_commit": "2024-01-15 17:30:17", "languages": [ @@ -15058,7 +15494,7 @@ "project_name": "handwritten-japanese-ocr", "stargazers_count": 36, "source": "GitHub", - "score": -0.2291405282855445, + "score": -0.22903574608008329, "first_commit": "2020-05-01 17:27:13", "latest_commit": "2022-04-05 11:16:31", "languages": [ @@ -15073,7 +15509,7 @@ "project_name": "japanese-llm-roleplay-benchmark", "stargazers_count": 36, "source": "GitHub", - "score": -0.2291405282855445, + "score": -0.22903574608008329, "first_commit": "2023-09-15 23:52:27", "latest_commit": "2023-11-03 22:01:24", "languages": [ @@ -15088,7 +15524,7 @@ "project_name": "python-vibrato", "stargazers_count": 35, "source": "GitHub", - "score": -0.2320868927851031, + "score": -0.2319937985597267, "first_commit": "2022-12-08 12:29:38", "latest_commit": "2023-09-05 22:16:59", "languages": [ @@ -15103,7 +15539,7 @@ "project_name": "mozcpy", "stargazers_count": 35, "source": "GitHub", - "score": -0.2320868927851031, + "score": -0.2319937985597267, "first_commit": "2022-08-21 02:20:37", "latest_commit": "2023-12-12 00:56:24", "languages": [ @@ -15117,7 +15553,7 @@ "project_name": "ja-tokenizer-docker-py", "stargazers_count": 35, "source": "GitHub", - "score": -0.2320868927851031, + "score": -0.2319937985597267, "first_commit": "2022-05-08 13:45:30", "latest_commit": "2022-05-10 16:55:48", "languages": [ @@ -15132,7 +15568,7 @@ "project_name": "JIWC-Dictionary", "stargazers_count": 35, "source": "GitHub", - "score": -0.2320868927851031, + "score": -0.2319937985597267, "first_commit": "2020-07-31 16:28:26", "latest_commit": "2021-01-27 17:39:40", "languages": [], @@ -15144,7 +15580,7 @@ "project_name": 
"wikihow_japanese", "stargazers_count": 35, "source": "GitHub", - "score": -0.2320868927851031, + "score": -0.2319937985597267, "first_commit": "2020-06-29 03:11:23", "latest_commit": "2020-12-18 03:54:55", "languages": [ @@ -15158,7 +15594,7 @@ "project_name": "react-native-japanese-tokenizer", "stargazers_count": 34, "source": "GitHub", - "score": -0.23503325728466173, + "score": -0.23495185103937014, "first_commit": "2018-03-14 15:07:47", "latest_commit": "2023-06-19 09:34:01", "languages": [ @@ -15173,7 +15609,7 @@ "project_name": "pytorch_bert_japanese", "stargazers_count": 34, "source": "GitHub", - "score": -0.23503325728466173, + "score": -0.23495185103937014, "first_commit": "2019-06-05 21:42:27", "latest_commit": "2019-06-07 14:27:41", "languages": [ @@ -15187,7 +15623,7 @@ "project_name": "nayose-wikipedia-ja", "stargazers_count": 34, "source": "GitHub", - "score": -0.23503325728466173, + "score": -0.23495185103937014, "first_commit": "2020-03-09 09:16:39", "latest_commit": "2020-03-10 11:04:36", "languages": [ @@ -15201,7 +15637,7 @@ "project_name": "python-npylm", "stargazers_count": 33, "source": "GitHub", - "score": -0.23797962178422036, + "score": -0.23790990351901356, "first_commit": "2016-12-18 21:03:18", "latest_commit": "2019-01-30 16:35:14", "languages": [ @@ -15216,7 +15652,7 @@ "project_name": "EN-JP-ML-Lexicon", "stargazers_count": 33, "source": "GitHub", - "score": -0.23797962178422036, + "score": -0.23790990351901356, "first_commit": "2019-05-27 16:29:35", "latest_commit": "2021-03-13 09:19:56", "languages": [], @@ -15228,7 +15664,7 @@ "project_name": "albert-japanese", "stargazers_count": 33, "source": "GitHub", - "score": -0.23797962178422036, + "score": -0.23790990351901356, "first_commit": "2018-12-27 20:05:33", "latest_commit": "2021-10-28 19:57:23", "languages": [ @@ -15243,7 +15679,7 @@ "project_name": "pva-aoai-integration-solution", "stargazers_count": 33, "source": "GitHub", - "score": -0.23797962178422036, + "score": -0.23790990351901356, "first_commit": "2023-06-20 13:50:54", "latest_commit": "2023-08-14 18:03:34", "languages": [], @@ -15255,7 +15691,7 @@ "project_name": "unidic-lite", "stargazers_count": 33, "source": "GitHub", - "score": -0.23797962178422036, + "score": -0.23790990351901356, "first_commit": "2020-04-07 16:24:18", "latest_commit": "2020-09-01 22:50:07", "languages": [ @@ -15269,7 +15705,7 @@ "project_name": "UniDic2UD", "stargazers_count": 32, "source": "GitHub", - "score": -0.24092598628377898, + "score": -0.24086795599865699, "first_commit": "2019-08-27 00:45:01", "latest_commit": "2024-01-31 23:53:51", "languages": [ @@ -15284,7 +15720,7 @@ "project_name": "ja-vicuna-qa-benchmark", "stargazers_count": 32, "source": "GitHub", - "score": -0.24092598628377898, + "score": -0.24086795599865699, "first_commit": "2023-08-11 15:38:05", "latest_commit": "2024-06-11 16:24:06", "languages": [ @@ -15298,7 +15734,7 @@ "project_name": "text2text-japanese", "stargazers_count": 32, "source": "GitHub", - "score": -0.24092598628377898, + "score": -0.24086795599865699, "first_commit": "2021-02-11 12:28:53", "latest_commit": "2021-07-22 14:26:45", "languages": [ @@ -15312,7 +15748,7 @@ "project_name": "simple-jppdb", "stargazers_count": 32, "source": "GitHub", - "score": -0.24092598628377898, + "score": -0.24086795599865699, "first_commit": "2017-03-09 19:29:19", "latest_commit": "2017-03-13 00:01:48", "languages": [ @@ -15326,7 +15762,7 @@ "project_name": "whisper-asr-finetune", "stargazers_count": 31, "source": "GitHub", - "score": 
-0.2438723507833376, + "score": -0.2438260084783004, "first_commit": "2022-10-27 20:22:00", "latest_commit": "2022-12-04 21:29:47", "languages": [ @@ -15340,7 +15776,7 @@ "project_name": "moji4j", "stargazers_count": 31, "source": "GitHub", - "score": -0.2438723507833376, + "score": -0.2438260084783004, "first_commit": "2016-07-08 09:13:16", "latest_commit": "2022-06-24 09:00:44", "languages": [ @@ -15355,7 +15791,7 @@ "project_name": "jvs_hiho", "stargazers_count": 31, "source": "GitHub", - "score": -0.2438723507833376, + "score": -0.2438260084783004, "first_commit": "2020-02-02 01:24:43", "latest_commit": "2021-02-11 15:29:19", "languages": [], @@ -15367,7 +15803,7 @@ "project_name": "janome-tutorial", "stargazers_count": 31, "source": "GitHub", - "score": -0.2438723507833376, + "score": -0.2438260084783004, "first_commit": "2019-03-03 15:25:08", "latest_commit": "2019-03-03 16:46:00", "languages": [ @@ -15383,7 +15819,7 @@ "project_name": "marine", "stargazers_count": 30, "source": "GitHub", - "score": -0.24681871528289623, + "score": -0.24678406095794383, "first_commit": "2022-09-05 20:23:40", "latest_commit": "2022-09-20 10:26:24", "languages": [ @@ -15397,7 +15833,7 @@ "project_name": "ThreeLineSummaryDataset", "stargazers_count": 30, "source": "GitHub", - "score": -0.24681871528289623, + "score": -0.24678406095794383, "first_commit": "2018-01-25 02:27:22", "latest_commit": "2018-04-04 22:24:47", "languages": [], @@ -15409,7 +15845,7 @@ "project_name": "data_set", "stargazers_count": 30, "source": "GitHub", - "score": -0.24681871528289623, + "score": -0.24678406095794383, "first_commit": "2023-01-15 08:39:44", "latest_commit": "2024-05-14 17:37:05", "languages": [], @@ -15421,7 +15857,7 @@ "project_name": "rhoknp", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "2021-08-03 14:50:30", "latest_commit": "2024-07-16 09:26:45", "languages": [ @@ -15436,7 +15872,7 @@ "project_name": "knp", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "1999-03-12 09:09:08", "latest_commit": "2023-11-01 21:02:34", "languages": [ @@ -15456,7 +15892,7 @@ "project_name": "elasticsearch-analysis-japanese", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "2012-01-11 13:32:35", "latest_commit": "2012-03-06 23:36:46", "languages": [ @@ -15470,7 +15906,7 @@ "project_name": "nijisanji-ime-dic", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "2019-12-30 01:37:16", "latest_commit": "2024-08-14 12:35:13", "languages": [], @@ -15482,7 +15918,7 @@ "project_name": "anthy-unicode", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "2013-06-30 11:09:24", "latest_commit": "2024-05-02 14:12:10", "languages": [ @@ -15496,7 +15932,7 @@ "project_name": "BERT_Japanese_Google_Colaboratory", "stargazers_count": 29, "source": "GitHub", - "score": -0.24976507978245485, + "score": -0.24974211343758726, "first_commit": "2020-05-14 14:53:17", "latest_commit": "2022-01-25 11:58:44", "languages": [ @@ -15510,7 +15946,7 @@ "project_name": "bert", "stargazers_count": 28, "source": "GitHub", - "score": -0.2527114442820135, + "score": -0.25270016591723066, "first_commit": "2022-04-05 17:03:10", "latest_commit": "2022-04-05 17:03:55", "languages": [ @@ 
-15524,7 +15960,7 @@ "project_name": "UD_Japanese-GSD", "stargazers_count": 28, "source": "GitHub", - "score": -0.2527114442820135, + "score": -0.25270016591723066, "first_commit": "2016-10-12 10:33:42", "latest_commit": "2022-05-29 11:52:21", "languages": [ @@ -15538,7 +15974,7 @@ "project_name": "comet-atomic-ja", "stargazers_count": 28, "source": "GitHub", - "score": -0.2527114442820135, + "score": -0.25270016591723066, "first_commit": "2023-02-04 10:39:44", "latest_commit": "2024-03-08 22:31:37", "languages": [ @@ -15552,7 +15988,7 @@ "project_name": "transformer-copy", "stargazers_count": 27, "source": "GitHub", - "score": -0.25565780878157207, + "score": -0.2556582183968741, "first_commit": "2019-08-24 23:18:00", "latest_commit": "2020-09-25 23:12:54", "languages": [ @@ -15568,7 +16004,7 @@ "project_name": "crawdad", "stargazers_count": 27, "source": "GitHub", - "score": -0.25565780878157207, + "score": -0.2556582183968741, "first_commit": "2022-03-20 23:22:50", "latest_commit": "2023-02-20 22:23:22", "languages": [ @@ -15582,7 +16018,7 @@ "project_name": "jpreprocess", "stargazers_count": 27, "source": "GitHub", - "score": -0.25565780878157207, + "score": -0.2556582183968741, "first_commit": "2022-12-18 17:32:12", "latest_commit": "2024-08-14 09:22:07", "languages": [ @@ -15598,7 +16034,7 @@ "project_name": "Japanese_nlp_scripts", "stargazers_count": 26, "source": "GitHub", - "score": -0.2586041732811307, + "score": -0.2586162708765175, "first_commit": "2015-05-18 17:15:00", "latest_commit": "2019-06-30 18:33:13", "languages": [ @@ -15612,7 +16048,7 @@ "project_name": "yoin", "stargazers_count": 26, "source": "GitHub", - "score": -0.2586041732811307, + "score": -0.2586162708765175, "first_commit": "2017-01-15 23:54:52", "latest_commit": "2017-10-27 17:44:55", "languages": [ @@ -15620,13 +16056,27 @@ ], "model_or_dataset": null }, + { + "description": "Convert romaji into hiragana", + "url": "https://github.com/koozaki/romaji-conv", + "project_name": "romaji-conv", + "stargazers_count": 26, + "source": "GitHub", + "score": -0.2586162708765175, + "first_commit": "2020-07-05 01:29:36", + "latest_commit": "2024-09-01 19:50:38", + "languages": [ + "JavaScript" + ], + "model_or_dataset": null + }, { "description": "THE IDOLM@STER words dictionary for Japanese IME (by imas-db.jp)", "url": "https://github.com/maruamyu/imas-ime-dic", "project_name": "imas-ime-dic", "stargazers_count": 26, "source": "GitHub", - "score": -0.2586041732811307, + "score": -0.2586162708765175, "first_commit": "2018-04-25 02:07:26", "latest_commit": "2024-04-19 01:05:34", "languages": [ @@ -15640,7 +16090,7 @@ "project_name": "security_words", "stargazers_count": 26, "source": "GitHub", - "score": -0.2586041732811307, + "score": -0.2586162708765175, "first_commit": "2020-04-27 12:23:28", "latest_commit": "2023-08-18 10:02:08", "languages": [], @@ -15652,7 +16102,7 @@ "project_name": "yomikata", "stargazers_count": 25, "source": "GitHub", - "score": -0.2615505377806893, + "score": -0.26157432335616093, "first_commit": "2023-02-21 01:43:00", "latest_commit": "2023-10-03 09:10:45", "languages": [ @@ -15667,7 +16117,7 @@ "project_name": "nksnd", "stargazers_count": 25, "source": "GitHub", - "score": -0.2615505377806893, + "score": -0.26157432335616093, "first_commit": "2016-05-24 18:52:03", "latest_commit": "2018-05-17 08:54:24", "languages": [ @@ -15681,7 +16131,7 @@ "project_name": "tweetMapping", "stargazers_count": 25, "source": "GitHub", - "score": -0.2615505377806893, + "score": -0.26157432335616093, 
"first_commit": "2021-02-17 07:55:27", "latest_commit": "2023-12-08 21:42:44", "languages": [ @@ -15695,7 +16145,7 @@ "project_name": "ipadic-py", "stargazers_count": 25, "source": "GitHub", - "score": -0.2615505377806893, + "score": -0.26157432335616093, "first_commit": "2020-07-16 16:19:26", "latest_commit": "2021-10-31 04:47:19", "languages": [ @@ -15709,7 +16159,7 @@ "project_name": "JMRD", "stargazers_count": 25, "source": "GitHub", - "score": -0.2615505377806893, + "score": -0.26157432335616093, "first_commit": "2022-07-15 09:43:44", "latest_commit": "2022-07-19 10:02:29", "languages": [], @@ -15721,7 +16171,7 @@ "project_name": "vrchatbot", "stargazers_count": 24, "source": "GitHub", - "score": -0.26449690228024797, + "score": -0.26453237583580436, "first_commit": "2022-12-10 22:48:39", "latest_commit": "2022-12-20 17:18:44", "languages": [ @@ -15735,7 +16185,7 @@ "project_name": "llm-jp-tokenizer", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2023-07-13 12:52:22", "latest_commit": "2024-07-05 13:56:54", "languages": [ @@ -15749,7 +16199,7 @@ "project_name": "python_asa", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2018-07-25 04:13:21", "latest_commit": "2020-01-14 17:09:11", "languages": [ @@ -15763,7 +16213,7 @@ "project_name": "hatsuon", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2018-04-06 12:06:12", "latest_commit": "2022-03-14 18:06:39", "languages": [ @@ -15777,7 +16227,7 @@ "project_name": "AISisterAIChan", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2023-03-30 18:40:44", "latest_commit": "2023-05-18 22:14:24", "languages": [], @@ -15789,7 +16239,7 @@ "project_name": "cl-skkserv", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2017-12-23 10:20:54", "latest_commit": "2024-02-11 10:42:23", "languages": [], @@ -15801,7 +16251,7 @@ "project_name": "asdc", "stargazers_count": 23, "source": "GitHub", - "score": -0.26744326677980657, + "score": -0.2674904283154478, "first_commit": "2022-06-16 09:43:01", "latest_commit": "2023-08-09 12:03:48", "languages": [ @@ -15815,7 +16265,7 @@ "project_name": "rakutenma-python", "stargazers_count": 22, "source": "GitHub", - "score": -0.27038963127936516, + "score": -0.2704484807950912, "first_commit": "2015-01-02 06:52:27", "latest_commit": "2017-05-22 16:45:10", "languages": [ @@ -15829,7 +16279,7 @@ "project_name": "bertknp", "stargazers_count": 22, "source": "GitHub", - "score": -0.27038963127936516, + "score": -0.2704484807950912, "first_commit": "2021-02-10 09:20:17", "latest_commit": "2021-10-02 14:45:12", "languages": [ @@ -15843,7 +16293,7 @@ "project_name": "ILYS-aoba-chatbot", "stargazers_count": 22, "source": "GitHub", - "score": -0.27038963127936516, + "score": -0.2704484807950912, "first_commit": "2020-10-26 13:13:33", "latest_commit": "2021-10-01 22:55:52", "languages": [ @@ -15859,7 +16309,7 @@ "project_name": "RoBERTa-japanese", "stargazers_count": 22, "source": "GitHub", - "score": -0.27038963127936516, + "score": -0.2704484807950912, "first_commit": "2020-11-28 17:36:50", "latest_commit": "2021-11-13 10:37:23", "languages": [ @@ -15873,7 +16323,7 @@ "project_name": "jisyo", "stargazers_count": 22, "source": "GitHub", - "score": 
-0.27038963127936516, + "score": -0.2704484807950912, "first_commit": null, "latest_commit": null, "languages": [], @@ -15885,7 +16335,7 @@ "project_name": "instruction_ja", "stargazers_count": 22, "source": "GitHub", - "score": -0.27038963127936516, + "score": -0.2704484807950912, "first_commit": "2023-06-22 15:52:12", "latest_commit": "2023-07-13 16:02:15", "languages": [ @@ -15899,7 +16349,7 @@ "project_name": "sengiri", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2019-10-05 03:46:43", "latest_commit": "2022-08-10 20:45:00", "languages": [ @@ -15913,7 +16363,7 @@ "project_name": "rake-ja", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2018-10-11 19:07:50", "latest_commit": "2018-10-11 19:27:37", "languages": [ @@ -15927,7 +16377,7 @@ "project_name": "rime-jaroomaji", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2023-03-21 21:07:42", "latest_commit": "2024-08-15 11:40:00", "languages": [ @@ -15941,7 +16391,7 @@ "project_name": "TwitterCorpus", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2016-03-05 19:20:15", "latest_commit": "2016-03-14 19:55:35", "languages": [ @@ -15955,7 +16405,7 @@ "project_name": "do-not-answer-ja", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2023-09-09 08:21:26", "latest_commit": "2023-12-16 02:34:12", "languages": [ @@ -15970,7 +16420,7 @@ "project_name": "kantan-regex-book", "stargazers_count": 21, "source": "GitHub", - "score": -0.2733359957789238, + "score": -0.27340653327473463, "first_commit": "2024-01-28 10:37:12", "latest_commit": "2024-03-23 21:55:27", "languages": [ @@ -15985,7 +16435,7 @@ "project_name": "python-vaporetto", "stargazers_count": 20, "source": "GitHub", - "score": -0.2762823602784824, + "score": -0.27636458575437806, "first_commit": "2022-06-09 13:37:22", "latest_commit": "2023-09-05 22:15:48", "languages": [ @@ -16000,7 +16450,7 @@ "project_name": "darts-clone-python", "stargazers_count": 20, "source": "GitHub", - "score": -0.2762823602784824, + "score": -0.27636458575437806, "first_commit": "2018-11-17 00:57:52", "latest_commit": "2022-04-05 21:28:21", "languages": [ @@ -16014,7 +16464,7 @@ "project_name": "text-generation", "stargazers_count": 20, "source": "GitHub", - "score": -0.2762823602784824, + "score": -0.27636458575437806, "first_commit": "2022-07-17 11:55:29", "latest_commit": "2022-07-24 13:48:32", "languages": [ @@ -16029,7 +16479,7 @@ "project_name": "keigo_transfer_task", "stargazers_count": 20, "source": "GitHub", - "score": -0.2762823602784824, + "score": -0.27636458575437806, "first_commit": "2022-06-14 23:06:57", "latest_commit": "2022-11-24 13:01:06", "languages": [], @@ -16041,7 +16491,7 @@ "project_name": "JMMLU", "stargazers_count": 20, "source": "GitHub", - "score": -0.2762823602784824, + "score": -0.27636458575437806, "first_commit": "2024-01-09 16:43:45", "latest_commit": "2024-02-27 14:19:14", "languages": [], @@ -16053,7 +16503,7 @@ "project_name": "jntajis-python", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2021-08-20 17:32:20", "latest_commit": "2023-06-16 22:43:41", "languages": [ @@ -16068,7 +16518,7 @@ "project_name": 
"pygeonlp", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2021-06-26 17:45:22", "latest_commit": "2024-07-24 01:39:27", "languages": [ @@ -16083,7 +16533,7 @@ "project_name": "unicode-jp-rs", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2016-05-21 16:39:49", "latest_commit": "2020-04-11 12:01:21", "languages": [ @@ -16097,7 +16547,7 @@ "project_name": "kyujitai.js", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2014-09-06 17:03:02", "latest_commit": "2020-08-30 23:28:58", "languages": [ @@ -16111,7 +16561,7 @@ "project_name": "go-moji", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2018-01-15 23:57:06", "latest_commit": "2019-04-17 10:29:44", "languages": [ @@ -16125,7 +16575,7 @@ "project_name": "huriganacorpus-ndlbib", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2021-09-01 13:36:53", "latest_commit": "2021-09-21 14:20:03", "languages": [], @@ -16137,7 +16587,7 @@ "project_name": "japanese-toxic-dataset", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2023-01-10 17:19:58", "latest_commit": "2023-01-11 13:55:08", "languages": [], @@ -16149,7 +16599,7 @@ "project_name": "camera", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2023-02-22 10:33:39", "latest_commit": "2024-08-13 09:20:33", "languages": [], @@ -16161,7 +16611,7 @@ "project_name": "Japanese-Fakenews-Dataset", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2021-05-02 15:40:10", "latest_commit": "2021-05-02 15:40:10", "languages": [], @@ -16173,7 +16623,7 @@ "project_name": "aozorasearch", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2016-10-15 17:22:45", "latest_commit": "2020-09-04 14:28:15", "languages": [ @@ -16188,7 +16638,7 @@ "project_name": "jqara", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2024-03-02 09:09:08", "latest_commit": "2024-08-10 11:54:50", "languages": [ @@ -16202,7 +16652,7 @@ "project_name": "jacwir", "stargazers_count": 19, "source": "GitHub", - "score": -0.27922872477804106, + "score": -0.2793226382340215, "first_commit": "2024-03-19 15:31:13", "latest_commit": "2024-07-24 11:09:03", "languages": [ @@ -16216,7 +16666,7 @@ "project_name": "japanese-numbers-python", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2016-07-29 09:20:05", "latest_commit": "2020-04-04 10:36:27", "languages": [ @@ -16230,7 +16680,7 @@ "project_name": "Grongish", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2012-02-10 22:28:59", "latest_commit": "2022-04-21 21:31:37", "languages": [ @@ -16245,7 +16695,7 @@ "project_name": "termextract", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2018-09-26 22:20:04", 
"latest_commit": "2018-09-26 23:01:36", "languages": [ @@ -16259,7 +16709,7 @@ "project_name": "minimal-search-engine", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2019-06-25 01:58:26", "latest_commit": "2019-07-06 01:26:57", "languages": [ @@ -16273,7 +16723,7 @@ "project_name": "mozcdic-ut-jawiki", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2023-01-15 17:43:20", "latest_commit": "2024-07-29 01:27:13", "languages": [ @@ -16287,7 +16737,7 @@ "project_name": "analyze-desumasu-dearu", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2015-10-10 01:42:33", "latest_commit": "2021-12-10 12:01:50", "languages": [ @@ -16301,7 +16751,7 @@ "project_name": "JaNLI", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2021-11-02 20:26:27", "latest_commit": "2023-05-31 17:50:04", "languages": [ @@ -16315,7 +16765,7 @@ "project_name": "technological-book-corpus-ja", "stargazers_count": 18, "source": "GitHub", - "score": -0.28217508927759966, + "score": -0.2822806907136649, "first_commit": "2017-03-26 13:28:13", "latest_commit": "2023-07-12 03:26:27", "languages": [ @@ -16329,7 +16779,7 @@ "project_name": "rinna_gpt-neox_ggml-lora", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2023-05-24 03:21:40", "latest_commit": "2023-05-25 05:38:04", "languages": [ @@ -16343,7 +16793,7 @@ "project_name": "chainer_nic", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2018-07-02 19:57:17", "latest_commit": "2018-12-14 17:26:49", "languages": [ @@ -16357,7 +16807,7 @@ "project_name": "yomigana-ebook", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2023-04-25 18:21:37", "latest_commit": "2024-02-21 01:18:17", "languages": [ @@ -16372,7 +16822,7 @@ "project_name": "jmteb", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2024-03-15 10:28:52", "latest_commit": "2024-06-20 20:58:44", "languages": [ @@ -16386,7 +16836,7 @@ "project_name": "AnnotatedFKCCorpus", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2021-01-18 19:32:38", "latest_commit": "2023-12-18 14:27:29", "languages": [ @@ -16400,7 +16850,7 @@ "project_name": "ebe-dataset", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2020-10-14 08:15:37", "latest_commit": "2020-12-17 13:39:55", "languages": [], @@ -16412,7 +16862,7 @@ "project_name": "loanwords_gairaigo", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2019-10-21 16:49:45", "latest_commit": "2021-01-08 12:40:02", "languages": [ @@ -16426,19 +16876,31 @@ "project_name": "hurigana-speech-corpus-aozora", "stargazers_count": 17, "source": "GitHub", - "score": -0.2851214537771583, + "score": -0.28523874319330833, "first_commit": "2024-01-16 11:30:12", "latest_commit": "2024-01-31 16:04:24", "languages": [], "model_or_dataset": "dataset" }, + { + "description": 
"NIILC QA data", + "url": "https://github.com/mynlp/niilc-qa", + "project_name": "niilc-qa", + "stargazers_count": 17, + "source": "GitHub", + "score": -0.28523874319330833, + "first_commit": "2015-10-13 11:42:53", + "latest_commit": "2015-11-20 10:35:39", + "languages": [], + "model_or_dataset": "dataset" + }, { "description": "JASS: Japanese-specific Sequence to Sequence Pre-training for Neural Machine Translation (LREC2020) & Linguistically Driven Multi-Task Pre-Training for Low-Resource Neural Machine Translation (ACM TALLIP)", "url": "https://github.com/Mao-KU/JASS", "project_name": "JASS", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2020-02-19 11:19:42", "latest_commit": "2022-01-25 15:24:53", "languages": [], @@ -16450,7 +16912,7 @@ "project_name": "ginza-transformers", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2021-06-23 17:42:12", "latest_commit": "2022-08-09 18:19:33", "languages": [ @@ -16464,7 +16926,7 @@ "project_name": "MedNER-J", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2020-07-28 18:27:41", "latest_commit": "2022-05-17 20:01:05", "languages": [ @@ -16478,7 +16940,7 @@ "project_name": "AIO2_DPR_baseline", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2021-09-13 16:33:09", "latest_commit": "2022-01-08 23:15:11", "languages": [ @@ -16492,7 +16954,7 @@ "project_name": "unsupervised-pos-tagging", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2017-01-07 09:24:36", "latest_commit": "2017-10-11 23:23:04", "languages": [ @@ -16507,7 +16969,7 @@ "project_name": "furigana4epub", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2021-08-29 20:14:15", "latest_commit": "2021-09-11 14:03:11", "languages": [ @@ -16521,7 +16983,7 @@ "project_name": "Laboro-DistilBERT-Japanese", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2020-11-30 16:10:16", "latest_commit": "2020-12-17 15:26:01", "languages": [ @@ -16536,7 +16998,7 @@ "project_name": "character_chat", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2023-03-19 03:36:06", "latest_commit": "2023-06-03 23:30:23", "languages": [ @@ -16550,7 +17012,7 @@ "project_name": "TEDxJP-10K", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2021-01-14 13:14:28", "latest_commit": "2021-01-14 15:33:14", "languages": [ @@ -16564,7 +17026,7 @@ "project_name": "japanese-family-names", "stargazers_count": 16, "source": "GitHub", - "score": -0.2880678182767169, + "score": -0.28819679567295176, "first_commit": "2017-06-08 18:00:02", "latest_commit": "2017-06-09 01:50:19", "languages": [ @@ -16578,7 +17040,7 @@ "project_name": "SuPar-UniDic", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2021-02-21 09:42:27", "latest_commit": "2024-06-28 15:56:24", "languages": [ @@ -16593,7 +17055,7 @@ "project_name": "furiganapad", "stargazers_count": 15, "source": "GitHub", - "score": 
-0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2019-05-14 02:56:41", "latest_commit": "2024-05-23 04:55:05", "languages": [ @@ -16607,7 +17069,7 @@ "project_name": "japanese_llm_simple_webui", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2023-06-04 23:06:13", "latest_commit": "2024-05-12 22:10:56", "languages": [ @@ -16621,7 +17083,7 @@ "project_name": "gpt4-autoeval", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2023-12-18 13:38:36", "latest_commit": "2024-06-06 13:50:35", "languages": [ @@ -16637,7 +17099,7 @@ "project_name": "llm-translator", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2023-12-31 00:47:57", "latest_commit": "2023-12-31 00:53:57", "languages": [ @@ -16651,7 +17113,7 @@ "project_name": "kana2ipa", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2019-12-23 00:07:17", "latest_commit": "2020-10-25 04:42:14", "languages": [ @@ -16665,7 +17127,7 @@ "project_name": "hololive-dictionary", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2021-06-13 09:49:48", "latest_commit": "2024-05-02 16:25:29", "languages": [ @@ -16679,7 +17141,7 @@ "project_name": "pixiv-yomitan", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2024-02-09 19:59:45", "latest_commit": "2024-07-23 22:00:16", "languages": [ @@ -16693,7 +17155,7 @@ "project_name": "Laboro-ParaCorpus", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2021-09-29 18:37:50", "latest_commit": "2021-11-09 10:18:54", "languages": [ @@ -16708,7 +17170,7 @@ "project_name": "ginza-examples", "stargazers_count": 15, "source": "GitHub", - "score": -0.29101418277627555, + "score": -0.2911548481525952, "first_commit": "2020-08-25 02:24:01", "latest_commit": "2021-01-29 00:04:15", "languages": [ @@ -16722,7 +17184,7 @@ "project_name": "japanese-sentence-breaker", "stargazers_count": 14, "source": "GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2021-02-28 21:40:27", "latest_commit": "2021-02-28 22:47:20", "languages": [ @@ -16730,13 +17192,27 @@ ], "model_or_dataset": null }, + { + "description": "Examples to fine-tune encoder-only and encoder-decoder transformers for the Japanese language (Hugging Face)", + "url": "https://github.com/tsmatz/huggingface-finetune-japanese", + "project_name": "huggingface-finetune-japanese", + "stargazers_count": 14, + "source": "GitHub", + "score": -0.2941129006322386, + "first_commit": "2022-10-24 18:13:22", + "latest_commit": "2023-10-06 17:11:54", + "languages": [ + "Jupyter Notebook" + ], + "model_or_dataset": null + }, { "description": "Joint source channel model for Japanese Kana Kanji conversion, Chinese pinyin input and CJE mixed input.", "url": "https://github.com/yohokuno/jsc", "project_name": "jsc", "stargazers_count": 14, "source": "GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2012-08-23 16:39:41", "latest_commit": "2012-12-19 18:36:09", "languages": [ @@ -16750,7 +17226,7 @@ "project_name": "aMLP-japanese", "stargazers_count": 14, "source":
"GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2021-11-13 16:17:10", "latest_commit": "2022-05-10 14:16:55", "languages": [ @@ -16764,7 +17240,7 @@ "project_name": "openai-chatfriend", "stargazers_count": 14, "source": "GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2023-01-17 15:19:27", "latest_commit": "2023-04-03 10:19:35", "languages": [ @@ -16779,7 +17255,7 @@ "project_name": "mozcdic-ut-place-names", "stargazers_count": 14, "source": "GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2023-01-15 18:01:48", "latest_commit": "2024-08-07 03:05:50", "languages": [ @@ -16793,7 +17269,7 @@ "project_name": "oasst1-89k-ja", "stargazers_count": 14, "source": "GitHub", - "score": -0.29396054727583415, + "score": -0.2941129006322386, "first_commit": "2023-05-07 05:27:23", "latest_commit": "2023-11-20 00:23:10", "languages": [ @@ -16807,7 +17283,7 @@ "project_name": "dango", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2021-06-20 13:18:31", "latest_commit": "2021-11-21 19:41:04", "languages": [ @@ -16821,7 +17297,7 @@ "project_name": "PheMT", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2020-10-27 17:05:01", "latest_commit": "2021-02-18 14:05:26", "languages": [ @@ -16835,7 +17311,7 @@ "project_name": "entitypedia", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2017-11-08 13:54:21", "latest_commit": "2018-12-10 12:53:58", "languages": [ @@ -16851,7 +17327,7 @@ "project_name": "gector-ja", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2021-05-11 04:51:42", "latest_commit": "2021-07-16 18:55:03", "languages": [ @@ -16865,7 +17341,7 @@ "project_name": "negima", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2018-06-27 01:49:31", "latest_commit": "2018-08-20 12:31:43", "languages": [ @@ -16879,7 +17355,7 @@ "project_name": "clip-japanese", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2022-02-24 20:42:33", "latest_commit": "2023-03-12 18:43:17", "languages": [ @@ -16893,7 +17369,7 @@ "project_name": "fcitx5-hazkey", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2024-05-21 22:15:24", "latest_commit": "2024-08-01 12:39:04", "languages": [ @@ -16908,7 +17384,7 @@ "project_name": "ndlngramdata", "stargazers_count": 13, "source": "GitHub", - "score": -0.2969069117753928, + "score": -0.29707095311188203, "first_commit": "2022-12-06 10:20:12", "latest_commit": "2023-01-10 10:38:37", "languages": [], @@ -16920,7 +17396,7 @@ "project_name": "alphabet2kana", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2021-03-21 12:29:41", "latest_commit": "2024-08-10 11:10:20", "languages": [ @@ -16934,7 +17410,7 @@ "project_name": "allennlp-shiba-model", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2021-06-26 18:37:25", "latest_commit": "2021-06-27 00:42:45", "languages": [ @@ -16948,7 
+17424,7 @@ "project_name": "NTM", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2019-07-24 20:14:25", "latest_commit": "2019-07-24 22:26:58", "languages": [ @@ -16962,7 +17438,7 @@ "project_name": "ClipCap-for-Japanese", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2022-10-05 00:44:54", "latest_commit": "2022-10-05 02:08:58", "languages": [ @@ -16976,7 +17452,7 @@ "project_name": "easynovelassistant", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2024-04-22 15:59:35", "latest_commit": "2024-07-05 17:29:36", "languages": [ @@ -16990,7 +17466,7 @@ "project_name": "deep-question-generation", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2021-03-07 17:01:23", "latest_commit": "2023-03-12 18:47:52", "languages": [ @@ -17004,7 +17480,7 @@ "project_name": "kanaria", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2019-01-07 22:16:32", "latest_commit": "2024-06-28 09:48:18", "languages": [ @@ -17020,7 +17496,7 @@ "project_name": "medbertjp", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2020-10-23 03:07:15", "latest_commit": "2020-11-22 08:37:27", "languages": [ @@ -17035,7 +17511,7 @@ "project_name": "albert-japanese-tinysegmenter", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2023-06-04 15:21:32", "latest_commit": "2023-09-26 23:07:55", "languages": [ @@ -17049,7 +17525,7 @@ "project_name": "easylightchatassistant", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2024-04-06 17:40:16", "latest_commit": "2024-04-24 00:49:12", "languages": [], @@ -17061,7 +17537,7 @@ "project_name": "Ayashiy-Nipongo-Dic", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2021-12-14 11:50:25", "latest_commit": "2024-05-01 15:23:24", "languages": [], @@ -17073,7 +17549,7 @@ "project_name": "Web-Crawled-Corpus-for-Japanese-Chinese-NMT", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2021-11-12 23:19:26", "latest_commit": "2023-10-01 03:13:18", "languages": [], @@ -17085,7 +17561,7 @@ "project_name": "CourseraParallelCorpusMining", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2020-01-09 18:09:50", "latest_commit": "2022-06-14 14:18:29", "languages": [ @@ -17100,7 +17576,7 @@ "project_name": "honkoku-data", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2020-02-23 16:47:55", "latest_commit": "2024-06-14 17:43:19", "languages": [], @@ -17112,7 +17588,7 @@ "project_name": "commonsense-moral-ja", "stargazers_count": 12, "source": "GitHub", - "score": -0.2998532762749514, + "score": -0.30002900559152546, "first_commit": "2022-12-29 13:18:05", "latest_commit": "2023-12-18 11:41:00", "languages": [ @@ -17120,13 +17596,28 @@ ], "model_or_dataset": "dataset" }, + { + "description": "Japanese 
semantic test suite (FraCaS counterpart and extensions)", + "url": "https://github.com/DaisukeBekki/JSeM", + "project_name": "JSeM", + "stargazers_count": 12, + "source": "GitHub", + "score": -0.30002900559152546, + "first_commit": "2019-09-08 18:33:50", + "latest_commit": "2023-05-05 14:03:37", + "languages": [ + "Haskell", + "JavaScript" + ], + "model_or_dataset": "dataset" + }, { "description": "A powerful text cleaner for Japanese web texts", "url": "https://github.com/ku-nlp/text-cleaning", "project_name": "text-cleaning", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2020-02-10 15:31:27", "latest_commit": "2022-11-21 10:21:56", "languages": [ @@ -17140,7 +17631,7 @@ "project_name": "VISA", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2022-03-25 17:15:42", "latest_commit": "2022-10-17 10:34:12", "languages": [], @@ -17152,7 +17643,7 @@ "project_name": "bert-japanese-ner-finetuning", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2021-11-14 22:57:16", "latest_commit": "2022-06-19 16:24:15", "languages": [ @@ -17167,7 +17658,7 @@ "project_name": "evaluate_japanese_w2v", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2020-02-07 08:02:49", "latest_commit": "2023-11-03 21:09:04", "languages": [ @@ -17181,7 +17672,7 @@ "project_name": "prefix-tuning-gpt", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2022-09-05 16:56:23", "latest_commit": "2023-03-22 14:13:22", "languages": [ @@ -17195,7 +17686,7 @@ "project_name": "asa-python", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2017-09-13 22:02:47", "latest_commit": "2019-02-16 23:52:13", "languages": [ @@ -17209,7 +17700,7 @@ "project_name": "jel", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2021-05-01 15:53:49", "latest_commit": "2021-07-25 13:01:46", "languages": [ @@ -17223,7 +17714,7 @@ "project_name": "python-npycrf", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2017-07-01 18:00:52", "latest_commit": "2018-03-23 00:04:16", "languages": [ @@ -17238,7 +17729,7 @@ "project_name": "aozora_classification", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2017-03-05 12:30:50", "latest_commit": "2017-09-03 12:01:40", "languages": [ @@ -17253,7 +17744,7 @@ "project_name": "ja-law-parser", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2023-11-26 19:38:36", "latest_commit": "2024-01-14 19:59:45", "languages": [ @@ -17267,7 +17758,7 @@ "project_name": "jawiki_word_vector_updater", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": "2019-02-13 16:18:58", "latest_commit": "2020-05-07 02:25:19", "languages": [], @@ -17279,7 +17770,7 @@ "project_name": "sbert-ja", "stargazers_count": 11, "source": "GitHub", - "score": -0.30279964077451005, + "score": -0.3029870580711689, "first_commit": 
"2021-07-31 01:11:17", "latest_commit": "2021-08-08 15:47:30", "languages": [ @@ -17287,13 +17778,27 @@ ], "model_or_dataset": "model" }, + { + "description": "This is a Japanese text corpus that consists of Wikipedia articles with various linguistic annotations.", + "url": "https://github.com/ku-nlp/WikipediaAnnotatedCorpus", + "project_name": "WikipediaAnnotatedCorpus", + "stargazers_count": 11, + "source": "GitHub", + "score": -0.3029870580711689, + "first_commit": "2022-09-20 18:33:20", + "latest_commit": "2024-09-06 23:21:41", + "languages": [ + "Python" + ], + "model_or_dataset": "dataset" + }, { "description": "A processor for KyotoCorpus, KWDLC, and AnnotatedFKCCorpus", "url": "https://github.com/ku-nlp/kyoto-reader", "project_name": "kyoto-reader", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2019-11-15 20:46:54", "latest_commit": "2024-06-26 12:45:20", "languages": [ @@ -17307,7 +17812,7 @@ "project_name": "zunda-python", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2019-02-24 01:08:40", "latest_commit": "2019-11-30 18:44:15", "languages": [ @@ -17321,7 +17826,7 @@ "project_name": "wikipedia-passages-jawiki-embeddings-utils", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2023-11-14 11:34:23", "latest_commit": "2024-03-29 08:18:44", "languages": [ @@ -17336,7 +17841,7 @@ "project_name": "jisho", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": null, "latest_commit": null, "languages": [], @@ -17348,7 +17853,7 @@ "project_name": "j-liwc2015", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2021-09-04 09:11:35", "latest_commit": "2022-11-15 15:37:27", "languages": [], @@ -17360,7 +17865,7 @@ "project_name": "huriganacorpus-aozora", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2021-08-31 16:37:20", "latest_commit": "2024-01-17 18:05:54", "languages": [], @@ -17372,7 +17877,7 @@ "project_name": "python-nlp-book", "stargazers_count": 10, "source": "GitHub", - "score": -0.30574600527406864, + "score": -0.3059451105508123, "first_commit": "2023-01-28 18:34:33", "latest_commit": "2023-05-07 23:55:27", "languages": [ @@ -17386,7 +17891,7 @@ "project_name": "Jusho", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2020-11-12 03:39:08", "latest_commit": "2024-06-04 16:03:08", "languages": [ @@ -17400,7 +17905,7 @@ "project_name": "ja-senter-benchmark", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2023-02-10 12:58:42", "latest_commit": "2023-02-27 17:58:52", "languages": [ @@ -17414,7 +17919,7 @@ "project_name": "japanese_text_classification", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2019-07-01 18:33:17", "latest_commit": "2020-01-15 19:12:53", "languages": [ @@ -17429,7 +17934,7 @@ "project_name": "pyknp-eventgraph", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2019-11-21 21:46:46", "latest_commit": 
"2022-09-26 12:21:44", "languages": [ @@ -17443,7 +17948,7 @@ "project_name": "japanese_summarizer", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2020-08-18 20:44:35", "latest_commit": "2022-08-01 20:28:21", "languages": [ @@ -17457,7 +17962,7 @@ "project_name": "WordCloud-Japanese", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2019-05-11 13:59:53", "latest_commit": "2020-01-02 06:45:49", "languages": [ @@ -17471,7 +17976,7 @@ "project_name": "snark", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2018-12-22 18:37:03", "latest_commit": "2020-03-11 22:01:51", "languages": [ @@ -17485,7 +17990,7 @@ "project_name": "PyKatsuyou", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2022-01-01 01:25:25", "latest_commit": "2023-09-04 17:26:36", "languages": [ @@ -17499,7 +18004,7 @@ "project_name": "JaSPICE", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2023-03-01 19:14:54", "latest_commit": "2023-11-08 21:16:53", "languages": [ @@ -17513,7 +18018,7 @@ "project_name": "bertjsc", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2023-03-16 11:05:59", "latest_commit": "2024-08-03 12:15:17", "languages": [ @@ -17528,7 +18033,7 @@ "project_name": "awabi", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2020-03-13 08:08:26", "latest_commit": "2024-07-01 16:35:36", "languages": [ @@ -17542,7 +18047,7 @@ "project_name": "dependency-based-japanese-word-embeddings", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2019-07-31 15:42:14", "latest_commit": "2019-08-14 11:39:35", "languages": [], @@ -17554,7 +18059,7 @@ "project_name": "AMI-Meeting-Parallel-Corpus", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2020-12-11 14:22:19", "latest_commit": "2020-12-11 16:41:42", "languages": [], @@ -17566,7 +18071,7 @@ "project_name": "huggingface-datasets_JGLUE", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2023-02-25 13:33:13", "latest_commit": "2024-05-21 11:23:51", "languages": [ @@ -17580,7 +18085,7 @@ "project_name": "meconaudio", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2023-04-12 13:43:26", "latest_commit": "2023-10-26 08:40:27", "languages": [], @@ -17592,7 +18097,7 @@ "project_name": "GEC-Info-ja", "stargazers_count": 9, "source": "GitHub", - "score": -0.30869236977362724, + "score": -0.30890316303045573, "first_commit": "2022-07-02 01:07:27", "latest_commit": "2024-03-27 01:15:21", "languages": [], @@ -17604,7 +18109,7 @@ "project_name": "jagger-python", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2023-12-27 22:09:07", "latest_commit": "2024-03-12 02:15:59", "languages": [ @@ -17618,7 +18123,7 @@ "project_name": "text2phoneme", "stargazers_count": 8, "source": "GitHub", - 
"score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2023-04-25 22:53:08", "latest_commit": "2023-05-17 00:44:01", "languages": [ @@ -17632,7 +18137,7 @@ "project_name": "DNorm-J", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2020-05-07 13:47:43", "latest_commit": "2022-06-30 12:09:11", "languages": [ @@ -17646,7 +18151,7 @@ "project_name": "JaMIE", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2019-10-09 11:44:21", "latest_commit": "2023-05-18 05:19:18", "languages": [ @@ -17660,7 +18165,7 @@ "project_name": "BLIP2-Japanese", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2023-05-31 22:24:24", "latest_commit": "2024-01-16 08:54:50", "languages": [ @@ -17675,7 +18180,7 @@ "project_name": "shisa-v2", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2024-03-29 15:12:36", "latest_commit": "2024-07-30 00:26:10", "languages": [ @@ -17689,7 +18194,7 @@ "project_name": "mecab-ipadic-seed", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2016-07-18 01:26:14", "latest_commit": "2016-07-30 19:09:57", "languages": [ @@ -17703,7 +18208,7 @@ "project_name": "AcademicRoBERTa", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2022-09-05 15:58:57", "latest_commit": "2023-05-16 13:48:22", "languages": [ @@ -17717,7 +18222,7 @@ "project_name": "mecab-mozcdic", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2017-07-21 20:37:42", "latest_commit": "2018-01-12 16:07:57", "languages": [], @@ -17729,7 +18234,7 @@ "project_name": "anthy", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2010-05-14 14:19:43", "latest_commit": "2023-02-25 21:37:47", "languages": [ @@ -17744,7 +18249,7 @@ "project_name": "jcms", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2022-11-07 15:44:38", "latest_commit": "2022-11-07 16:40:03", "languages": [ @@ -17758,7 +18263,7 @@ "project_name": "ndl-minhon-ocrdataset", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2023-01-19 15:22:00", "latest_commit": "2024-02-07 14:16:11", "languages": [ @@ -17772,7 +18277,7 @@ "project_name": "WLSP-familiarity", "stargazers_count": 8, "source": "GitHub", - "score": -0.3116387342731859, + "score": -0.3118612155100991, "first_commit": "2019-03-13 10:26:42", "latest_commit": "2024-06-30 15:05:18", "languages": [], @@ -17784,7 +18289,7 @@ "project_name": "jamorasep", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2023-03-08 22:54:34", "latest_commit": "2023-09-09 01:14:14", "languages": [ @@ -17798,7 +18303,7 @@ "project_name": "text_recognition", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2022-03-30 13:32:26", "latest_commit": "2023-07-10 18:03:39", "languages": [ @@ -17812,7 +18317,7 @@ "project_name": "kantan", 
"stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2015-07-15 12:45:17", "latest_commit": "2022-07-20 20:10:04", "languages": [ @@ -17826,7 +18331,7 @@ "project_name": "aozora-corpus-generator", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2017-10-09 16:11:26", "latest_commit": "2022-04-04 13:15:59", "languages": [ @@ -17840,7 +18345,7 @@ "project_name": "AugLy-jp", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2021-06-13 18:48:45", "latest_commit": "2021-09-30 13:16:24", "languages": [ @@ -17855,7 +18360,7 @@ "project_name": "julius4seg", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2018-02-20 23:19:39", "latest_commit": "2021-08-22 18:57:51", "languages": [ @@ -17869,7 +18374,7 @@ "project_name": "sentiment_ja_js", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2021-12-08 22:48:44", "latest_commit": "2021-12-10 00:56:29", "languages": [ @@ -17883,7 +18388,7 @@ "project_name": "manbyo-sudachi", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2021-04-05 15:59:36", "latest_commit": "2021-04-06 18:04:00", "languages": [ @@ -17897,7 +18402,7 @@ "project_name": "aws_dic_for_google_ime", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2019-11-24 03:47:55", "latest_commit": "2019-11-30 09:28:33", "languages": [ @@ -17912,7 +18417,7 @@ "project_name": "hirakanadic", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2021-06-22 21:38:13", "latest_commit": "2023-07-08 17:22:15", "languages": [ @@ -17926,7 +18431,7 @@ "project_name": "GazeVQA", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2024-02-22 06:53:00", "latest_commit": "2024-05-20 11:17:25", "languages": [], @@ -17938,7 +18443,7 @@ "project_name": "JapaneseNLI", "stargazers_count": 7, "source": "GitHub", - "score": -0.3145850987727445, + "score": -0.3148192679897425, "first_commit": "2020-03-10 22:43:24", "latest_commit": "2021-06-08 23:48:55", "languages": [ @@ -17952,7 +18457,7 @@ "project_name": "cabocha", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2011-07-29 04:08:14", "latest_commit": "2022-08-17 22:24:13", "languages": [ @@ -17971,7 +18476,7 @@ "project_name": "noyaki", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2021-05-23 18:41:17", "latest_commit": "2022-08-25 18:42:55", "languages": [ @@ -17985,7 +18490,7 @@ "project_name": "bert-ner-japanese", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2022-09-26 18:20:34", "latest_commit": "2022-09-26 21:44:38", "languages": [ @@ -17999,7 +18504,7 @@ "project_name": "jmlm_scoring", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2021-08-13 09:18:33", "latest_commit": "2022-02-20 22:39:25", 
"languages": [ @@ -18013,7 +18518,7 @@ "project_name": "swallow-evaluation", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2023-10-30 14:34:19", "latest_commit": "2024-07-13 21:29:54", "languages": [ @@ -18029,7 +18534,7 @@ "project_name": "showcase", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2018-06-26 13:19:53", "latest_commit": "2018-06-26 16:53:20", "languages": [ @@ -18043,7 +18548,7 @@ "project_name": "desuwa", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2021-04-20 16:37:43", "latest_commit": "2022-05-23 12:27:37", "languages": [ @@ -18057,7 +18562,7 @@ "project_name": "chirptext", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2014-12-01 20:39:06", "latest_commit": "2022-10-04 21:57:00", "languages": [ @@ -18071,7 +18576,7 @@ "project_name": "mixture-of-unigram-model", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2016-01-05 15:33:24", "latest_commit": "2017-06-16 13:33:57", "languages": [ @@ -18085,7 +18590,7 @@ "project_name": "mlm-scoring-transformers", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2022-10-03 14:27:09", "latest_commit": "2022-12-14 20:07:24", "languages": [ @@ -18099,7 +18604,7 @@ "project_name": "tra-fugu", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2023-02-28 21:41:49", "latest_commit": "2023-03-02 03:10:38", "languages": [ @@ -18113,7 +18618,7 @@ "project_name": "wikipedia-japanese-open-rag", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2023-12-28 07:55:40", "latest_commit": "2024-01-06 19:46:00", "languages": [ @@ -18127,7 +18632,7 @@ "project_name": "pydomino", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2024-05-27 10:52:07", "latest_commit": "2024-07-23 11:16:10", "languages": [ @@ -18143,7 +18648,7 @@ "project_name": "magpie-nemotron", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2024-07-05 21:07:54", "latest_commit": "2024-07-05 23:35:16", "languages": [ @@ -18157,7 +18662,7 @@ "project_name": "kana", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2022-06-28 22:39:35", "latest_commit": "2023-02-10 19:03:27", "languages": [], @@ -18169,7 +18674,7 @@ "project_name": "niinii", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2021-06-27 18:31:51", "latest_commit": "2024-08-11 11:54:57", "languages": [ @@ -18183,7 +18688,7 @@ "project_name": "AITuberDegikkoMirii", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2023-03-07 21:35:01", "latest_commit": "2023-03-18 06:59:43", "languages": [ @@ -18197,7 +18702,7 @@ "project_name": "Winograd-Schema-Challenge-Ja", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + 
"score": -0.31777732046938595, "first_commit": "2019-01-25 19:14:29", "latest_commit": "2019-01-25 23:52:18", "languages": [ @@ -18211,7 +18716,7 @@ "project_name": "GeneralPolicySpeechOfPrimeMinisterOfJapan", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2014-10-28 00:56:59", "latest_commit": "2020-01-14 09:53:30", "languages": [], @@ -18223,7 +18728,7 @@ "project_name": "dcsg-ja", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2023-01-04 17:36:44", "latest_commit": "2023-03-10 17:29:26", "languages": [], @@ -18235,7 +18740,7 @@ "project_name": "ramendb", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2018-03-24 21:21:59", "latest_commit": "2024-08-06 10:47:04", "languages": [ @@ -18249,7 +18754,7 @@ "project_name": "J-CRe3", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2024-03-04 01:40:27", "latest_commit": "2024-06-14 18:24:05", "languages": [], @@ -18261,7 +18766,7 @@ "project_name": "pdmocrdataset-part2", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2022-05-24 10:37:30", "latest_commit": "2024-06-26 16:10:28", "languages": [], @@ -18273,7 +18778,7 @@ "project_name": "nlpbook", "stargazers_count": 6, "source": "GitHub", - "score": -0.31753146327230314, + "score": -0.31777732046938595, "first_commit": "2024-05-16 08:44:03", "latest_commit": "2024-07-03 14:25:21", "languages": [], @@ -18285,7 +18790,7 @@ "project_name": "jptranstokenizer", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2022-08-24 19:35:03", "latest_commit": "2024-02-03 03:07:59", "languages": [ @@ -18299,7 +18804,7 @@ "project_name": "compare-ja-tokenizer", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2023-06-13 18:08:18", "latest_commit": "2023-06-16 10:18:55", "languages": [ @@ -18313,7 +18818,7 @@ "project_name": "jawiki-cleaner", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2021-02-21 20:04:59", "latest_commit": "2021-02-21 21:41:44", "languages": [ @@ -18327,7 +18832,7 @@ "project_name": "hidden-markov-model", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2016-10-17 14:33:56", "latest_commit": "2017-06-16 13:33:24", "languages": [ @@ -18341,7 +18846,7 @@ "project_name": "Ngram-language-model", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2017-12-05 14:45:11", "latest_commit": "2017-12-05 15:03:52", "languages": [ @@ -18355,19 +18860,33 @@ "project_name": "akaza", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": null, "latest_commit": null, "languages": [], "model_or_dataset": null }, + { + "description": "Whisperのデコーダをllm-jp-1.3b-v1.0に置き換えた音声認識モデルを学習させるためのコード", + "url": "https://github.com/tosiyuki/llm-jp-asr", + "project_name": "llm-jp-asr", + "stargazers_count": 5, + "source": "GitHub", + "score": -0.3207353729490294, + "first_commit": "2024-09-07 22:07:43", 
+ "latest_commit": "2024-09-07 22:57:13", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, { "description": "elmo-japanese", "url": "https://github.com/cl-tohoku/elmo-japanese", "project_name": "elmo-japanese", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2019-10-01 12:16:29", "latest_commit": "2019-10-07 10:37:31", "languages": [ @@ -18381,7 +18900,7 @@ "project_name": "japagen", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2024-01-12 10:53:54", "latest_commit": "2024-08-09 17:41:21", "languages": [], @@ -18393,7 +18912,7 @@ "project_name": "giant_ja-en_parallel_corpus", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2019-08-04 12:01:19", "latest_commit": "2019-08-04 17:40:02", "languages": [ @@ -18407,7 +18926,7 @@ "project_name": "BPersona-chat", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2022-10-10 14:15:10", "latest_commit": "2023-01-12 17:39:24", "languages": [], @@ -18419,7 +18938,7 @@ "project_name": "ita-corpus-chuwa", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2021-07-16 21:19:53", "latest_commit": "2021-08-25 12:22:06", "languages": [ @@ -18433,7 +18952,7 @@ "project_name": "ProSub", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2021-09-12 18:55:13", "latest_commit": "2024-06-02 19:06:13", "languages": [ @@ -18447,7 +18966,7 @@ "project_name": "allennlp-NER-ja", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2022-05-05 10:28:23", "latest_commit": "2022-05-10 00:52:35", "languages": [ @@ -18461,7 +18980,7 @@ "project_name": "chariot-PyTorch-Japanese-text-classification", "stargazers_count": 5, "source": "GitHub", - "score": -0.32047782777186173, + "score": -0.3207353729490294, "first_commit": "2019-03-02 15:04:41", "latest_commit": "2019-03-19 02:51:30", "languages": [ @@ -18475,7 +18994,7 @@ "project_name": "hasami", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2020-12-30 21:32:13", "latest_commit": "2021-02-21 14:39:11", "languages": [ @@ -18489,7 +19008,7 @@ "project_name": "nagisa_bert", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2022-09-26 03:45:52", "latest_commit": "2023-12-23 16:14:09", "languages": [ @@ -18504,7 +19023,7 @@ "project_name": "toEmoji", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2018-02-25 14:52:07", "latest_commit": "2018-04-16 00:59:20", "languages": [ @@ -18518,7 +19037,7 @@ "project_name": "jinf", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2022-02-03 17:59:01", "latest_commit": "2022-12-27 10:28:22", "languages": [ @@ -18532,7 +19051,7 @@ "project_name": "japanese-nli-model", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2022-10-26 17:42:42", "latest_commit": "2022-10-26 17:42:42", "languages": [ 
@@ -18546,7 +19065,7 @@ "project_name": "go-kakasi", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2024-02-11 19:08:03", "latest_commit": "2024-03-03 18:23:56", "languages": [ @@ -18560,7 +19079,7 @@ "project_name": "neologdn-java", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2017-02-20 18:07:05", "latest_commit": "2021-10-11 22:35:59", "languages": [ @@ -18574,7 +19093,7 @@ "project_name": "jp-azureopenai-samples", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2023-06-21 21:27:33", "latest_commit": "2024-08-16 11:31:48", "languages": [ @@ -18590,7 +19109,7 @@ "project_name": "kanji-flashcard-app-gpt4", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2023-10-17 23:33:19", "latest_commit": "2023-10-17 23:41:17", "languages": [ @@ -18605,7 +19124,7 @@ "project_name": "google-vs-deepl-je", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2020-03-22 19:45:11", "latest_commit": "2020-03-22 23:27:00", "languages": [ @@ -18619,7 +19138,7 @@ "project_name": "jawikicorpus", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2018-04-05 01:07:48", "latest_commit": "2018-11-24 16:44:02", "languages": [], @@ -18631,7 +19150,7 @@ "project_name": "huggingface-datasets_wrime", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2023-01-12 10:43:54", "latest_commit": "2023-01-15 12:39:01", "languages": [ @@ -18645,7 +19164,7 @@ "project_name": "japanese-technical-dict", "stargazers_count": 4, "source": "GitHub", - "score": -0.3234241922714204, + "score": -0.3236934254286728, "first_commit": "2024-01-08 14:44:52", "latest_commit": "2024-06-18 00:54:03", "languages": [ @@ -18659,7 +19178,7 @@ "project_name": "mecab-text-cleaner", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2023-09-01 16:18:34", "latest_commit": "2024-03-29 00:06:20", "languages": [ @@ -18674,7 +19193,7 @@ "project_name": "python-habachen", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2023-10-04 07:40:00", "latest_commit": "2024-01-21 10:29:31", "languages": [ @@ -18691,7 +19210,7 @@ "project_name": "kuzukiri", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-11-20 00:05:51", "latest_commit": "2024-06-11 16:43:31", "languages": [ @@ -18707,7 +19226,7 @@ "project_name": "jrte-corpus_example", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-08-23 09:46:30", "latest_commit": "2021-11-19 13:11:47", "languages": [ @@ -18721,7 +19240,7 @@ "project_name": "mbart-finetuning", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-10-14 00:05:39", "latest_commit": "2021-10-14 00:16:10", "languages": [ @@ -18735,7 +19254,7 @@ "project_name": "tweet_extructor", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": 
-0.3266514779083162, "first_commit": "2018-06-20 16:23:11", "latest_commit": "2022-08-28 13:30:18", "languages": [ @@ -18749,19 +19268,33 @@ "project_name": "japanese_chatbot", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": null, "latest_commit": null, "languages": [], "model_or_dataset": null }, + { + "description": "A state-of-the-art open-source Japanese <--> English machine translation system based on the latest NMT research.", + "url": "https://github.com/matthewbieda/jp-translate.cloud", + "project_name": "jp-translate.cloud", + "stargazers_count": 3, + "source": "GitHub", + "score": -0.3266514779083162, + "first_commit": "2022-02-21 12:23:42", + "latest_commit": "2024-09-05 09:24:02", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, { "description": "Comparison code of various tokenizers", "url": "https://github.com/legalforce-research/tokenizer-speed-bench", "project_name": "tokenizer-speed-bench", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-10-28 21:38:49", "latest_commit": "2023-03-01 14:07:29", "languages": [ @@ -18778,7 +19311,7 @@ "project_name": "listup_precedent", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2023-01-15 08:01:44", "latest_commit": "2024-05-11 22:30:27", "languages": [ @@ -18792,7 +19325,7 @@ "project_name": "juman-bin", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2017-05-11 13:18:41", "latest_commit": "2017-05-11 13:53:25", "languages": [ @@ -18806,7 +19339,7 @@ "project_name": "Japanese-Word-Of-The-Day", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-07-10 00:03:05", "latest_commit": "2021-08-11 03:03:38", "languages": [ @@ -18820,7 +19353,7 @@ "project_name": "jawikivec", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2018-06-04 01:22:54", "latest_commit": "2018-11-24 17:10:01", "languages": [], @@ -18832,7 +19365,7 @@ "project_name": "mh-dict-jp", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2023-04-27 20:01:36", "latest_commit": "2023-11-27 13:46:48", "languages": [ @@ -18846,7 +19379,7 @@ "project_name": "jesc_small", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2019-07-06 12:36:45", "latest_commit": "2019-07-06 12:49:24", "languages": [], @@ -18858,7 +19391,7 @@ "project_name": "japanese-corpus", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2018-08-19 04:18:55", "latest_commit": "2018-10-09 19:08:10", "languages": [], @@ -18870,7 +19403,7 @@ "project_name": "speechBSD", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2022-05-30 11:17:29", "latest_commit": "2024-02-07 22:15:37", "languages": [], @@ -18882,7 +19415,7 @@ "project_name": "anlp-jp-history", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2015-08-12 17:40:32", "latest_commit": "2024-04-05 19:33:15", "languages": [ @@ -18896,7 +19429,7 
@@ "project_name": "handson-language-models", "stargazers_count": 3, "source": "GitHub", - "score": -0.326370556770979, + "score": -0.3266514779083162, "first_commit": "2021-03-13 15:29:37", "latest_commit": "2021-03-18 19:33:34", "languages": [ @@ -18910,7 +19443,7 @@ "project_name": "jdepp-python", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2024-01-01 23:34:44", "latest_commit": "2024-02-14 22:09:31", "languages": [ @@ -18924,7 +19457,7 @@ "project_name": "wiredify", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-09-03 09:17:14", "latest_commit": "2023-12-19 01:01:46", "languages": [ @@ -18938,7 +19471,7 @@ "project_name": "utsuho", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-03-27 11:07:55", "latest_commit": "2023-11-20 08:53:43", "languages": [ @@ -18952,7 +19485,7 @@ "project_name": "Japanese-BERT-Sentiment-Analyzer", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2021-03-17 22:22:31", "latest_commit": "2021-04-20 00:41:29", "languages": [ @@ -18966,7 +19499,7 @@ "project_name": "ishi", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2019-12-21 17:24:25", "latest_commit": "2020-05-15 22:18:26", "languages": [ @@ -18980,7 +19513,7 @@ "project_name": "unihan-lm", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2020-09-14 16:41:10", "latest_commit": "2020-11-06 10:12:50", "languages": [ @@ -18994,7 +19527,7 @@ "project_name": "japanese_qa_demo_with_haystack_and_es", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-12-18 19:21:06", "latest_commit": "2022-12-19 03:57:34", "languages": [ @@ -19008,7 +19541,7 @@ "project_name": "trimatch", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2021-08-30 17:26:15", "latest_commit": "2024-02-08 10:06:06", "languages": [ @@ -19022,7 +19555,7 @@ "project_name": "tantivy-vibrato", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-08-25 13:31:30", "latest_commit": "2023-01-19 10:12:17", "languages": [ @@ -19036,7 +19569,7 @@ "project_name": "find-simdoc", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-08-31 12:56:10", "latest_commit": "2022-09-27 11:39:27", "languages": [ @@ -19051,7 +19584,7 @@ "project_name": "stringmatch-bench", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-09-29 12:24:29", "latest_commit": "2022-09-30 11:36:55", "languages": [ @@ -19066,7 +19599,7 @@ "project_name": "japanki", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-10-04 17:43:27", "latest_commit": "2023-10-17 01:00:00", "languages": [ @@ -19080,7 +19613,7 @@ "project_name": "pitch-accent", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, 
"first_commit": "2023-08-09 13:36:52", "latest_commit": "2023-09-08 08:19:43", "languages": [ @@ -19096,7 +19629,7 @@ "project_name": "gpt-ja", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2021-06-13 13:14:18", "latest_commit": "2021-09-27 21:08:39", "languages": [ @@ -19110,7 +19643,7 @@ "project_name": "AcademicBART", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-01-11 16:19:21", "latest_commit": "2024-07-11 22:09:11", "languages": [ @@ -19124,7 +19657,7 @@ "project_name": "denonbu-ime-dic", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2019-12-30 01:37:16", "latest_commit": "2022-11-13 23:09:25", "languages": [], @@ -19136,7 +19669,7 @@ "project_name": "anthy", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2013-06-30 11:09:24", "latest_commit": "2013-07-27 22:45:26", "languages": [ @@ -19150,7 +19683,7 @@ "project_name": "Data-on-Japanese-Diet-Members", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-07-02 22:58:11", "latest_commit": "2022-09-29 13:37:11", "languages": [], @@ -19162,7 +19695,7 @@ "project_name": "ndlngramviewer_v2", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2022-12-26 13:29:58", "latest_commit": "2023-07-20 11:05:53", "languages": [ @@ -19180,7 +19713,7 @@ "project_name": "huggingface-datasets_livedoor-news-corpus", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-01-17 23:16:18", "latest_commit": "2023-10-28 14:40:17", "languages": [ @@ -19194,7 +19727,7 @@ "project_name": "jpn_explainable_qa_dataset", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": null, "latest_commit": null, "languages": [], @@ -19206,7 +19739,7 @@ "project_name": "jemhopqa", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2023-08-01 02:07:45", "latest_commit": "2024-06-05 14:54:08", "languages": [ @@ -19214,13 +19747,25 @@ ], "model_or_dataset": "dataset" }, + { + "description": "Dataset of paper \"Verification of Chain-of-Thought Prompting in Japanese\"", + "url": "https://github.com/nlp-waseda/chain-of-thought-ja-dataset", + "project_name": "chain-of-thought-ja-dataset", + "stargazers_count": 2, + "source": "GitHub", + "score": -0.32960953038795965, + "first_commit": "2023-03-03 13:17:07", + "latest_commit": "2023-09-14 10:14:39", + "languages": [], + "model_or_dataset": "dataset" + }, { "description": "【2024年版】BERTによるテキスト分類", "url": "https://github.com/hpprc/bert-classification-tutorial-2024", "project_name": "bert-classification-tutorial-2024", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": "2024-05-28 10:50:41", "latest_commit": "2024-07-08 17:10:46", "languages": [ @@ -19234,7 +19779,7 @@ "project_name": "Gemma2_2b_Japanese_finetuning_colab.ipynb", "stargazers_count": 2, "source": "GitHub", - "score": -0.32931692127053763, + "score": -0.32960953038795965, "first_commit": null, "latest_commit": null, 
"languages": [], @@ -19246,7 +19791,7 @@ "project_name": "joint-information-extraction-hs", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2021-11-05 12:32:31", "latest_commit": "2021-11-17 12:29:39", "languages": [ @@ -19261,7 +19806,7 @@ "project_name": "t5_japanese_dialogue_generation", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2021-11-19 18:32:46", "latest_commit": "2021-11-28 10:48:04", "languages": [ @@ -19276,7 +19821,7 @@ "project_name": "lm-evaluation-harness-jp-stable", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2020-08-27 18:08:04", "latest_commit": "2023-06-19 10:48:40", "languages": [ @@ -19291,7 +19836,7 @@ "project_name": "japanese2phoneme", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2021-02-09 16:27:24", "latest_commit": "2022-02-24 16:11:04", "languages": [ @@ -19305,7 +19850,7 @@ "project_name": "anlp_nlp2021_d3-1", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2021-03-12 02:34:47", "latest_commit": "2022-03-08 13:40:28", "languages": [ @@ -19319,7 +19864,7 @@ "project_name": "JDT-with-KenLM-scoring", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2022-06-28 09:22:47", "latest_commit": "2022-07-01 21:29:00", "languages": [ @@ -19333,7 +19878,7 @@ "project_name": "japanese-word-aggregation", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2018-08-08 13:39:47", "latest_commit": "2018-08-20 07:51:01", "languages": [ @@ -19347,7 +19892,7 @@ "project_name": "SAT-for-Japanese", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2022-10-04 23:48:47", "latest_commit": "2022-10-05 00:40:09", "languages": [ @@ -19361,7 +19906,7 @@ "project_name": "japanese_llm_eval", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2024-03-28 14:56:01", "latest_commit": "2024-04-22 07:39:03", "languages": [ @@ -19376,7 +19921,7 @@ "project_name": "qlora_ja", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-09-10 15:04:15", "latest_commit": "2024-07-13 10:01:12", "languages": [ @@ -19384,13 +19929,27 @@ ], "model_or_dataset": null }, + { + "description": "A Japanese Parser (including historical Japanese)", + "url": "https://github.com/komiya-lab/monaka", + "project_name": "monaka", + "stargazers_count": 1, + "source": "GitHub", + "score": -0.3325675828676031, + "first_commit": "2024-01-24 10:26:06", + "latest_commit": "2024-07-16 14:16:00", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, { "description": "日本の住所を都道府県/市区町村/町名/その他に分割するライブラリです", "url": "https://github.com/yuukitoriyama/japanese-address-parser", "project_name": "japanese-address-parser", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-11-17 23:16:38", "latest_commit": "2024-08-12 18:23:09", "languages": [ @@ -19405,7 +19964,7 @@ "project_name": "friendly_JA-Model", 
"stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2022-01-10 12:03:59", "latest_commit": "2022-05-22 14:42:46", "languages": [], @@ -19417,7 +19976,7 @@ "project_name": "ChuanhuChatGPTJapanese", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-03-02 21:37:13", "latest_commit": "2023-03-07 14:10:10", "languages": [ @@ -19431,7 +19990,7 @@ "project_name": "chrome-ext-translate-to-hiragana-with-chatgpt", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-03-25 20:09:31", "latest_commit": "2023-04-01 16:05:53", "languages": [ @@ -19445,7 +20004,7 @@ "project_name": "chatvrm", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-04-28 17:25:29", "latest_commit": "2024-07-18 13:49:25", "languages": [ @@ -19460,7 +20019,7 @@ "project_name": "sftly-replace", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2023-05-04 20:51:22", "latest_commit": "2023-05-24 02:03:16", "languages": [ @@ -19474,7 +20033,7 @@ "project_name": "JumanDIC", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2015-12-07 17:42:10", "latest_commit": "2022-08-18 19:01:36", "languages": [ @@ -19490,7 +20049,7 @@ "project_name": "uchinaaguchi_dict", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2024-03-12 06:53:05", "latest_commit": "2024-08-12 17:05:46", "languages": [ @@ -19505,7 +20064,7 @@ "project_name": "skk-jisyo.emoji-ja", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2018-03-13 01:04:56", "latest_commit": "2018-03-13 02:01:32", "languages": [ @@ -19519,7 +20078,7 @@ "project_name": "kokkosho_data", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2017-04-16 15:04:26", "latest_commit": "2019-07-09 23:36:27", "languages": [], @@ -19531,7 +20090,7 @@ "project_name": "isbn4groups", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2022-08-03 16:31:28", "latest_commit": "2024-06-25 14:11:40", "languages": [], @@ -19543,7 +20102,7 @@ "project_name": "jacred", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2024-01-11 16:26:25", "latest_commit": "2024-03-08 17:58:20", "languages": [], @@ -19555,7 +20114,7 @@ "project_name": "jades", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2022-10-15 09:32:35", "latest_commit": "2022-12-13 15:57:29", "languages": [], @@ -19567,7 +20126,7 @@ "project_name": "japanese-ir-tutorial", "stargazers_count": 1, "source": "GitHub", - "score": -0.33226328577009623, + "score": -0.3325675828676031, "first_commit": "2024-04-29 10:52:11", "latest_commit": "2024-06-05 18:56:44", "languages": [ @@ -19581,7 +20140,7 @@ "project_name": "pynormalizenumexp", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2021-10-11 
21:02:26", "latest_commit": "2024-04-28 19:55:49", "languages": [ @@ -19589,27 +20148,13 @@ ], "model_or_dataset": null }, - { - "description": "Examples to finetune encoder-only and encoder-decoder transformers for Japanese language (Hugging Face) Resources", - "url": "https://github.com/tsmatz/huggingface-finetune-japanese", - "project_name": "huggingface-finetune-japanese", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2022-10-24 18:13:22", - "latest_commit": "2023-10-06 17:11:54", - "languages": [ - "Jupyter Notebook" - ], - "model_or_dataset": null - }, { "description": "OCR system for recognizing modern Japanese magazines", "url": "https://github.com/ducanh841988/Kindai-OCR", "project_name": "Kindai-OCR", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2020-07-08 10:12:27", "latest_commit": "2023-07-12 12:14:52", "languages": [ @@ -19623,7 +20168,7 @@ "project_name": "JGLUE-benchmark", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-03-18 01:19:37", "latest_commit": "2024-08-09 18:31:33", "languages": [ @@ -19637,7 +20182,7 @@ "project_name": "yubin", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2019-10-28 07:11:54", "latest_commit": "2019-10-28 07:20:26", "languages": [ @@ -19651,7 +20196,7 @@ "project_name": "ASRDeepSpeech", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2020-03-03 15:08:25", "latest_commit": "2022-09-26 00:11:29", "languages": [ @@ -19666,7 +20211,7 @@ "project_name": "radicalchar", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2022-11-29 19:17:38", "latest_commit": "2022-12-30 01:40:44", "languages": [ @@ -19682,7 +20227,7 @@ "project_name": "natsume", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": null, "latest_commit": null, "languages": [], @@ -19694,7 +20239,7 @@ "project_name": "RAG-Japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-11-14 14:06:31", "latest_commit": "2023-11-29 19:47:20", "languages": [ @@ -19708,7 +20253,7 @@ "project_name": "jglue-evaluation-scripts", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-03-18 01:19:37", "latest_commit": "2024-08-09 18:31:33", "languages": [ @@ -19716,27 +20261,13 @@ ], "model_or_dataset": null }, - { - "description": "Whisperのデコーダをllm-jp-1.3b-v1.0に置き換えた音声認識モデルを学習させるためのコード", - "url": "https://github.com/tosiyuki/llm-jp-asr", - "project_name": "llm-jp-asr", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2024-09-07 22:07:43", - "latest_commit": "2024-09-07 22:57:13", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, { "description": "Open source RAG with Llama Index for Japanese LLM in low resource settting", "url": "https://github.com/akimfromparis/rag-japanese", "project_name": "rag-japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-11-14 14:06:31", "latest_commit": "2023-11-29 
19:47:20", "languages": [ @@ -19744,41 +20275,13 @@ ], "model_or_dataset": null }, - { - "description": "A Japanese Parser (including historical Japanese)", - "url": "https://github.com/komiya-lab/monaka", - "project_name": "monaka", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2024-01-24 10:26:06", - "latest_commit": "2024-07-16 14:16:00", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, - { - "description": "A state-of-the-art open-source Japanese <--> English machine translation system based on the latest NMT research.", - "url": "https://github.com/matthewbieda/jp-translate.cloud", - "project_name": "jp-translate.cloud", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2022-02-21 12:23:42", - "latest_commit": "2024-09-05 09:24:02", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, { "description": "連続部分文字列の単語判定を行います", "url": "https://github.com/toufu-24/substring-word-finder", "project_name": "substring-word-finder", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-05-08 01:40:50", "latest_commit": "2024-09-15 02:45:58", "languages": [ @@ -19786,43 +20289,13 @@ ], "model_or_dataset": null }, - { - "description": "日本語文字変換ライブラリ (javascript)", - "url": "https://github.com/kazuhikoarase/jaconv", - "project_name": "jaconv", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2016-10-22 05:22:02", - "latest_commit": "2024-01-20 05:35:13", - "languages": [ - "TypeScript", - "JavaScript", - "Java" - ], - "model_or_dataset": null - }, - { - "description": "Convert romaji into hiragana", - "url": "https://github.com/koozaki/romaji-conv", - "project_name": "romaji-conv", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2020-07-05 01:29:36", - "latest_commit": "2024-09-01 19:50:38", - "languages": [ - "JavaScript" - ], - "model_or_dataset": null - }, { "description": "Extend GNOME On-Screen Keyboard for Input Methods", "url": "https://github.com/esrille/oskim", "project_name": "oskim", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-02-24 15:08:36", "latest_commit": "2023-02-24 15:43:20", "languages": [ @@ -19837,7 +20310,7 @@ "project_name": "aovec", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2021-06-22 16:51:15", "latest_commit": "2023-02-01 02:27:34", "languages": [ @@ -19851,7 +20324,7 @@ "project_name": "BERT-Japan-vaccination", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2022-04-05 16:46:38", "latest_commit": "2022-05-23 00:47:45", "languages": [ @@ -19865,7 +20338,7 @@ "project_name": "VRChatGPT", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-03-21 19:53:54", "latest_commit": "2023-03-22 21:04:37", "languages": [ @@ -19879,7 +20352,7 @@ "project_name": "chatgpt-prompt-sample-japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-04-13 14:22:50", "latest_commit": "2024-08-09 15:15:17", "languages": [], @@ -19891,7 +20364,7 @@ "project_name": "pokemon-ime-dic", "stargazers_count": 0, 
"source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2020-01-10 23:13:19", "latest_commit": "2020-01-10 23:25:48", "languages": [], @@ -19903,7 +20376,7 @@ "project_name": "jitenbot", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": null, "latest_commit": null, "languages": [], @@ -19915,7 +20388,7 @@ "project_name": "azookey-desktop", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2021-09-07 21:50:12", "latest_commit": "2024-08-13 22:08:17", "languages": [ @@ -19929,7 +20402,7 @@ "project_name": "azookeykanakanjiconverter", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-05-28 11:51:25", "latest_commit": "2024-08-11 00:53:16", "languages": [ @@ -19937,41 +20410,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "Japanese Kana Kanji conversion input method library", - "url": "https://github.com/ueno/libkkc", - "project_name": "libkkc", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2012-08-07 17:45:52", - "latest_commit": "2024-09-02 12:08:48", - "languages": [ - "Python" - ], - "model_or_dataset": "dataset" - }, - { - "description": "Japanese SKK input method library", - "url": "https://github.com/ueno/libskk", - "project_name": "libskk", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3352096502696549, - "first_commit": "2011-10-05 18:18:07", - "latest_commit": "2024-09-02 12:09:00", - "languages": [ - "C" - ], - "model_or_dataset": "dataset" - }, { "description": "Parallel Universal Dependencies.", "url": "https://github.com/megagonlabs/UD_Japanese-PUD", "project_name": "UD_Japanese-PUD", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2017-05-23 10:31:45", "latest_commit": "2020-05-16 10:57:47", "languages": [], @@ -19983,7 +20428,7 @@ "project_name": "graded-enja-corpus", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2021-06-05 11:55:23", "latest_commit": "2023-03-14 00:24:51", "languages": [ @@ -19997,7 +20442,7 @@ "project_name": "WikipediaWordFrequencyList", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2022-04-17 16:35:32", "latest_commit": "2022-04-17 16:44:19", "languages": [ @@ -20011,7 +20456,7 @@ "project_name": "friendly_JA-Corpus", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": null, "latest_commit": null, "languages": [], @@ -20023,7 +20468,7 @@ "project_name": "copa-japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-01-13 09:04:08", "latest_commit": "2023-02-24 11:28:31", "languages": [], @@ -20035,7 +20480,7 @@ "project_name": "huggingface-datasets_CAMERA", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-03-17 23:02:32", "latest_commit": "2023-03-17 23:49:35", "languages": [ @@ -20049,7 +20494,7 @@ "project_name": "FactCheckSentenceNLI-FCSNLI-", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, 
"first_commit": "2021-02-26 14:08:54", "latest_commit": "2021-03-03 11:15:47", "languages": [ @@ -20063,7 +20508,7 @@ "project_name": "EaST-MELD", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2023-04-12 00:16:46", "latest_commit": "2023-06-23 11:09:20", "languages": [], @@ -20075,7 +20520,7 @@ "project_name": "reazonspeech", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2022-10-19 10:08:01", "latest_commit": "2024-08-01 17:38:15", "languages": [ @@ -20090,7 +20535,7 @@ "project_name": "j-unimorph", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2024-01-10 20:05:15", "latest_commit": "2024-05-12 20:42:38", "languages": [ @@ -20104,7 +20549,7 @@ "project_name": "jmed-llm", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2024-07-11 20:01:36", "latest_commit": "2024-08-10 08:49:25", "languages": [ @@ -20118,7 +20563,7 @@ "project_name": "lawtext", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2017-12-06 23:09:02", "latest_commit": "2024-08-10 23:37:12", "languages": [ @@ -20133,19 +20578,31 @@ "project_name": "japanesetopicwsd", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2018-09-10 16:40:34", "latest_commit": "2018-09-27 13:45:10", "languages": [], "model_or_dataset": "dataset" }, + { + "description": "Jamp: Controlled Japanese Temporal Inference Dataset for Evaluating Generalization Capacity of Language Models", + "url": "https://github.com/tomo-vv/temporalNLI_dataset", + "project_name": "temporalNLI_dataset", + "stargazers_count": 0, + "source": "GitHub", + "score": -0.3355256353472465, + "first_commit": "2022-05-13 20:55:04", + "latest_commit": "2023-07-22 20:27:45", + "languages": [], + "model_or_dataset": "dataset" + }, { "description": "環境構築手順とソースコード", "url": "https://github.com/hiroshi-matsuda-rit/nlp2024-tutorial-3", "project_name": "nlp2024-tutorial-3", "stargazers_count": 0, "source": "GitHub", - "score": -0.3352096502696549, + "score": -0.3355256353472465, "first_commit": "2024-03-05 09:03:21", "latest_commit": "2024-04-02 14:38:06", "languages": [],