diff --git "a/awesome-japanese-nlp-resources-search.json" "b/awesome-japanese-nlp-resources-search.json" --- "a/awesome-japanese-nlp-resources-search.json" +++ "b/awesome-japanese-nlp-resources-search.json" @@ -3,13 +3,14 @@ "description": "Fine-tuned XLSR-53 large model for speech recognition in Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the train and validation splits of Common Voice 6.1, CSS10 and JSUT.", "url": "https://huggingface.co./jonatasgrosman/wav2vec2-large-xlsr-53-japanese", "project_name": "wav2vec2-large-xlsr-53-japanese", - "downloads": 1355666, + "downloads": 1329767, "source": "Hugging Face", - "score": 19.064569260281203, + "score": 19.31870691627456, "first_commit": "2021-04-16 00:20:03", "latest_commit": "2022-12-14 01:58:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Official Implementation of OCR-free Document Understanding Transformer (Donut) and Synthetic Document Generator (SynthDoG), ECCV 2022", @@ -17,7 +18,7 @@ "project_name": "donut", "stargazers_count": 5553, "source": "GitHub", - "score": 15.86610807642248, + "score": 15.893071179885453, "first_commit": "2022-07-20 23:15:30", "latest_commit": "2023-07-31 15:14:20", "languages": [ @@ -29,25 +30,27 @@ "description": "xlm-roberta-ner-japanese (Japanese caption : 日本語の固有表現抽出のモデル)", "url": "https://huggingface.co./tsmatz/xlm-roberta-ner-japanese", "project_name": "xlm-roberta-ner-japanese", - "downloads": 1075727, + "downloads": 999230, "source": "Hugging Face", - "score": 15.106197384145005, + "score": 14.490236493553173, "first_commit": "2022-10-24 02:08:37", "latest_commit": "2024-07-12 00:01:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.277 }, { "description": "BERT base Japanese (IPA dictionary)", "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese", "project_name": "bert-base-japanese", - "downloads": 980111, + "downloads": 932920, "source": "Hugging Face", - "score": 13.754175388965205, + "score": 13.521582836834998, "first_commit": "2020-04-28 21:34:23", "latest_commit": "2024-02-22 00:57:00", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Neologism dictionary based on the language resources on the Web for mecab-ipadic", @@ -55,7 +58,7 @@ "project_name": "mecab-ipadic-neologd", "stargazers_count": 2695, "source": "GitHub", - "score": 7.526161240209716, + "score": 7.539021437395139, "first_commit": "2015-03-09 16:46:28", "latest_commit": "2020-09-14 19:56:40", "languages": [ @@ -69,7 +72,7 @@ "project_name": "voicevox", "stargazers_count": 2411, "source": "GitHub", - "score": 6.697419147318035, + "score": 6.708877936042008, "first_commit": "2021-08-01 02:41:10", "latest_commit": "2024-08-16 01:45:33", "languages": [ @@ -84,7 +87,7 @@ "project_name": "mozc", "stargazers_count": 2333, "source": "GitHub", - "score": 6.469806882368911, + "score": 6.480880777219669, "first_commit": "2010-05-10 12:05:41", "latest_commit": "2024-08-15 07:37:57", "languages": [ @@ -92,37 +95,38 @@ ], "model_or_dataset": null }, + { + "description": "BERT base Japanese (IPA dictionary, whole word masking enabled)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-whole-word-masking", + "project_name": "bert-base-japanese-whole-word-masking", + "downloads": 352906, + "source": "Hugging Face", + "score": 5.048763020099434, + "first_commit": "2020-04-28 21:34:35", + "latest_commit": 
"2024-02-22 00:57:37", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "Engineer Vocabulary List in Japanese/English", "url": "https://github.com/mercari/engineer-vocabulary-list", "project_name": "engineer-vocabulary-list", "stargazers_count": 1766, "source": "GitHub", - "score": 4.815240802546435, + "score": 4.82351681501113, "first_commit": "2020-09-30 14:16:14", "latest_commit": "2020-11-04 08:32:10", "languages": [], "model_or_dataset": "dataset" }, - { - "description": "BERT base Japanese (IPA dictionary, whole word masking enabled)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-whole-word-masking", - "project_name": "bert-base-japanese-whole-word-masking", - "downloads": 341294, - "source": "Hugging Face", - "score": 4.721224471809333, - "first_commit": "2020-04-28 21:34:35", - "latest_commit": "2024-02-22 00:57:37", - "languages": [], - "model_or_dataset": "model" - }, { "description": "About Optical character recognition for Japanese text, with the main focus being Japanese manga", "url": "https://github.com/kha-white/manga-ocr", "project_name": "manga-ocr", "stargazers_count": 1517, "source": "GitHub", - "score": 4.088632418285771, + "score": 4.095679731078279, "first_commit": "2022-01-15 18:18:35", "latest_commit": "2024-06-29 11:23:04", "languages": [ @@ -136,7 +140,7 @@ "project_name": "N46Whisper", "stargazers_count": 1515, "source": "GitHub", - "score": 4.082796206363999, + "score": 4.089833650082834, "first_commit": "2022-10-25 16:27:20", "latest_commit": "2024-06-30 15:22:02", "languages": [ @@ -149,13 +153,14 @@ "description": "rinna/japanese-cloob-vit-b-16", "url": "https://huggingface.co./rinna/japanese-cloob-vit-b-16", "project_name": "japanese-cloob-vit-b-16", - "downloads": 283198, + "downloads": 283049, "source": "Hugging Face", - "score": 3.8997398893967063, + "score": 4.02829493840193, "first_commit": "2022-04-27 08:29:29", "latest_commit": "2024-07-22 08:09:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.197 }, { "description": "無料で使える中品質なテキスト読み上げソフトウェア、VOICEVOXの音声合成エンジン", @@ -163,7 +168,7 @@ "project_name": "voicevox_engine", "stargazers_count": 1257, "source": "GitHub", - "score": 3.329924868455359, + "score": 3.335689201670483, "first_commit": "2021-08-02 02:39:41", "latest_commit": "2024-08-15 01:08:26", "languages": [ @@ -177,7 +182,7 @@ "project_name": "ojichat", "stargazers_count": 1249, "source": "GitHub", - "score": 3.306580020768269, + "score": 3.3123048776887045, "first_commit": "2019-05-25 03:44:08", "latest_commit": "2023-04-23 16:02:15", "languages": [ @@ -189,13 +194,27 @@ "description": "BERT large Japanese (unidic-lite with whole word masking, jawiki-20200831)", "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese", "project_name": "bert-large-japanese", - "downloads": 203169, + "downloads": 202023, "source": "Hugging Face", - "score": 2.768119990506935, + "score": 2.8446705802471004, "first_commit": "2021-03-05 06:17:13", "latest_commit": "2021-09-23 15:45:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "GLuCoSE (General Luke-based Contrastive Sentence Embedding)-base-Japanese 日本語のREADME/Japanese README GLuCoSE (General LUke-based COntrastive Sentence Embedding, \"glucose\") is a Japanese text embedding model based on LUKE.", + "url": "https://huggingface.co./pkshatech/GLuCoSE-base-ja", + "project_name": "GLuCoSE-base-ja", + "downloads": 
200923, + "source": "Hugging Face", + "score": 2.828601827081816, + "first_commit": "2023-07-16 07:28:46", + "latest_commit": "2023-08-25 02:53:22", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Japanese pop-up dictionary extension for Chrome and Firefox.", @@ -203,7 +222,7 @@ "project_name": "yomichan", "stargazers_count": 1044, "source": "GitHub", - "score": 2.7083682987865982, + "score": 2.7130815756556346, "first_commit": "2016-03-16 20:33:15", "latest_commit": "2023-02-25 12:43:18", "languages": [ @@ -212,25 +231,13 @@ ], "model_or_dataset": null }, - { - "description": "GLuCoSE (General Luke-based Contrastive Sentence Embedding)-base-Japanese 日本語のREADME/Japanese README GLuCoSE (General LUke-based COntrastive Sentence Embedding, \"glucose\") is a Japanese text embedding model based on LUKE.", - "url": "https://huggingface.co./pkshatech/GLuCoSE-base-ja", - "project_name": "GLuCoSE-base-ja", - "downloads": 187729, - "source": "Hugging Face", - "score": 2.549796492289313, - "first_commit": "2023-07-16 07:28:46", - "latest_commit": "2023-08-25 02:53:22", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Kuromoji is a self-contained and very easy to use Japanese morphological analyzer designed for search", "url": "https://github.com/atilika/kuromoji", "project_name": "kuromoji", "stargazers_count": 941, "source": "GitHub", - "score": 2.4078033848153195, + "score": 2.412008404390239, "first_commit": "2011-01-20 15:26:44", "latest_commit": "2019-09-23 11:13:04", "languages": [ @@ -244,7 +251,7 @@ "project_name": "mecab", "stargazers_count": 908, "source": "GitHub", - "score": 2.311505888106075, + "score": 2.315548067965403, "first_commit": "2011-10-15 15:16:30", "latest_commit": "2023-05-24 16:04:25", "languages": [ @@ -264,7 +271,7 @@ "project_name": "awesome-japanese-llm", "stargazers_count": 879, "source": "GitHub", - "score": 2.226880815240375, + "score": 2.2307798935314564, "first_commit": "2023-07-09 13:36:38", "latest_commit": "2024-08-10 19:11:35", "languages": [ @@ -278,7 +285,7 @@ "project_name": "janome", "stargazers_count": 843, "source": "GitHub", - "score": 2.121829000648472, + "score": 2.125550435613454, "first_commit": "2015-02-14 18:45:54", "latest_commit": "2023-07-01 20:31:23", "languages": [ @@ -292,7 +299,7 @@ "project_name": "voicevox_core", "stargazers_count": 839, "source": "GitHub", - "score": 2.110156576804927, + "score": 2.1138582736225646, "first_commit": "2021-08-31 23:19:33", "latest_commit": "2024-08-14 00:00:48", "languages": [ @@ -310,7 +317,7 @@ "project_name": "kuromoji.js", "stargazers_count": 831, "source": "GitHub", - "score": 2.086811729117837, + "score": 2.0904739496407863, "first_commit": "2014-12-04 17:31:39", "latest_commit": "2018-11-24 16:05:09", "languages": [ @@ -324,7 +331,7 @@ "project_name": "budoux", "stargazers_count": 815, "source": "GitHub", - "score": 2.0401220337436583, + "score": 2.04370530167723, "first_commit": "2021-11-18 09:36:21", "latest_commit": "2024-08-16 15:09:25", "languages": [ @@ -341,7 +348,7 @@ "project_name": "kagome", "stargazers_count": 803, "source": "GitHub", - "score": 2.0051047622130236, + "score": 2.0086288157045624, "first_commit": "2014-06-26 13:52:06", "latest_commit": "2024-08-13 10:35:54", "languages": [ @@ -356,7 +363,7 @@ "project_name": "kuroshiro", "stargazers_count": 787, "source": "GitHub", - "score": 1.9584150668388445, + "score": 1.9618601677410055, "first_commit": "2016-01-03 17:16:40", "latest_commit": "2021-06-07 23:02:39", 
"languages": [ @@ -370,7 +377,7 @@ "project_name": "mokuro", "stargazers_count": 781, "source": "GitHub", - "score": 1.9409064310735273, + "score": 1.944321924754672, "first_commit": "2022-04-16 14:44:52", "latest_commit": "2024-07-09 17:10:55", "languages": [ @@ -386,7 +393,7 @@ "project_name": "WanaKana", "stargazers_count": 738, "source": "GitHub", - "score": 1.8154278747554207, + "score": 1.8186311833526132, "first_commit": "2013-08-27 12:57:41", "latest_commit": "2023-11-20 12:22:09", "languages": [ @@ -401,7 +408,7 @@ "project_name": "ginza", "stargazers_count": 731, "source": "GitHub", - "score": 1.7950011330292173, + "score": 1.7981698998685571, "first_commit": "2019-03-11 16:49:15", "latest_commit": "2024-03-31 07:29:06", "languages": [ @@ -415,7 +422,7 @@ "project_name": "shuwa", "stargazers_count": 726, "source": "GitHub", - "score": 1.7804106032247862, + "score": 1.7835546973799457, "first_commit": "2021-04-20 14:25:55", "latest_commit": "2022-12-22 19:41:35", "languages": [ @@ -424,13 +431,26 @@ ], "model_or_dataset": null }, + { + "description": "This is a Japanese sentence-BERT model.", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens-v2", + "project_name": "sentence-bert-base-ja-mean-tokens-v2", + "downloads": 125121, + "source": "Hugging Face", + "score": 1.7212894385046624, + "first_commit": "2021-12-14 11:18:19", + "latest_commit": "2024-04-17 11:39:38", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111 + }, { "description": "- Language Understanding with Knowledge-based Embeddings", "url": "https://github.com/studio-ousia/luke", "project_name": "luke", "stargazers_count": 699, "source": "GitHub", - "score": 1.7016217422808588, + "score": 1.704632603941444, "first_commit": "2020-03-31 21:56:47", "latest_commit": "2023-06-16 23:11:54", "languages": [ @@ -445,7 +465,7 @@ "project_name": "japanese-addresses", "stargazers_count": 698, "source": "GitHub", - "score": 1.6987036363199726, + "score": 1.7017095634437216, "first_commit": "2020-07-12 13:35:51", "latest_commit": "2024-01-15 09:28:19", "languages": [ @@ -453,25 +473,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "This is a Japanese sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens-v2", - "project_name": "sentence-bert-base-ja-mean-tokens-v2", - "downloads": 125299, - "source": "Hugging Face", - "score": 1.6670286170479824, - "first_commit": "2021-12-14 11:18:19", - "latest_commit": "2024-04-17 11:39:38", - "languages": [], - "model_or_dataset": "model" - }, { "description": "ChatdollKit enables you to make your 3D model into a chatbot", "url": "https://github.com/uezo/ChatdollKit", "project_name": "ChatdollKit", "stargazers_count": 685, "source": "GitHub", - "score": 1.660768258828452, + "score": 1.6637100369733318, "first_commit": "2020-03-21 22:01:11", "latest_commit": "2024-07-21 01:41:49", "languages": [ @@ -485,7 +493,7 @@ "project_name": "normalize-japanese-addresses", "stargazers_count": 672, "source": "GitHub", - "score": 1.6228328813369315, + "score": 1.625710510502942, "first_commit": "2020-05-30 10:53:34", "latest_commit": "2024-07-02 10:44:21", "languages": [ @@ -498,13 +506,14 @@ "description": "BERT base Japanese (character tokenization)", "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char", "project_name": "bert-base-japanese-char", - "downloads": 116149, + "downloads": 114951, "source": "Hugging Face", - "score": 1.5376464921327466, + "score": 1.572726511512899, 
"first_commit": "2020-04-28 21:34:05", "latest_commit": "2024-02-22 00:57:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Automatically exported from code.google.com/p/mozc-morse", @@ -512,7 +521,7 @@ "project_name": "mozc-devices", "stargazers_count": 635, "source": "GitHub", - "score": 1.514862960784142, + "score": 1.5175580120872172, "first_commit": "2012-06-06 06:06:16", "latest_commit": "2024-02-28 20:08:36", "languages": [ @@ -522,29 +531,31 @@ ], "model_or_dataset": null }, - { - "description": "BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v2", - "project_name": "bert-base-japanese-char-v2", - "downloads": 104036, - "source": "Hugging Face", - "score": 1.366367182941681, - "first_commit": "2021-03-05 04:05:08", - "latest_commit": "2021-09-23 15:45:24", - "languages": [], - "model_or_dataset": "model" - }, { "description": "JMTEB:", "url": "https://huggingface.co./datasets/sbintuitions/JMTEB", "project_name": "JMTEB", - "downloads": 103169, + "downloads": 106868, "source": "Hugging Face", - "score": 1.354107696351352, + "score": 1.4546503916629066, "first_commit": "2024-02-22 18:15:27", "latest_commit": "2024-06-28 15:18:20", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "BERT base Japanese (character-level tokenization with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v2", + "project_name": "bert-base-japanese-char-v2", + "downloads": 103373, + "source": "Hugging Face", + "score": 1.403595580469572, + "first_commit": "2021-03-05 04:05:08", + "latest_commit": "2021-09-23 15:45:24", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Code for producing Japanese pretrained models provided by rinna Co., Ltd.", @@ -552,7 +563,7 @@ "project_name": "japanese-pretrained-models", "stargazers_count": 576, "source": "GitHub", - "score": 1.3426947090918562, + "score": 1.3450986227216017, "first_commit": "2021-04-06 13:48:14", "latest_commit": "2022-10-28 16:08:26", "languages": [ @@ -560,25 +571,13 @@ ], "model_or_dataset": "model" }, - { - "description": "This is a Japanese sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens", - "project_name": "sentence-bert-base-ja-mean-tokens", - "downloads": 98522, - "source": "Hugging Face", - "score": 1.2883985450419422, - "first_commit": "2021-07-22 06:11:37", - "latest_commit": "2024-04-17 11:40:03", - "languages": [], - "model_or_dataset": "model" - }, { "description": "mecab-python. mecab-python. 
you can find original version here:http://taku910.github.io/mecab/", "url": "https://github.com/SamuraiT/mecab-python3", "project_name": "mecab-python3", "stargazers_count": 515, "source": "GitHub", - "score": 1.1646902454777979, + "score": 1.166793152360542, "first_commit": "2014-05-31 17:47:48", "latest_commit": "2024-04-15 21:32:53", "languages": [ @@ -593,7 +592,7 @@ "project_name": "bert-japanese", "stargazers_count": 502, "source": "GitHub", - "score": 1.1267548679862773, + "score": 1.128793625890152, "first_commit": "2019-03-24 22:50:33", "latest_commit": "2024-03-24 00:08:38", "languages": [ @@ -608,7 +607,7 @@ "project_name": "bert-japanese", "stargazers_count": 497, "source": "GitHub", - "score": 1.1121643381818462, + "score": 1.1141784234015408, "first_commit": "2018-12-27 20:05:33", "latest_commit": "2021-02-15 10:20:28", "languages": [ @@ -618,28 +617,30 @@ "model_or_dataset": "model" }, { - "description": "BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v3", - "project_name": "bert-base-japanese-v3", - "downloads": 83189, + "description": "This is a Japanese sentence-BERT model.", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-mean-tokens", + "project_name": "sentence-bert-base-ja-mean-tokens", + "downloads": 83483, "source": "Hugging Face", - "score": 1.0715880399790665, - "first_commit": "2023-05-19 00:13:53", - "latest_commit": "2023-05-19 00:31:53", + "score": 1.1130433073263886, + "first_commit": "2021-07-22 06:11:37", + "latest_commit": "2024-04-17 11:40:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-instruct", "project_name": "ELYZA-japanese-Llama-2-7b-instruct", - "downloads": 81623, + "downloads": 81543, "source": "Hugging Face", - "score": 1.04944460745259, + "score": 1.0847038699257965, "first_commit": "2023-08-28 12:58:25", "latest_commit": "2023-08-29 03:46:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "morphological analyzer (word segmentor + PoS Tagger) for Chinese and Japanese written purely in JavaScript.", @@ -647,7 +648,7 @@ "project_name": "rakutenma", "stargazers_count": 471, "source": "GitHub", - "score": 1.036293583198805, + "score": 1.0381793704607611, "first_commit": "2014-08-05 19:05:12", "latest_commit": "2015-01-29 18:28:18", "languages": [ @@ -661,7 +662,7 @@ "project_name": "bangumi-data", "stargazers_count": 471, "source": "GitHub", - "score": 1.036293583198805, + "score": 1.0381793704607611, "first_commit": "2016-09-05 10:17:46", "latest_commit": "2024-08-12 10:50:17", "languages": [ @@ -670,17 +671,31 @@ ], "model_or_dataset": null }, + { + "description": "BERT base Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v3", + "project_name": "bert-base-japanese-v3", + "downloads": 78327, + "source": "Hugging Face", + "score": 1.0377246788534749, + "first_commit": "2023-05-19 00:13:53", + "latest_commit": "2023-05-19 00:31:53", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being 
Japanese manga.", "url": "https://huggingface.co./kha-white/manga-ocr-base", "project_name": "manga-ocr-base", - "downloads": 73954, + "downloads": 73052, "source": "Hugging Face", - "score": 0.9410040046137985, + "score": 0.9606677034472259, "first_commit": "2022-01-15 17:39:06", "latest_commit": "2022-06-22 15:34:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Lightweight converter from Japanese Kana-kanji sentences into Kana-Roman.", @@ -688,7 +703,7 @@ "project_name": "pykakasi", "stargazers_count": 407, "source": "GitHub", - "score": 0.8495348017020882, + "score": 0.8511047786065344, "first_commit": "2011-01-26 09:56:25", "latest_commit": "2022-07-22 12:36:37", "languages": [ @@ -702,7 +717,7 @@ "project_name": "ojosama", "stargazers_count": 380, "source": "GitHub", - "score": 0.7707459407581607, + "score": 0.7721826851680325, "first_commit": "2022-06-16 07:21:02", "latest_commit": "2024-08-07 00:15:14", "languages": [ @@ -716,7 +731,7 @@ "project_name": "fugashi", "stargazers_count": 376, "source": "GitHub", - "score": 0.7590735169146159, + "score": 0.7604905231771434, "first_commit": "2019-10-14 13:33:36", "latest_commit": "2024-04-15 21:15:01", "languages": [ @@ -730,7 +745,7 @@ "project_name": "nagisa", "stargazers_count": 376, "source": "GitHub", - "score": 0.7590735169146159, + "score": 0.7604905231771434, "first_commit": "2018-02-14 23:56:05", "latest_commit": "2024-06-15 01:55:19", "languages": [ @@ -738,13 +753,26 @@ ], "model_or_dataset": null }, + { + "description": "Sentence BERT base Japanese model This repository contains a Sentence BERT base model for Japanese.", + "url": "https://huggingface.co./colorfulscoop/sbert-base-ja", + "project_name": "sbert-base-ja", + "downloads": 58845, + "source": "Hugging Face", + "score": 0.7531324523388695, + "first_commit": "2021-08-01 04:12:28", + "latest_commit": "2021-08-08 15:47:42", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "Juman++ (a Morphological Analyzer Toolkit)", "url": "https://github.com/ku-nlp/jumanpp", "project_name": "jumanpp", "stargazers_count": 372, "source": "GitHub", - "score": 0.747401093071071, + "score": 0.7487983611862542, "first_commit": "2016-10-17 14:48:20", "latest_commit": "2023-03-06 09:27:42", "languages": [ @@ -761,7 +789,7 @@ "project_name": "lindera", "stargazers_count": 369, "source": "GitHub", - "score": 0.7386467751884124, + "score": 0.7400292396930873, "first_commit": "2020-01-22 23:29:22", "latest_commit": "2024-07-15 23:23:30", "languages": [ @@ -770,16 +798,17 @@ "model_or_dataset": null }, { - "description": "Sentence BERT base Japanese model This repository contains a Sentence BERT base model for Japanese.", - "url": "https://huggingface.co./colorfulscoop/sbert-base-ja", - "project_name": "sbert-base-ja", - "downloads": 59475, + "description": "shisa-gamma-7b-v1 For more information see our main Shisa 7B model We applied a version of our fine-tune data set onto Japanese Stable LM Base Gamma 7B and it performed pretty well, just sharing since it might be of interest.", + "url": "https://huggingface.co./augmxnt/shisa-gamma-7b-v1", + "project_name": "shisa-gamma-7b-v1", + "downloads": 55945, "source": "Hugging Face", - "score": 0.7362691645430116, - "first_commit": "2021-08-01 04:12:28", - "latest_commit": "2021-08-08 15:47:42", + "score": 0.7107693758122112, + "first_commit": "2023-12-23 20:21:44", + "latest_commit": "2024-05-19 06:07:36", "languages": [], - 
"model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { "description": "NDLOCRのアプリケーション", @@ -787,7 +816,7 @@ "project_name": "ndlocr_cli", "stargazers_count": 344, "source": "GitHub", - "score": 0.6656941261662573, + "score": 0.6669532272500299, "first_commit": "2022-03-30 11:12:01", "latest_commit": "2024-07-29 09:20:46", "languages": [ @@ -801,7 +830,7 @@ "project_name": "camphr", "stargazers_count": 340, "source": "GitHub", - "score": 0.6540217023227126, + "score": 0.6552610652591409, "first_commit": "2019-08-27 07:22:43", "latest_commit": "2021-08-18 15:06:51", "languages": [ @@ -815,7 +844,7 @@ "project_name": "aquaskk", "stargazers_count": 339, "source": "GitHub", - "score": 0.6511035963618264, + "score": 0.6523380247614186, "first_commit": "2013-05-12 07:03:33", "latest_commit": "2023-07-10 00:35:46", "languages": [ @@ -829,7 +858,7 @@ "project_name": "kanji-data-media", "stargazers_count": 336, "source": "GitHub", - "score": 0.6423492784791678, + "score": 0.6435689032682517, "first_commit": "2014-08-30 19:06:48", "latest_commit": "2023-11-15 20:28:16", "languages": [], @@ -841,7 +870,7 @@ "project_name": "emoji-ime-dictionary", "stargazers_count": 331, "source": "GitHub", - "score": 0.6277587486747368, + "score": 0.6289537007796402, "first_commit": "2018-10-13 21:37:28", "latest_commit": "2023-01-16 12:01:31", "languages": [ @@ -855,7 +884,7 @@ "project_name": "animedb", "stargazers_count": 321, "source": "GitHub", - "score": 0.5985776890658747, + "score": 0.5997232958024172, "first_commit": "2016-10-11 23:05:11", "latest_commit": "2023-01-04 16:56:30", "languages": [ @@ -869,7 +898,7 @@ "project_name": "gpt2-japanese", "stargazers_count": 313, "source": "GitHub", - "score": 0.5752328413787852, + "score": 0.5763389718206389, "first_commit": "2019-12-12 11:07:23", "latest_commit": "2023-09-02 17:23:50", "languages": [ @@ -883,7 +912,7 @@ "project_name": "vibrato", "stargazers_count": 311, "source": "GitHub", - "score": 0.5693966294570127, + "score": 0.5704928908251943, "first_commit": "2022-07-06 17:50:53", "latest_commit": "2024-05-30 19:17:07", "languages": [ @@ -898,7 +927,7 @@ "project_name": "accel-brain-code", "stargazers_count": 305, "source": "GitHub", - "score": 0.5518879936916955, + "score": 0.5529546478388606, "first_commit": "2016-01-26 21:16:08", "latest_commit": "2023-12-26 12:01:37", "languages": [ @@ -909,25 +938,13 @@ ], "model_or_dataset": null }, - { - "description": "shisa-gamma-7b-v1 For more information see our main Shisa 7B model We applied a version of our fine-tune data set onto Japanese Stable LM Base Gamma 7B and it performed pretty well, just sharing since it might be of interest.", - "url": "https://huggingface.co./augmxnt/shisa-gamma-7b-v1", - "project_name": "shisa-gamma-7b-v1", - "downloads": 45294, - "source": "Hugging Face", - "score": 0.5357480811088084, - "first_commit": "2023-12-23 20:21:44", - "latest_commit": "2024-05-19 06:07:36", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Pure-Python Japanese character interconverter for Hiragana, Katakana, Hankaku, and Zenkaku", "url": "https://github.com/ikegami-yukino/jaconv", "project_name": "jaconv", "stargazers_count": 299, "source": "GitHub", - "score": 0.5343793579263784, + "score": 0.5354164048525268, "first_commit": "2014-03-22 20:09:24", "latest_commit": "2024-08-13 14:26:29", "languages": [ @@ -941,7 +958,7 @@ "project_name": "yomitan-dictionaries", "stargazers_count": 295, "source": "GitHub", - "score": 0.5227069340828335, + "score": 
0.5237242428616377, "first_commit": "2022-08-07 01:28:52", "latest_commit": "2024-08-04 01:00:08", "languages": [ @@ -955,7 +972,7 @@ "project_name": "JGLUE", "stargazers_count": 294, "source": "GitHub", - "score": 0.5197888281219474, + "score": 0.5208012023639154, "first_commit": "2022-05-27 13:35:39", "latest_commit": "2023-06-24 10:50:30", "languages": [ @@ -969,7 +986,7 @@ "project_name": "sudachi.rs", "stargazers_count": 290, "source": "GitHub", - "score": 0.5081164042784025, + "score": 0.5091090403730262, "first_commit": "2019-11-23 21:06:49", "latest_commit": "2024-06-28 10:11:04", "languages": [ @@ -984,7 +1001,7 @@ "project_name": "Poricom", "stargazers_count": 283, "source": "GitHub", - "score": 0.48768966255219914, + "score": 0.4886477568889701, "first_commit": "2021-08-31 16:21:12", "latest_commit": "2023-06-04 18:55:23", "languages": [ @@ -998,7 +1015,7 @@ "project_name": "japanese-stable-diffusion", "stargazers_count": 280, "source": "GitHub", - "score": 0.4789353446695405, + "score": 0.4798786353958032, "first_commit": "2022-09-09 10:11:41", "latest_commit": "2023-03-20 08:21:57", "languages": [ @@ -1013,7 +1030,7 @@ "project_name": "cutlet", "stargazers_count": 278, "source": "GitHub", - "score": 0.4730991327477681, + "score": 0.4740325544003587, "first_commit": "2020-04-16 22:59:42", "latest_commit": "2024-06-28 00:40:28", "languages": [ @@ -1028,7 +1045,7 @@ "project_name": "corvusskk", "stargazers_count": 277, "source": "GitHub", - "score": 0.4701810267868819, + "score": 0.4711095139026364, "first_commit": "2012-06-15 14:12:35", "latest_commit": "2024-08-04 19:22:13", "languages": [ @@ -1044,7 +1061,7 @@ "project_name": "llm-book", "stargazers_count": 273, "source": "GitHub", - "score": 0.4585086029433371, + "score": 0.4594173519117472, "first_commit": "2023-07-13 16:50:00", "latest_commit": "2024-07-27 21:21:13", "languages": [ @@ -1058,7 +1075,7 @@ "project_name": "HotPepperGourmetDialogue", "stargazers_count": 272, "source": "GitHub", - "score": 0.4555904969824509, + "score": 0.4564943114140249, "first_commit": "2016-02-13 11:37:07", "latest_commit": "2016-05-19 13:05:29", "languages": [ @@ -1072,7 +1089,7 @@ "project_name": "tacotron2-japanese", "stargazers_count": 269, "source": "GitHub", - "score": 0.4468361790997923, + "score": 0.447725189920858, "first_commit": "2022-07-25 11:42:50", "latest_commit": "2022-09-04 12:57:11", "languages": [ @@ -1087,7 +1104,7 @@ "project_name": "neologdn", "stargazers_count": 267, "source": "GitHub", - "score": 0.4409999671780199, + "score": 0.44187910892541343, "first_commit": "2015-07-22 01:15:07", "latest_commit": "2024-05-03 00:56:43", "languages": [ @@ -1097,29 +1114,18 @@ ], "model_or_dataset": null }, - { - "description": "hotchpotch/japanese-reranker-cross-encoder-xsmall-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-xsmall-v1", - "project_name": "japanese-reranker-cross-encoder-xsmall-v1", - "downloads": 38304, - "source": "Hugging Face", - "score": 0.4369086217473332, - "first_commit": "2024-03-28 04:29:26", - "latest_commit": "2024-06-10 03:57:05", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese", "project_name": "deberta-v2-base-japanese", - "downloads": 
35680, + "downloads": 35526, "source": "Hugging Face", - "score": 0.3998049391486907, + "score": 0.4124894931922685, "first_commit": "2023-01-05 08:04:14", "latest_commit": "2023-05-12 14:13:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.137 }, { "description": "「BERTによる自然言語処理入門: Transformersを使った実践プログラミング」サポートページ", @@ -1127,7 +1133,7 @@ "project_name": "bert-book", "stargazers_count": 250, "source": "GitHub", - "score": 0.3913921658429545, + "score": 0.39218742046413446, "first_commit": "2021-02-04 22:11:11", "latest_commit": "2024-02-13 13:36:21", "languages": [ @@ -1141,7 +1147,7 @@ "project_name": "azooKey", "stargazers_count": 247, "source": "GitHub", - "score": 0.3826378479602959, + "score": 0.3834182989709676, "first_commit": "2020-09-03 16:35:44", "latest_commit": "2024-07-04 11:03:08", "languages": [ @@ -1154,13 +1160,14 @@ "description": "rinna/japanese-clip-vit-b-16", "url": "https://huggingface.co./rinna/japanese-clip-vit-b-16", "project_name": "japanese-clip-vit-b-16", - "downloads": 34163, + "downloads": 33425, "source": "Hugging Face", - "score": 0.3783543726463505, + "score": 0.3817981746465758, "first_commit": "2022-04-27 07:52:33", "latest_commit": "2024-07-20 08:42:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.197 }, { "description": "pdf-translator translates English PDF files into Japanese, preserving the original layout.", @@ -1168,7 +1175,7 @@ "project_name": "pdf-translator", "stargazers_count": 245, "source": "GitHub", - "score": 0.37680163603852346, + "score": 0.377572217975523, "first_commit": "2023-04-17 14:55:03", "latest_commit": "2024-05-07 16:25:33", "languages": [ @@ -1180,13 +1187,14 @@ "description": "JMMLU Japanese Massive Multitask Language Understanding Benchmark JMMLU is a four-choice question set consisting of Japanese-translated questions of a portion of MMLU (Paper, Github) (Translated questions) and questions based on unique Japanese cultural context (Japanese questions).", "url": "https://huggingface.co./datasets/nlp-waseda/JMMLU", "project_name": "JMMLU", - "downloads": 33881, + "downloads": 33054, "source": "Hugging Face", - "score": 0.3743668579768318, + "score": 0.37637862244264814, "first_commit": "2024-02-09 12:19:13", "latest_commit": "2024-02-27 05:22:30", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Code for evaluating Japanese pretrained models provided by NTT Ltd.", @@ -1194,7 +1202,7 @@ "project_name": "japanese-dialog-transformers", "stargazers_count": 240, "source": "GitHub", - "score": 0.3622111062340925, + "score": 0.3629570154869115, "first_commit": "2021-09-17 22:13:57", "latest_commit": "2023-06-21 09:24:35", "languages": [ @@ -1208,7 +1216,7 @@ "project_name": "namedivider-python", "stargazers_count": 237, "source": "GitHub", - "score": 0.35345678835143385, + "score": 0.3541878939937446, "first_commit": "2020-11-08 00:41:44", "latest_commit": "2024-05-03 15:28:00", "languages": [ @@ -1222,7 +1230,7 @@ "project_name": "mecab", "stargazers_count": 232, "source": "GitHub", - "score": 0.33886625854700286, + "score": 0.33957269150513314, "first_commit": "2011-10-15 15:16:30", "latest_commit": "2024-05-30 20:11:11", "languages": [ @@ -1242,7 +1250,7 @@ "project_name": "nlplot", "stargazers_count": 232, "source": "GitHub", - "score": 0.33886625854700286, + "score": 0.33957269150513314, "first_commit": "2020-05-07 00:10:04", "latest_commit": 
"2022-09-21 23:19:20", "languages": [ @@ -1257,7 +1265,7 @@ "project_name": "vime", "stargazers_count": 228, "source": "GitHub", - "score": 0.32719383470345803, + "score": 0.327880529514244, "first_commit": "2022-12-03 22:32:24", "latest_commit": "2022-12-03 22:32:24", "languages": [ @@ -1269,13 +1277,27 @@ "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of May 31, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", "url": "https://huggingface.co./sazyou-roukaku/BracingEvoMix", "project_name": "BracingEvoMix", - "downloads": 30474, + "downloads": 29698, "source": "Hugging Face", - "score": 0.32619145911495107, + "score": 0.3273543173311085, "first_commit": "2023-05-31 10:29:16", "latest_commit": "2023-10-01 08:58:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "hotchpotch/japanese-reranker-cross-encoder-xsmall-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-xsmall-v1", + "project_name": "japanese-reranker-cross-encoder-xsmall-v1", + "downloads": 29634, + "source": "Hugging Face", + "score": 0.3264194080560374, + "first_commit": "2024-03-28 04:29:26", + "latest_commit": "2024-06-10 03:57:05", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.107 }, { "description": "A lexicon for Sudachi", @@ -1283,7 +1305,7 @@ "project_name": "SudachiDict", "stargazers_count": 225, "source": "GitHub", - "score": 0.31843951682079946, + "score": 0.3191114080210771, "first_commit": "2019-04-01 17:14:39", "latest_commit": "2024-07-19 18:45:35", "languages": [ @@ -1298,31 +1320,19 @@ "project_name": "Jitendex", "stargazers_count": 225, "source": "GitHub", - "score": 0.31843951682079946, + "score": 0.3191114080210771, "first_commit": "2023-10-09 17:45:11", "latest_commit": "2024-07-09 16:43:23", "languages": [], "model_or_dataset": "dataset" }, - { - "description": "Llama-3-ELYZA-JP-8B Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B", - "project_name": "Llama-3-ELYZA-JP-8B", - "downloads": 29831, - "source": "Hugging Face", - "score": 0.3170993600635307, - "first_commit": "2024-06-25 06:32:13", - "latest_commit": "2024-06-26 02:56:23", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Konoha: Simple wrapper of Japanese Tokenizers", "url": "https://github.com/himkt/konoha", "project_name": "konoha", "stargazers_count": 224, "source": "GitHub", - "score": 0.31552141085991325, + "score": 0.31618836752335483, "first_commit": "2018-08-22 13:55:37", "latest_commit": "2024-05-16 00:01:37", "languages": [ @@ -1337,7 +1347,7 @@ "project_name": "vaporetto", "stargazers_count": 223, "source": "GitHub", - "score": 0.31260330489902705, + "score": 0.31326532702563253, "first_commit": "2021-08-18 22:59:40", "latest_commit": "2024-07-14 16:33:38", "languages": [ @@ -1345,13 +1355,26 @@ ], "model_or_dataset": null }, + { + "description": "Llama-3-ELYZA-JP-8B Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B", + "project_name": "Llama-3-ELYZA-JP-8B", + "downloads": 28053, + "source": "Hugging Face", + "score": 0.3033242273702972, + "first_commit": "2024-06-25 06:32:13", + "latest_commit": "2024-06-26 02:56:23", + "languages": [], + "model_or_dataset": "model", + 
"model_size": 8.03 + }, { "description": "Building AI-based conversational avatars lightning fast", "url": "https://github.com/uezo/aiavatarkit", "project_name": "aiavatarkit", "stargazers_count": 218, "source": "GitHub", - "score": 0.29801277509459606, + "score": 0.29865012453702106, "first_commit": "2023-05-27 23:14:32", "latest_commit": "2024-06-08 17:55:22", "languages": [ @@ -1359,25 +1382,13 @@ ], "model_or_dataset": null }, - { - "description": "BERT base Japanese (unidic-lite with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v2", - "project_name": "bert-base-japanese-v2", - "downloads": 28107, - "source": "Hugging Face", - "score": 0.2927217881122884, - "first_commit": "2021-03-05 03:37:30", - "latest_commit": "2021-09-23 15:45:31", - "languages": [], - "model_or_dataset": "model" - }, { "description": "日本語OCR", "url": "https://github.com/tanreinama/OCR_Japanease", "project_name": "OCR_Japanease", "stargazers_count": 216, "source": "GitHub", - "score": 0.29217656317282364, + "score": 0.29280404354157646, "first_commit": "2020-04-08 09:25:03", "latest_commit": "2021-04-30 19:26:24", "languages": [ @@ -1386,16 +1397,17 @@ "model_or_dataset": null }, { - "description": "llm-book/bert-base-japanese-v3-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-ner-wikipedia-dataset", - "project_name": "bert-base-japanese-v3-ner-wikipedia-dataset", - "downloads": 27828, + "description": "BERT base Japanese (unidic-lite with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-v2", + "project_name": "bert-base-japanese-v2", + "downloads": 27313, "source": "Hugging Face", - "score": 0.2887766938115943, - "first_commit": "2023-05-28 08:06:41", - "latest_commit": "2023-07-25 13:32:15", + "score": 0.2925143388772879, + "first_commit": "2021-03-05 03:37:30", + "latest_commit": "2021-09-23 15:45:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Yet another Japanese IME for IBus/Linux", @@ -1403,7 +1415,7 @@ "project_name": "akaza", "stargazers_count": 212, "source": "GitHub", - "score": 0.28050413932927887, + "score": 0.28111188155068734, "first_commit": "2020-09-03 01:11:08", "latest_commit": "2023-05-28 23:41:07", "languages": [ @@ -1419,7 +1431,7 @@ "project_name": "jtubespeech", "stargazers_count": 208, "source": "GitHub", - "score": 0.26883171548573404, + "score": 0.26941971955979815, "first_commit": "2021-07-01 00:11:55", "latest_commit": "2023-03-02 15:50:30", "languages": [ @@ -1427,25 +1439,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "LaBSE Model description Language-agnostic BERT Sentence Encoder (LaBSE) is a BERT-based model trained for sentence embedding for 109 languages.", - "url": "https://huggingface.co./setu4993/LaBSE", - "project_name": "LaBSE", - "downloads": 26075, - "source": "Hugging Face", - "score": 0.263989058295047, - "first_commit": "2021-01-11 06:06:51", - "latest_commit": "2023-10-18 23:23:16", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Tutorial to train fastText with Japanese corpus", "url": "https://github.com/icoxfog417/fastTextJapaneseTutorial", "project_name": "fastTextJapaneseTutorial", "stargazers_count": 202, "source": "GitHub", - "score": 0.25132307972041684, + "score": 0.2518814765734644, "first_commit": "2016-09-28 13:06:13", "latest_commit": "2016-09-29 
14:21:51", "languages": [ @@ -1454,16 +1454,30 @@ "model_or_dataset": null }, { - "description": "Please feel free to open an issue or pull request.", - "url": "https://huggingface.co./datasets/shunk031/JGLUE", - "project_name": "JGLUE", - "downloads": 24856, + "description": "LaBSE Model description Language-agnostic BERT Sentence Encoder (LaBSE) is a BERT-based model trained for sentence embedding for 109 languages.", + "url": "https://huggingface.co./setu4993/LaBSE", + "project_name": "LaBSE", + "downloads": 24436, "source": "Hugging Face", - "score": 0.24675224842929042, - "first_commit": "2023-02-25 13:33:13", - "latest_commit": "2024-05-21 11:23:51", + "score": 0.2504872453713583, + "first_commit": "2021-01-11 06:06:51", + "latest_commit": "2023-10-18 23:23:16", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.47100000000000003 + }, + { + "description": "llm-book/bert-base-japanese-v3-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-ner-wikipedia-dataset", + "project_name": "bert-base-japanese-v3-ner-wikipedia-dataset", + "downloads": 24169, + "source": "Hugging Face", + "score": 0.24658692073942118, + "first_commit": "2023-05-28 08:06:41", + "latest_commit": "2023-07-25 13:32:15", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Japanese postal code data.", @@ -1471,19 +1485,32 @@ "project_name": "posuto", "stargazers_count": 200, "source": "GitHub", - "score": 0.24548686779864443, + "score": 0.2460353955780198, "first_commit": null, "latest_commit": null, "languages": [], "model_or_dataset": null }, + { + "description": "Please feel free to open an issue or pull request.", + "url": "https://huggingface.co./datasets/shunk031/JGLUE", + "project_name": "JGLUE", + "downloads": 24097, + "source": "Hugging Face", + "score": 0.24553514780496621, + "first_commit": "2023-02-25 13:33:13", + "latest_commit": "2024-05-21 11:23:51", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, { "description": "The Kyoto Text Analysis Toolkit for word segmentation and pronunciation estimation, etc.", "url": "https://github.com/neubig/kytea", "project_name": "kytea", "stargazers_count": 199, "source": "GitHub", - "score": 0.24256876183775822, + "score": 0.24311235508029752, "first_commit": "2011-03-03 14:53:14", "latest_commit": "2020-04-03 09:46:01", "languages": [ @@ -1497,13 +1524,14 @@ "description": "This is a Japanese sentence-LUKE model.", "url": "https://huggingface.co./sonoisa/sentence-luke-japanese-base-lite", "project_name": "sentence-luke-japanese-base-lite", - "downloads": 23851, + "downloads": 23780, "source": "Hugging Face", - "score": 0.2325414248730268, + "score": 0.24090442530187978, "first_commit": "2023-03-19 14:44:42", "latest_commit": "2023-03-20 01:32:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Python wrapper for OpenJTalk", @@ -1511,7 +1539,7 @@ "project_name": "pyopenjtalk", "stargazers_count": 192, "source": "GitHub", - "score": 0.22214202011155482, + "score": 0.22265107159624145, "first_commit": "2018-08-07 00:37:37", "latest_commit": "2024-07-13 16:04:09", "languages": [ @@ -1526,7 +1554,7 @@ "project_name": "daachorse", "stargazers_count": 192, "source": "GitHub", - "score": 0.22214202011155482, + "score": 0.22265107159624145, "first_commit": "2021-09-02 11:37:02", "latest_commit": "2024-06-06 18:11:39", 
"languages": [ @@ -1540,7 +1568,7 @@ "project_name": "EJDict", "stargazers_count": 189, "source": "GitHub", - "score": 0.21338770222889622, + "score": 0.2138819501030746, "first_commit": "2016-01-04 23:23:52", "latest_commit": "2024-02-15 09:49:00", "languages": [ @@ -1554,7 +1582,7 @@ "project_name": "kanjitomo-ocr", "stargazers_count": 184, "source": "GitHub", - "score": 0.1987971724244652, + "score": 0.1992667476144631, "first_commit": "2019-12-08 14:09:53", "latest_commit": "2021-05-03 21:38:06", "languages": [ @@ -1569,7 +1597,7 @@ "project_name": "bunkai", "stargazers_count": 181, "source": "GitHub", - "score": 0.1900428545418066, + "score": 0.19049762612129625, "first_commit": "2021-04-21 12:36:10", "latest_commit": "2023-08-10 14:13:25", "languages": [ @@ -1583,7 +1611,7 @@ "project_name": "ita-corpus", "stargazers_count": 178, "source": "GitHub", - "score": 0.18128853665914801, + "score": 0.18172850462812937, "first_commit": "2021-06-03 11:22:38", "latest_commit": "2023-12-04 00:50:44", "languages": [], @@ -1595,7 +1623,7 @@ "project_name": "elasticsearch-sudachi", "stargazers_count": 177, "source": "GitHub", - "score": 0.1783704306982618, + "score": 0.17880546413040707, "first_commit": "2017-11-09 19:21:47", "latest_commit": "2024-07-04 14:46:30", "languages": [ @@ -1606,15 +1634,41 @@ "model_or_dataset": null }, { - "description": "Topologically ordered lists of kanji for effective learning", - "url": "https://github.com/scriptin/topokanji", - "project_name": "topokanji", - "stargazers_count": 174, - "source": "GitHub", - "score": 0.1696161128156032, - "first_commit": "2015-05-18 01:55:58", - "latest_commit": "2016-01-24 05:18:22", - "languages": [ + "description": "FuguMT", + "url": "https://huggingface.co./staka/fugumt-en-ja", + "project_name": "fugumt-en-ja", + "downloads": 19468, + "source": "Hugging Face", + "score": 0.1779149128939659, + "first_commit": "2022-05-08 04:23:57", + "latest_commit": "2023-08-15 17:45:04", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "FuguMT", + "url": "https://huggingface.co./staka/fugumt-ja-en", + "project_name": "fugumt-ja-en", + "downloads": 18959, + "source": "Hugging Face", + "score": 0.17047946256566626, + "first_commit": "2022-05-08 04:32:09", + "latest_commit": "2023-08-15 17:40:58", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Topologically ordered lists of kanji for effective learning", + "url": "https://github.com/scriptin/topokanji", + "project_name": "topokanji", + "stargazers_count": 174, + "source": "GitHub", + "score": 0.17003634263724018, + "first_commit": "2015-05-18 01:55:58", + "latest_commit": "2016-01-24 05:18:22", + "languages": [ "JavaScript" ], "model_or_dataset": "dataset" @@ -1625,7 +1679,7 @@ "project_name": "neural_japanese_transliterator", "stargazers_count": 173, "source": "GitHub", - "score": 0.166698006854717, + "score": 0.16711330213951792, "first_commit": "2017-01-01 13:20:54", "latest_commit": "2017-09-17 15:21:27", "languages": [ @@ -1639,7 +1693,7 @@ "project_name": "summarize_arxv", "stargazers_count": 173, "source": "GitHub", - "score": 0.166698006854717, + "score": 0.16711330213951792, "first_commit": "2023-05-21 17:03:22", "latest_commit": "2023-05-23 01:54:10", "languages": [ @@ -1653,7 +1707,7 @@ "project_name": "house-of-representatives", "stargazers_count": 167, "source": "GitHub", - "score": 0.14918937108939978, + "score": 0.14957505915318414, "first_commit": "2022-04-18 14:41:05", 
"latest_commit": "2024-08-15 14:09:44", "languages": [ @@ -1668,7 +1722,7 @@ "project_name": "jmdict-simplified", "stargazers_count": 166, "source": "GitHub", - "score": 0.1462712651285136, + "score": 0.14665201865546185, "first_commit": "2016-02-07 00:30:44", "latest_commit": "2024-08-12 12:21:03", "languages": [ @@ -1682,13 +1736,14 @@ "description": "Llama3 Swallow", "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-Instruct-v0.1", "project_name": "Llama-3-Swallow-8B-Instruct-v0.1", - "downloads": 16861, + "downloads": 16768, "source": "Hugging Face", - "score": 0.13370196551155156, + "score": 0.13847342785190483, "first_commit": "2024-06-26 04:11:25", "latest_commit": "2024-07-06 15:02:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "ディープラーニングモデルの性能を体系的に最大化するためのプレイブック", @@ -1696,7 +1751,7 @@ "project_name": "tuning_playbook_ja", "stargazers_count": 161, "source": "GitHub", - "score": 0.1316807353240826, + "score": 0.1320368161668504, "first_commit": "2023-01-22 02:19:42", "latest_commit": "2023-01-22 22:10:48", "languages": [], @@ -1708,7 +1763,7 @@ "project_name": "DistilBERT-base-jp", "stargazers_count": 160, "source": "GitHub", - "score": 0.12876262936319638, + "score": 0.1291137756691281, "first_commit": "2020-04-22 16:17:15", "latest_commit": "2020-04-22 16:24:26", "languages": [], @@ -1720,7 +1775,7 @@ "project_name": "inappropriate-words-ja", "stargazers_count": 159, "source": "GitHub", - "score": 0.12584452340231017, + "score": 0.1261907351714058, "first_commit": "2020-08-17 13:38:34", "latest_commit": "2021-12-02 00:33:00", "languages": [ @@ -1734,7 +1789,7 @@ "project_name": "JapaneseEmbeddingEval", "stargazers_count": 157, "source": "GitHub", - "score": 0.12000831148053778, + "score": 0.12034465417596123, "first_commit": "2023-01-07 13:35:18", "latest_commit": "2024-04-06 12:45:12", "languages": [ @@ -1748,7 +1803,7 @@ "project_name": "Jotoba", "stargazers_count": 157, "source": "GitHub", - "score": 0.12000831148053778, + "score": 0.12034465417596123, "first_commit": "2021-04-15 11:08:23", "latest_commit": "2024-01-22 17:06:42", "languages": [ @@ -1758,28 +1813,17 @@ "model_or_dataset": null }, { - "description": "FuguMT", - "url": "https://huggingface.co./staka/fugumt-en-ja", - "project_name": "fugumt-en-ja", - "downloads": 14911, - "source": "Hugging Face", - "score": 0.1061287257755177, - "first_commit": "2022-05-08 04:23:57", - "latest_commit": "2023-08-15 17:45:04", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", - "url": "https://huggingface.co./sonoisa/t5-base-japanese", - "project_name": "t5-base-japanese", - "downloads": 14766, + "description": "nlp-waseda/roberta-base-japanese-with-auto-jumanpp Model description", + "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese-with-auto-jumanpp", + "project_name": "roberta-base-japanese-with-auto-jumanpp", + "downloads": 15383, "source": "Hugging Face", - "score": 0.1040784079489921, - "first_commit": "2021-03-28 10:54:32", - "latest_commit": "2022-07-31 08:20:41", + "score": 0.1182414068210698, + "first_commit": "2022-10-15 05:09:36", + "latest_commit": "2022-10-21 10:57:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Japanese negative positive classification.日本語文書のネガポジを判定。", @@ -1787,7 +1831,7 @@ "project_name": "negapoji", "stargazers_count": 151, "source": "GitHub", - "score": 0.10249967571522058, + "score": 0.10280641118962747, "first_commit": "2017-08-17 17:23:49", "latest_commit": "2017-08-20 18:12:51", "languages": [ @@ -1801,7 +1845,7 @@ "project_name": "dictation-kit", "stargazers_count": 151, "source": "GitHub", - "score": 0.10249967571522058, + "score": 0.10280641118962747, "first_commit": "2015-10-03 17:08:52", "latest_commit": "2019-04-18 11:41:47", "languages": [ @@ -1816,7 +1860,7 @@ "project_name": "chiVe", "stargazers_count": 151, "source": "GitHub", - "score": 0.10249967571522058, + "score": 0.10280641118962747, "first_commit": "2019-11-18 15:39:08", "latest_commit": "2024-03-01 17:50:40", "languages": [ @@ -1824,13 +1868,26 @@ ], "model_or_dataset": "model" }, + { + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", + "url": "https://huggingface.co./sonoisa/t5-base-japanese", + "project_name": "t5-base-japanese", + "downloads": 14157, + "source": "Hugging Face", + "score": 0.10033205102048948, + "first_commit": "2021-03-28 10:54:32", + "latest_commit": "2022-07-31 08:20:41", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "Phishing URL dataset from JPCERT/CC", "url": "https://github.com/JPCERTCC/phishurl-list", "project_name": "phishurl-list", "stargazers_count": 150, "source": "GitHub", - "score": 0.09958156975433438, + "score": 0.09988337069190517, "first_commit": "2022-08-05 15:20:50", "latest_commit": "2024-05-02 04:51:47", "languages": [ @@ -1838,61 +1895,25 @@ ], "model_or_dataset": "dataset" }, - { - "description": "FuguMT", - "url": "https://huggingface.co./staka/fugumt-ja-en", - "project_name": "fugumt-ja-en", - "downloads": 14242, - "source": "Hugging Face", - "score": 0.09666898352761684, - "first_commit": "2022-05-08 04:32:09", - "latest_commit": "2023-08-15 17:40:58", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "nlp-waseda/roberta-base-japanese-with-auto-jumanpp Model description", - "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese-with-auto-jumanpp", - "project_name": "roberta-base-japanese-with-auto-jumanpp", - "downloads": 14166, - "source": "Hugging Face", - "score": 0.0955943341840586, - "first_commit": "2022-10-15 05:09:36", - "latest_commit": "2022-10-21 10:57:40", - "languages": [], - "model_or_dataset": "model" - }, { "description": "WRIME: 主観と客観の感情分析データセット", "url": "https://github.com/ids-cv/wrime", "project_name": "wrime", "stargazers_count": 145, "source": "GitHub", - "score": 0.08499103994990337, + "score": 0.0852681682032937, "first_commit": "2020-08-18 
14:13:44", "latest_commit": "2023-01-15 13:44:21", "languages": [], "model_or_dataset": "dataset" }, - { - "description": "BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-v2", - "project_name": "bert-large-japanese-v2", - "downloads": 13344, - "source": "Hugging Face", - "score": 0.0839711531260997, - "first_commit": "2023-05-19 00:40:35", - "latest_commit": "2023-05-19 00:47:40", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Japanese sentiment analyzer implemented in Python.", "url": "https://github.com/Hironsan/asari", "project_name": "asari", "stargazers_count": 143, "source": "GitHub", - "score": 0.07915482802813097, + "score": 0.07942208720784913, "first_commit": "2019-02-06 14:17:47", "latest_commit": "2022-10-19 07:38:31", "languages": [ @@ -1906,7 +1927,7 @@ "project_name": "wanna", "stargazers_count": 143, "source": "GitHub", - "score": 0.07915482802813097, + "score": 0.07942208720784913, "first_commit": "2023-03-01 18:10:59", "latest_commit": "2023-04-02 00:25:56", "languages": [ @@ -1914,13 +1935,26 @@ ], "model_or_dataset": null }, + { + "description": "Japanese Stable LM Base Gamma 7B Model Description", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-gamma-7b", + "project_name": "japanese-stablelm-base-gamma-7b", + "downloads": 12623, + "source": "Hugging Face", + "score": 0.07792344433362958, + "first_commit": "2023-10-16 08:15:14", + "latest_commit": "2024-01-25 08:05:12", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 + }, { "description": "A fast converter between Japanese hankaku and zenkaku characters", "url": "https://github.com/studio-ousia/mojimoji", "project_name": "mojimoji", "stargazers_count": 142, "source": "GitHub", - "score": 0.07623672206724477, + "score": 0.07649904671012683, "first_commit": "2013-11-03 01:38:05", "latest_commit": "2024-01-12 19:24:55", "languages": [ @@ -1934,7 +1968,7 @@ "project_name": "jp-stable", "stargazers_count": 142, "source": "GitHub", - "score": 0.07623672206724477, + "score": 0.07649904671012683, "first_commit": null, "latest_commit": null, "languages": [], @@ -1946,7 +1980,7 @@ "project_name": "Japanese-Alpaca-LoRA", "stargazers_count": 142, "source": "GitHub", - "score": 0.07623672206724477, + "score": 0.07649904671012683, "first_commit": "2023-03-22 23:04:56", "latest_commit": "2023-04-03 05:31:02", "languages": [ @@ -1955,40 +1989,69 @@ "model_or_dataset": "model" }, { - "description": "Japanese Stable LM Base Gamma 7B Model Description", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-gamma-7b", - "project_name": "japanese-stablelm-base-gamma-7b", - "downloads": 12709, + "description": "このドキュメントの日本語版はまだ作成中です。", + "url": "https://huggingface.co./bclavie/JaColBERT", + "project_name": "JaColBERT", + "downloads": 12465, "source": "Hugging Face", - "score": 0.07499217505821176, - "first_commit": "2023-10-16 08:15:14", - "latest_commit": "2024-01-25 08:05:12", + "score": 0.07561538706079786, + "first_commit": "2023-12-25 22:43:54", + "latest_commit": "2024-01-27 15:30:00", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "Swallow-MX-8x7b-NVE-v0.1 Our Swallow-MX-8x7b-NVE-v0.1 model has undergone continuous pre-training from the Mixtral-8x7B-Instruct-v0.1, primarily with the addition of Japanese language data.", - "url": 
"https://huggingface.co./tokyotech-llm/Swallow-MX-8x7b-NVE-v0.1", - "project_name": "Swallow-MX-8x7b-NVE-v0.1", - "downloads": 12513, + "description": "LINE DistilBERT Japanese This is a DistilBERT model pre-trained on 131 GB of Japanese web text.", + "url": "https://huggingface.co./line-corporation/line-distilbert-base-japanese", + "project_name": "line-distilbert-base-japanese", + "downloads": 12461, "source": "Hugging Face", - "score": 0.07222071096166682, - "first_commit": "2024-02-22 04:44:42", - "latest_commit": "2024-05-03 18:51:12", + "score": 0.07555695523110592, + "first_commit": "2023-03-10 10:23:54", + "latest_commit": "2023-12-01 09:50:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Fugaku-LLM-13B-instruct-gguf Fugaku-LLMさんが公開しているFugaku-LLM-13B-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Fugaku-LLM-13B-instruct-gguf", + "project_name": "Fugaku-LLM-13B-instruct-gguf", + "downloads": 12435, + "source": "Hugging Face", + "score": 0.07517714833810829, + "first_commit": "2024-05-10 16:43:49", + "latest_commit": "2024-05-12 06:06:51", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.4 }, { "description": "Llama-3.1-70B-Japanese-Instruct-2407-gguf cyberagentさんが公開しているLlama-3.1-70B-Japanese-Instruct-2407のggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Japanese-Instruct-2407-gguf", "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-gguf", - "downloads": 11650, + "downloads": 12158, "source": "Hugging Face", - "score": 0.060017784863104134, + "score": 0.07113074413194129, "first_commit": "2024-07-26 09:05:34", "latest_commit": "2024-07-27 05:59:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-hf", + "project_name": "Swallow-7b-instruct-hf", + "downloads": 11766, + "source": "Hugging Face", + "score": 0.06540442482213094, + "first_commit": "2023-12-07 02:18:36", + "latest_commit": "2024-06-29 08:56:26", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.83 }, { "description": "A set of metrics for feature selection from text data", @@ -1996,7 +2059,7 @@ "project_name": "JapaneseTokenizers", "stargazers_count": 136, "source": "GitHub", - "score": 0.05872808630192756, + "score": 0.05896080372379308, "first_commit": "2015-09-01 19:27:42", "latest_commit": "2019-03-25 16:52:52", "languages": [ @@ -2004,37 +2067,13 @@ ], "model_or_dataset": null }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-hf", - "project_name": "Swallow-7b-instruct-hf", - "downloads": 11326, - "source": "Hugging Face", - "score": 0.055436385030040045, - "first_commit": "2023-12-07 02:18:36", - "latest_commit": "2024-06-29 08:56:26", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Fugaku-LLM-13B-instruct-gguf Fugaku-LLMさんが公開しているFugaku-LLM-13B-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Fugaku-LLM-13B-instruct-gguf", - "project_name": "Fugaku-LLM-13B-instruct-gguf", - "downloads": 11282, - "source": "Hugging Face", - "score": 
0.054814219620611594, - "first_commit": "2024-05-10 16:43:49", - "latest_commit": "2024-05-12 06:06:51", - "languages": [], - "model_or_dataset": "model" - }, { "description": "chakki's Aspect-Based Sentiment Analysis dataset", "url": "https://github.com/chakki-works/chABSA-dataset", "project_name": "chABSA-dataset", "stargazers_count": 134, "source": "GitHub", - "score": 0.052891874380155156, + "score": 0.053114722728348496, "first_commit": "2017-12-27 11:51:01", "latest_commit": "2018-09-11 12:28:06", "languages": [ @@ -2045,28 +2084,17 @@ "model_or_dataset": "dataset" }, { - "description": "このドキュメントの日本語版はまだ作成中です。", - "url": "https://huggingface.co./bclavie/JaColBERT", - "project_name": "JaColBERT", - "downloads": 11070, - "source": "Hugging Face", - "score": 0.05181651355700176, - "first_commit": "2023-12-25 22:43:54", - "latest_commit": "2024-01-27 15:30:00", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "japanese-sentiment-analysis This model was trained from scratch on the chABSA dataset.", - "url": "https://huggingface.co./jarvisx17/japanese-sentiment-analysis", - "project_name": "japanese-sentiment-analysis", - "downloads": 10760, + "description": "luke-japanese-large luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-large", + "project_name": "luke-japanese-large", + "downloads": 10863, "source": "Hugging Face", - "score": 0.047433075445119446, - "first_commit": "2022-11-15 06:28:39", - "latest_commit": "2024-01-20 14:45:14", + "score": 0.05221343926917495, + "first_commit": "2022-11-07 14:25:53", + "latest_commit": "2022-11-09 11:18:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "自然言語で書かれた時間情報表現を抽出/規格化するルールベースの解析器", @@ -2074,7 +2102,7 @@ "project_name": "ja-timex", "stargazers_count": 132, "source": "GitHub", - "score": 0.04705566245838275, + "score": 0.04726864173290391, "first_commit": "2021-07-18 20:38:04", "latest_commit": "2023-11-04 14:58:31", "languages": [ @@ -2083,16 +2111,30 @@ "model_or_dataset": null }, { - "description": "LINE DistilBERT Japanese This is a DistilBERT model pre-trained on 131 GB of Japanese web text.", - "url": "https://huggingface.co./line-corporation/line-distilbert-base-japanese", - "project_name": "line-distilbert-base-japanese", - "downloads": 10488, + "description": "Swallow-MX-8x7b-NVE-v0.1 Our Swallow-MX-8x7b-NVE-v0.1 model has undergone continuous pre-training from the Mixtral-8x7B-Instruct-v0.1, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-MX-8x7b-NVE-v0.1", + "project_name": "Swallow-MX-8x7b-NVE-v0.1", + "downloads": 10471, "source": "Hugging Face", - "score": 0.043586962005016265, - "first_commit": "2023-03-10 10:23:54", - "latest_commit": "2023-12-01 09:50:34", + "score": 0.046487119959364595, + "first_commit": "2024-02-22 04:44:42", + "latest_commit": "2024-05-03 18:51:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 46.7 + }, + { + "description": "[Llama-3.1-8B-EZO-1.1-it] Model Card モデル情報 / Model Information このモデルは、Meta AI の Llama 3.1 をベースに、日本語タスクでの性能を向上させるためにファインチューニングを行ったものです。 ", + "url": "https://huggingface.co./HODACHI/Llama-3.1-8B-EZO-1.1-it", + "project_name": "Llama-3.1-8B-EZO-1.1-it", + "downloads": 10197, + 
"source": "Hugging Face", + "score": 0.04248453962546654, + "first_commit": "2024-07-28 23:24:59", + "latest_commit": "2024-08-04 06:16:48", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "A JSON kanji dataset with updated JLPT levels and WaniKani information", @@ -2100,7 +2142,7 @@ "project_name": "kanji-data", "stargazers_count": 130, "source": "GitHub", - "score": 0.04121945053661035, + "score": 0.04142256073745932, "first_commit": "2019-02-28 05:53:36", "latest_commit": "2019-12-29 12:12:54", "languages": [ @@ -2115,19 +2157,32 @@ "project_name": "awesome-bert-japanese", "stargazers_count": 130, "source": "GitHub", - "score": 0.04121945053661035, + "score": 0.04142256073745932, "first_commit": "2020-07-27 00:10:39", "latest_commit": "2023-03-15 18:51:20", "languages": [], "model_or_dataset": null }, + { + "description": "japanese-sentiment-analysis This model was trained from scratch on the chABSA dataset.", + "url": "https://huggingface.co./jarvisx17/japanese-sentiment-analysis", + "project_name": "japanese-sentiment-analysis", + "downloads": 10094, + "source": "Hugging Face", + "score": 0.04097992001089903, + "first_commit": "2022-11-15 06:28:39", + "latest_commit": "2024-01-20 14:45:14", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111 + }, { "description": "Wikipediaを用いた日本語の固有表現抽出データセット", "url": "https://github.com/stockmarkteam/ner-wikipedia-dataset", "project_name": "ner-wikipedia-dataset", "stargazers_count": 127, "source": "GitHub", - "score": 0.03246513265395175, + "score": 0.03265343924429245, "first_commit": "2020-12-15 01:52:19", "latest_commit": "2023-09-02 23:44:38", "languages": [ @@ -2141,7 +2196,7 @@ "project_name": "PAX_SAPIENTICA", "stargazers_count": 127, "source": "GitHub", - "score": 0.03246513265395175, + "score": 0.03265343924429245, "first_commit": "2022-06-24 23:18:02", "latest_commit": "2024-08-13 01:06:14", "languages": [ @@ -2152,28 +2207,17 @@ "model_or_dataset": "dataset" }, { - "description": "OpenCALM-3B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-3b", - "project_name": "open-calm-3b", - "downloads": 9689, - "source": "Hugging Face", - "score": 0.03228900377471316, - "first_commit": "2023-05-15 07:14:36", - "latest_commit": "2023-05-18 01:11:50", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "luke-japanese-large luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-large", - "project_name": "luke-japanese-large", - "downloads": 9529, + "description": "DeBERTa V2 small Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", + "url": "https://huggingface.co./izumi-lab/deberta-v2-small-japanese", + "project_name": "deberta-v2-small-japanese", + "downloads": 9380, "source": "Hugging Face", - "score": 0.030026584104064225, - "first_commit": "2022-11-07 14:25:53", - "latest_commit": "2022-11-09 11:18:56", + "score": 0.030549838410887317, + "first_commit": "2023-10-21 13:24:28", + "latest_commit": "2024-07-19 03:08:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Node.js module for converting Japanese Hiragana and Katakana script to, and from, Romaji using 
Hepburn romanisation", @@ -2181,7 +2225,7 @@ "project_name": "hepburn", "stargazers_count": 126, "source": "GitHub", - "score": 0.02954702669306555, + "score": 0.029730398746570155, "first_commit": "2013-06-28 03:06:52", "latest_commit": "2023-09-08 09:11:19", "languages": [ @@ -2189,25 +2233,13 @@ ], "model_or_dataset": null }, - { - "description": "DeBERTa V2 small Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", - "url": "https://huggingface.co./izumi-lab/deberta-v2-small-japanese", - "project_name": "deberta-v2-small-japanese", - "downloads": 9412, - "source": "Hugging Face", - "score": 0.028372189719902192, - "first_commit": "2023-10-21 13:24:28", - "latest_commit": "2024-07-19 03:08:14", - "languages": [], - "model_or_dataset": "model" - }, { "description": "Kanji usage frequency data collected from various sources", "url": "https://github.com/scriptin/kanji-frequency", "project_name": "kanji-frequency", "stargazers_count": 123, "source": "GitHub", - "score": 0.020792708810406946, + "score": 0.020961277253403274, "first_commit": "2016-01-23 01:19:08", "latest_commit": "2024-07-13 03:51:19", "languages": [ @@ -2222,7 +2254,7 @@ "project_name": "deep-learning-with-pytorch-ja", "stargazers_count": 121, "source": "GitHub", - "score": 0.014956496888634544, + "score": 0.01511519625795869, "first_commit": "2020-12-05 15:15:16", "latest_commit": "2021-05-13 11:02:11", "languages": [ @@ -2234,16 +2266,17 @@ "model_or_dataset": null }, { - "description": "bert-base-japanese-v3-marc_ja 「大規模言語モデル入門」の第5章で紹介している(感情分析)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-marc_ja", - "project_name": "bert-base-japanese-v3-marc_ja", - "downloads": 8053, + "description": "BERT large Japanese (unidic-lite with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-v2", + "project_name": "bert-large-japanese-v2", + "downloads": 8009, "source": "Hugging Face", - "score": 0.009155762642327819, - "first_commit": "2023-06-01 14:29:06", - "latest_commit": "2023-07-24 06:49:13", + "score": 0.010522328783974074, + "first_commit": "2023-05-19 00:40:35", + "latest_commit": "2023-05-19 00:47:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "A unified language analyzer for Japanese", @@ -2251,7 +2284,7 @@ "project_name": "kwja", "stargazers_count": 119, "source": "GitHub", - "score": 0.009120284966862142, + "score": 0.009269115262514106, "first_commit": "2022-05-25 16:09:37", "latest_commit": "2024-08-06 23:13:59", "languages": [ @@ -2259,17 +2292,31 @@ ], "model_or_dataset": null }, + { + "description": "bert-base-japanese-v3-marc_ja 「大規模言語モデル入門」の第5章で紹介している(感情分析)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-marc_ja", + "project_name": "bert-base-japanese-v3-marc_ja", + "downloads": 7916, + "source": "Hugging Face", + "score": 0.009163788743636414, + "first_commit": "2023-06-01 14:29:06", + "latest_commit": "2023-07-24 06:49:13", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "CyberAgentLM2-7B (CALM2-7B)", "url": "https://huggingface.co./cyberagent/calm2-7b", "project_name": "calm2-7b", - "downloads": 7664, + "downloads": 7629, "source": "Hugging Face", - "score": 0.0036552548180626017, + "score": 0.00497130496323955, "first_commit": "2023-11-01 07:24:59", "latest_commit": "2023-11-02 05:46:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": 
"model", + "model_size": null }, { "description": "Python 3 library for manipulating Jim Breen's JMdict, KanjiDic2, JMnedict and kanji-radical mappings", @@ -2277,7 +2324,7 @@ "project_name": "jamdict", "stargazers_count": 117, "source": "GitHub", - "score": 0.0032840730450897394, + "score": 0.0034230342670695203, "first_commit": "2016-10-25 10:47:58", "latest_commit": "2021-06-06 12:04:03", "languages": [ @@ -2291,19 +2338,32 @@ "project_name": "dataset-list", "stargazers_count": 116, "source": "GitHub", - "score": 0.0003659670842035382, + "score": 0.0004999937693472276, "first_commit": "2015-11-21 22:38:56", "latest_commit": "2024-07-26 00:40:50", "languages": [], "model_or_dataset": null }, { - "description": "A comparison tool of Japanese tokenizers", - "url": "https://github.com/taishi-i/toiro", + "description": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fast-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", + "downloads": 7226, + "source": "Hugging Face", + "score": -0.0009157018782236427, + "first_commit": "2023-08-29 15:31:01", + "latest_commit": "2023-11-16 14:27:48", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.85 + }, + { + "description": "A comparison tool of Japanese tokenizers", + "url": "https://github.com/taishi-i/toiro", "project_name": "toiro", "stargazers_count": 115, "source": "GitHub", - "score": -0.002552138876682663, + "score": -0.002423046728375065, "first_commit": "2020-08-13 19:49:15", "latest_commit": "2023-07-31 23:55:55", "languages": [ @@ -2318,7 +2378,7 @@ "project_name": "ja_text_bert", "stargazers_count": 115, "source": "GitHub", - "score": -0.002552138876682663, + "score": -0.002423046728375065, "first_commit": "2018-10-31 18:23:04", "latest_commit": "2018-11-08 15:17:20", "languages": [ @@ -2333,7 +2393,7 @@ "project_name": "GPTSAN", "stargazers_count": 115, "source": "GitHub", - "score": -0.002552138876682663, + "score": -0.002423046728375065, "first_commit": "2022-02-11 14:38:55", "latest_commit": "2023-09-13 12:20:29", "languages": [ @@ -2345,73 +2405,66 @@ "description": "自動生成のマルチターンデータセット オープンなデータソースから、Calm3-22bを使ってQ&Aを自動生成したものです。 一部の計算には東京工業大学のスーパーコンピュータTSUBAME4.0を利用しました。 データソース はじめの質問(q1)を、種々のデータソースから収集しました。その後のやりとりはすべて、Calmが生成しました。質問文については、元データのライセンスに準拠します。 oasst2-33k-ja apache 2.0 databricks-dolly-15k-ja cc-by-sa-3.0 minnade CC0 cyberagent/chatbot-arena-ja-calm2-7b-chat-experimental cc-by-4.0", "url": "https://huggingface.co./datasets/kanhatakeyama/AutoMultiTurnByCalm3-22B", "project_name": "AutoMultiTurnByCalm3-22B", - "downloads": 7158, + "downloads": 7093, "source": "Hugging Face", - "score": -0.003499647390364648, + "score": -0.002858560215480726, "first_commit": "2024-07-17 09:53:20", "latest_commit": "2024-07-17 10:03:02", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fast-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-gguf", - "downloads": 7141, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": 
"https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-v0.1", + "project_name": "Swallow-7b-instruct-v0.1", + "downloads": 6894, "source": "Hugging Face", - "score": -0.0037400294803710973, - "first_commit": "2023-08-29 15:31:01", - "latest_commit": "2023-11-16 14:27:48", + "score": -0.005765543742654858, + "first_commit": "2024-03-04 08:46:03", + "latest_commit": "2024-07-06 15:18:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.83 }, { - "description": "japanese-gpt-neox-3.6b Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b", - "project_name": "japanese-gpt-neox-3.6b", - "downloads": 6939, + "description": "Llama-3.1-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-8B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-8B-Instruct-gguf", + "project_name": "Llama-3.1-8B-Instruct-gguf", + "downloads": 6847, "source": "Hugging Face", - "score": -0.006596334314565374, - "first_commit": "2023-05-17 02:16:45", - "latest_commit": "2024-07-20 07:55:19", + "score": -0.006452117741535181, + "first_commit": "2024-07-23 16:33:06", + "latest_commit": "2024-07-24 21:04:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Model Card for Japanese character-level DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese-char-wwm", - "project_name": "deberta-v2-large-japanese-char-wwm", - "downloads": 6742, + "description": "【告知】chilled_remix及びreversemixは2023年5月21日にVersion変更を行い、v2へ移行いたしました。", + "url": "https://huggingface.co./sazyou-roukaku/chilled_remix", + "project_name": "chilled_remix", + "downloads": 6648, "source": "Hugging Face", - "score": -0.009381938534051873, - "first_commit": "2023-03-09 10:13:05", - "latest_commit": "2023-09-15 03:48:28", + "score": -0.009359101268709313, + "first_commit": "2023-04-18 12:48:48", + "latest_commit": "2023-06-09 23:08:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "japanese-roberta-base This repository provides a base-sized Japanese RoBERTa model.", "url": "https://huggingface.co./rinna/japanese-roberta-base", "project_name": "japanese-roberta-base", - "downloads": 6663, + "downloads": 6584, "source": "Hugging Face", - "score": -0.010499008246434783, + "score": -0.010294010543780392, "first_commit": "2021-06-11 02:56:39", "latest_commit": "2024-07-20 07:44:40", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "【告知】chilled_remix及びreversemixは2023年5月21日にVersion変更を行い、v2へ移行いたしました。", - "url": "https://huggingface.co./sazyou-roukaku/chilled_remix", - "project_name": "chilled_remix", - "downloads": 6616, - "source": "Hugging Face", - "score": -0.011163594024687907, - "first_commit": "2023-04-18 12:48:48", - "latest_commit": "2023-06-09 23:08:31", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { "description": "複数の前処理を構成して管理するテキスト前処理ツール", @@ -2419,7 +2472,7 @@ "project_name": "HojiChar", "stargazers_count": 112, "source": "GitHub", - "score": -0.011306456759341266, + "score": -0.011192168221541942, "first_commit": "2023-01-31 20:37:40", "latest_commit": "2024-08-15 
17:25:48", "languages": [ @@ -2433,7 +2486,7 @@ "project_name": "pymlask", "stargazers_count": 112, "source": "GitHub", - "score": -0.011306456759341266, + "score": -0.011192168221541942, "first_commit": "2017-02-10 21:33:23", "latest_commit": "2024-07-26 01:27:14", "languages": [ @@ -2442,40 +2495,43 @@ "model_or_dataset": null }, { - "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-instruct", - "project_name": "ELYZA-japanese-Llama-2-13b-instruct", - "downloads": 6531, + "description": "Model Card for Japanese character-level DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese-char-wwm", + "project_name": "deberta-v2-large-japanese-char-wwm", + "downloads": 6405, "source": "Hugging Face", - "score": -0.012365504474720153, - "first_commit": "2023-12-25 16:10:32", - "latest_commit": "2023-12-27 01:41:15", + "score": -0.01290883492249481, + "first_commit": "2023-03-09 10:13:05", + "latest_commit": "2023-09-15 03:48:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.33 }, { - "description": "Llama-3.1-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-8B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-8B-Instruct-gguf", - "project_name": "Llama-3.1-8B-Instruct-gguf", - "downloads": 6502, + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-v0.1", + "project_name": "Llama-3-Swallow-8B-v0.1", + "downloads": 6070, "source": "Hugging Face", - "score": -0.012775568040025272, - "first_commit": "2024-07-23 16:33:06", - "latest_commit": "2024-07-24 21:04:40", + "score": -0.017802500659194982, + "first_commit": "2024-05-20 06:36:00", + "latest_commit": "2024-07-01 06:24:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "stockmark/stockmark-100b Stockmark-100b is a 100 billion parameter LLM pretrained from scratch based on Japanese and English corpus of about 910 billion tokens.", - "url": "https://huggingface.co./stockmark/stockmark-100b", - "project_name": "stockmark-100b", - "downloads": 6115, + "description": "transformers-ud-japanese-electra-ginza-510 (sudachitra-wordpiece, mC4 Japanese)", + "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-510", + "project_name": "transformers-ud-japanese-electra-base-ginza-510", + "downloads": 6011, "source": "Hugging Face", - "score": -0.018247795618407378, - "first_commit": "2024-05-13 09:31:40", - "latest_commit": "2024-05-15 06:18:10", + "score": -0.018664370147151135, + "first_commit": "2021-12-05 11:31:57", + "latest_commit": "2021-12-05 21:12:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "A fast LSTM Language Model for large vocabulary language like Japanese and Chinese", @@ -2483,7 +2539,7 @@ "project_name": "JLM", "stargazers_count": 109, "source": "GitHub", - "score": -0.02006077464199987, + "score": -0.01996128971470882, "first_commit": "2018-01-10 14:12:41", "latest_commit": "2019-06-04 21:35:33", "languages": [ @@ -2497,7 +2553,7 @@ "project_name": "t5-japanese", "stargazers_count": 109, 
"source": "GitHub", - "score": -0.02006077464199987, + "score": -0.01996128971470882, "first_commit": "2021-03-28 22:12:19", "latest_commit": "2023-07-20 21:36:10", "languages": [ @@ -2511,7 +2567,7 @@ "project_name": "tdmelodic", "stargazers_count": 109, "source": "GitHub", - "score": -0.02006077464199987, + "score": -0.01996128971470882, "first_commit": "2020-09-14 18:12:46", "latest_commit": "2024-03-22 01:44:10", "languages": [ @@ -2519,29 +2575,44 @@ ], "model_or_dataset": "dataset" }, + { + "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-instruct", + "project_name": "ELYZA-japanese-Llama-2-13b-instruct", + "downloads": 5888, + "source": "Hugging Face", + "score": -0.02046114891017836, + "first_commit": "2023-12-25 16:10:32", + "latest_commit": "2023-12-27 01:41:15", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, { "description": "japanese-gpt2-medium This repository provides a medium-sized Japanese GPT-2 model.", "url": "https://huggingface.co./rinna/japanese-gpt2-medium", "project_name": "japanese-gpt2-medium", "downloads": 5858, "source": "Hugging Face", - "score": -0.021881807214387226, + "score": -0.020899387632867927, "first_commit": "2021-04-05 02:01:26", "latest_commit": "2024-07-20 07:50:47", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.361 }, { - "description": "transformers-ud-japanese-electra-ginza-510 (sudachitra-wordpiece, mC4 Japanese)", - "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-510", - "project_name": "transformers-ud-japanese-electra-base-ginza-510", - "downloads": 5820, + "description": "japanese-gpt-neox-3.6b Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b", + "project_name": "japanese-gpt-neox-3.6b", + "downloads": 5728, "source": "Hugging Face", - "score": -0.022419131886166347, - "first_commit": "2021-12-05 11:31:57", - "latest_commit": "2021-12-05 21:12:12", + "score": -0.022798422097856055, + "first_commit": "2023-05-17 02:16:45", + "latest_commit": "2024-07-20 07:55:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.76 }, { "description": "Yet Another Japanese Dependency Structure Analyzer", @@ -2549,7 +2620,7 @@ "project_name": "cabocha", "stargazers_count": 108, "source": "GitHub", - "score": -0.02297888060288607, + "score": -0.022884330212431112, "first_commit": "2011-07-29 04:08:14", "latest_commit": "2020-07-24 11:31:47", "languages": [ @@ -2568,7 +2639,7 @@ "project_name": "ja.text8", "stargazers_count": 108, "source": "GitHub", - "score": -0.02297888060288607, + "score": -0.022884330212431112, "first_commit": "2017-10-04 13:15:25", "latest_commit": "2017-10-04 13:38:23", "languages": [ @@ -2582,7 +2653,7 @@ "project_name": "JaQuAD", "stargazers_count": 106, "source": "GitHub", - "score": -0.028815092524658473, + "score": -0.028730411207875697, "first_commit": "2022-01-11 16:58:07", "latest_commit": "2022-02-04 11:42:51", "languages": [ @@ -2591,28 +2662,17 @@ "model_or_dataset": "dataset" }, { - "description": "PLaMo-13B Model Description PLaMo-13B is a LLaMA-based 13B model pre-trained on English and Japanese open datasets, developed by Preferred Networks, Inc. 
", - "url": "https://huggingface.co./pfnet/plamo-13b", - "project_name": "plamo-13b", - "downloads": 5246, - "source": "Hugging Face", - "score": -0.030535562454619393, - "first_commit": "2023-09-25 12:47:05", - "latest_commit": "2023-10-10 15:24:54", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-instruct-v0.1", - "project_name": "Swallow-7b-instruct-v0.1", - "downloads": 5190, + "description": "Llama-3-ELYZA-JP-8B-GGUF Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-GGUF", + "project_name": "Llama-3-ELYZA-JP-8B-GGUF", + "downloads": 5240, "source": "Hugging Face", - "score": -0.03132740933934652, - "first_commit": "2024-03-04 08:46:03", - "latest_commit": "2024-07-06 15:18:14", + "score": -0.029927105320273022, + "first_commit": "2024-06-25 07:29:22", + "latest_commit": "2024-06-26 02:56:52", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "Yet another mecab wrapper for nodejs", @@ -2620,7 +2680,7 @@ "project_name": "node-mecab-ya", "stargazers_count": 105, "source": "GitHub", - "score": -0.03173319848554467, + "score": -0.03165345170559799, "first_commit": "2016-04-09 23:34:34", "latest_commit": "2021-06-18 18:30:43", "languages": [ @@ -2629,28 +2689,17 @@ "model_or_dataset": null }, { - "description": "Llama-3-ELYZA-JP-8B-GGUF Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-GGUF", - "project_name": "Llama-3-ELYZA-JP-8B-GGUF", - "downloads": 5148, - "source": "Hugging Face", - "score": -0.03192129450289186, - "first_commit": "2024-06-25 07:29:22", - "latest_commit": "2024-06-26 02:56:52", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-8B-v0.1", - "project_name": "Llama-3-Swallow-8B-v0.1", - "downloads": 4743, + "description": "PLaMo-13B Model Description PLaMo-13B is a LLaMA-based 13B model pre-trained on English and Japanese open datasets, developed by Preferred Networks, Inc. 
", + "url": "https://huggingface.co./pfnet/plamo-13b", + "project_name": "plamo-13b", + "downloads": 4749, "source": "Hugging Face", - "score": -0.03764804429422198, - "first_commit": "2024-05-20 06:36:00", - "latest_commit": "2024-07-01 06:24:48", + "score": -0.037099612414958946, + "first_commit": "2023-09-25 12:47:05", + "latest_commit": "2023-10-10 15:24:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { "description": "An example usage of JParaCrawl pre-trained Neural Machine Translation (NMT) models.", @@ -2658,7 +2707,7 @@ "project_name": "jparacrawl-finetune", "stargazers_count": 102, "source": "GitHub", - "score": -0.040487516368203275, + "score": -0.04042257319876487, "first_commit": "2019-11-17 14:46:58", "latest_commit": "2021-04-29 14:27:00", "languages": [ @@ -2672,7 +2721,7 @@ "project_name": "Kamite", "stargazers_count": 102, "source": "GitHub", - "score": -0.040487516368203275, + "score": -0.04042257319876487, "first_commit": "2022-07-08 19:26:15", "latest_commit": "2024-02-22 23:41:52", "languages": [ @@ -2691,7 +2740,7 @@ "project_name": "house-of-councillors", "stargazers_count": 102, "source": "GitHub", - "score": -0.040487516368203275, + "score": -0.04042257319876487, "first_commit": "2022-06-16 16:06:39", "latest_commit": "2024-08-15 14:22:06", "languages": [ @@ -2700,37 +2749,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-hf", - "project_name": "Swallow-13b-instruct-hf", - "downloads": 4453, - "source": "Hugging Face", - "score": -0.041748679947273167, - "first_commit": "2023-12-07 03:10:55", - "latest_commit": "2024-06-29 08:56:29", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Model Card for Japanese character-level DeBERTa V2 tiny Model description This is a Japanese DeBERTa V2 tiny model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese-char-wwm", - "project_name": "deberta-v2-tiny-japanese-char-wwm", - "downloads": 4374, - "source": "Hugging Face", - "score": -0.042865749659656074, - "first_commit": "2023-01-05 08:48:29", - "latest_commit": "2023-03-23 07:31:19", - "languages": [], - "model_or_dataset": "model" - }, { "description": "CJK computer science terms comparison / 中日韓電腦科學術語對照 / 日中韓のコンピュータ科学の用語対照 / 한·중·일 전산학 용어 대조", "url": "https://github.com/dahlia/cjk-compsci-terms", "project_name": "cjk-compsci-terms", "stargazers_count": 101, "source": "GitHub", - "score": -0.043405622329089476, + "score": -0.04334561369648716, "first_commit": "2020-12-19 06:11:14", "latest_commit": "2022-09-16 01:27:59", "languages": [ @@ -2745,7 +2770,7 @@ "project_name": "JapaneseWordSimilarityDataset", "stargazers_count": 101, "source": "GitHub", - "score": -0.043405622329089476, + "score": -0.04334561369648716, "first_commit": "2016-01-08 16:25:16", "latest_commit": "2021-12-07 12:22:07", "languages": [ @@ -2759,71 +2784,102 @@ "project_name": "japanese-pitch-accent-resources", "stargazers_count": 101, "source": "GitHub", - "score": -0.043405622329089476, + "score": -0.04334561369648716, "first_commit": "2018-03-03 15:51:09", "latest_commit": "2024-02-11 00:55:27", "languages": [], "model_or_dataset": null }, { - "description": 
"gpt-neox-japanese-2.7b", - "url": "https://huggingface.co./abeja/gpt-neox-japanese-2.7b", - "project_name": "gpt-neox-japanese-2.7b", - "downloads": 4222, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-hf", + "project_name": "Swallow-13b-instruct-hf", + "downloads": 4238, "source": "Hugging Face", - "score": -0.045015048346772564, - "first_commit": "2022-08-29 02:15:44", - "latest_commit": "2023-04-10 05:12:30", + "score": -0.044564278658104584, + "first_commit": "2023-12-07 03:10:55", + "latest_commit": "2024-06-29 08:56:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { "description": "japanese-gpt2-xsmall", "url": "https://huggingface.co./rinna/japanese-gpt2-xsmall", "project_name": "japanese-gpt2-xsmall", - "downloads": 4042, + "downloads": 4068, "source": "Hugging Face", - "score": -0.04756027047625261, + "score": -0.047047631420012136, "first_commit": "2021-07-26 02:52:54", "latest_commit": "2024-07-20 07:48:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.0437 }, { - "description": "Llama 3 Youko 8B (rinna/llama-3-youko-8b)", - "url": "https://huggingface.co./rinna/llama-3-youko-8b", - "project_name": "llama-3-youko-8b", - "downloads": 3945, + "description": "Llama-3.1-70B-Japanese-Instruct-2407 Model Description This is a Japanese continually pre-trained model based on meta-llama/Meta-Llama-3.1-70B-Instruct.", + "url": "https://huggingface.co./cyberagent/Llama-3.1-70B-Japanese-Instruct-2407", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407", + "downloads": 4009, "source": "Hugging Face", - "score": -0.04893186240158353, - "first_commit": "2024-05-01 07:53:46", - "latest_commit": "2024-07-25 05:14:42", + "score": -0.047909500907968285, + "first_commit": "2024-07-26 01:30:21", + "latest_commit": "2024-07-26 02:30:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 + }, + { + "description": "stockmark/stockmark-100b Stockmark-100b is a 100 billion parameter LLM pretrained from scratch based on Japanese and English corpus of about 910 billion tokens.", + "url": "https://huggingface.co./stockmark/stockmark-100b", + "project_name": "stockmark-100b", + "downloads": 3996, + "source": "Hugging Face", + "score": -0.04809940435446709, + "first_commit": "2024-05-13 09:31:40", + "latest_commit": "2024-05-15 06:18:10", + "languages": [], + "model_or_dataset": "model", + "model_size": 96.2 + }, + { + "description": "Llama3 Swallow", + "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-v0.1", + "project_name": "Llama-3-Swallow-70B-v0.1", + "downloads": 3907, + "source": "Hugging Face", + "score": -0.04939951256511281, + "first_commit": "2024-06-14 05:56:33", + "latest_commit": "2024-07-01 06:24:32", + "languages": [], + "model_or_dataset": "model", + "model_size": 70.6 }, { "description": "Llama-3-Swallow-70B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-70B-Instruct-v0.1のggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Llama-3-Swallow-70B-Instruct-v0.1-gguf", "project_name": "Llama-3-Swallow-70B-Instruct-v0.1-gguf", - "downloads": 3807, + "downloads": 3775, "source": "Hugging Face", - "score": -0.05088319936751823, + "score": -0.05132776294494691, "first_commit": "2024-07-01 14:21:29", "latest_commit": 
"2024-07-07 05:04:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 }, { - "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoさんが公開しているjapanese-novel-gpt-j-6bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/aibuncho-japanese-novel-gpt-j-6b-gguf", - "project_name": "aibuncho-japanese-novel-gpt-j-6b-gguf", - "downloads": 3788, + "description": "Llama 3 Youko 8B (rinna/llama-3-youko-8b)", + "url": "https://huggingface.co./rinna/llama-3-youko-8b", + "project_name": "llama-3-youko-8b", + "downloads": 3774, "source": "Hugging Face", - "score": -0.05115186170340779, - "first_commit": "2023-09-03 17:32:44", - "latest_commit": "2023-09-11 01:10:36", + "score": -0.05134237090236989, + "first_commit": "2024-05-01 07:53:46", + "latest_commit": "2024-07-25 05:14:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "Asynchronous japanese morphological analyser using MeCab.", @@ -2831,7 +2887,7 @@ "project_name": "node-mecab-async", "stargazers_count": 98, "source": "GitHub", - "score": -0.05215994021174808, + "score": -0.05211473518965404, "first_commit": "2012-10-29 09:32:49", "latest_commit": "2017-10-29 14:56:11", "languages": [ @@ -2845,7 +2901,7 @@ "project_name": "jiten", "stargazers_count": 98, "source": "GitHub", - "score": -0.05215994021174808, + "score": -0.05211473518965404, "first_commit": "2020-06-15 02:09:21", "latest_commit": "2023-12-16 23:43:06", "languages": [ @@ -2856,196 +2912,225 @@ "model_or_dataset": "dataset" }, { - "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", - "url": "https://huggingface.co./fishaudio/fish-speech-1.2-sft", - "project_name": "fish-speech-1.2-sft", - "downloads": 3680, + "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoさんが公開しているjapanese-novel-gpt-j-6bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/aibuncho-japanese-novel-gpt-j-6b-gguf", + "project_name": "aibuncho-japanese-novel-gpt-j-6b-gguf", + "downloads": 3704, "source": "Hugging Face", - "score": -0.05267899498109582, - "first_commit": "2024-07-18 08:00:29", - "latest_commit": "2024-08-02 08:13:06", + "score": -0.05236492792197889, + "first_commit": "2023-09-03 17:32:44", + "latest_commit": "2023-09-11 01:10:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.05 }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-v0.1", - "project_name": "Llama-3-Swallow-70B-v0.1", - "downloads": 3648, + "description": "Llama-3.1-70B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-70B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Instruct-gguf", + "project_name": "Llama-3.1-70B-Instruct-gguf", + "downloads": 3685, "source": "Hugging Face", - "score": -0.053131478915225606, - "first_commit": "2024-06-14 05:56:33", - "latest_commit": "2024-07-01 06:24:32", + "score": -0.05264247911301561, + "first_commit": "2024-07-23 17:25:23", + "latest_commit": "2024-07-24 21:04:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 }, { - "description": "Llama-3.1-70B-Japanese-Instruct-2407 Model Description This is a Japanese continually pre-trained model based on meta-llama/Meta-Llama-3.1-70B-Instruct.", - "url": "https://huggingface.co./cyberagent/Llama-3.1-70B-Japanese-Instruct-2407", 
- "project_name": "Llama-3.1-70B-Japanese-Instruct-2407", - "downloads": 3563, + "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b", + "project_name": "ELYZA-japanese-Llama-2-7b", + "downloads": 3655, "source": "Hugging Face", - "score": -0.054333389365257855, - "first_commit": "2024-07-26 01:30:21", - "latest_commit": "2024-07-26 02:30:17", - "languages": [], - "model_or_dataset": "model" + "score": -0.05308071783570518, + "first_commit": "2023-08-28 12:38:34", + "latest_commit": "2023-08-29 03:45:51", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { - "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b", - "project_name": "ELYZA-japanese-Llama-2-7b", - "downloads": 3560, + "description": "gpt-neox-japanese-2.7b", + "url": "https://huggingface.co./abeja/gpt-neox-japanese-2.7b", + "project_name": "gpt-neox-japanese-2.7b", + "downloads": 3469, "source": "Hugging Face", - "score": -0.05437580973408252, - "first_commit": "2023-08-28 12:38:34", - "latest_commit": "2023-08-29 03:45:51", + "score": -0.0557977979163805, + "first_commit": "2022-08-29 02:15:44", + "latest_commit": "2023-04-10 05:12:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "reazonspeech-nemo-v2 reazonspeech-nemo-v2 is an automatic speech recognition model trained on ReazonSpeech v2.0 corpus.", "url": "https://huggingface.co./reazon-research/reazonspeech-nemo-v2", "project_name": "reazonspeech-nemo-v2", - "downloads": 3527, + "downloads": 3415, "source": "Hugging Face", - "score": -0.05484243379115386, + "score": -0.05658662761722172, "first_commit": "2024-01-30 01:49:12", "latest_commit": "2024-02-14 01:32:45", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama-3.1-70B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3.1-70B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-70B-Instruct-gguf", - "project_name": "Llama-3.1-70B-Instruct-gguf", - "downloads": 3491, - "source": "Hugging Face", - "score": -0.055351478217049874, - "first_commit": "2024-07-23 17:25:23", - "latest_commit": "2024-07-24 21:04:27", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "BERTによる日本語固有表現抽出のモデル BertForTokenClassificationを用いて、日本語の文から固有表現を抽出します。 ", "url": "https://huggingface.co./jurabi/bert-ner-japanese", "project_name": "bert-ner-japanese", - "downloads": 3449, + "downloads": 3405, "source": "Hugging Face", - "score": -0.05594536338059522, + "score": -0.05673270719145158, "first_commit": "2022-09-26 07:46:38", "latest_commit": "2022-09-26 12:13:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "OpenCALM-7B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-7b", - "project_name": "open-calm-7b", - "downloads": 3438, + "description": "Model Card for Model ID 実験モデルです /", + "url": "https://huggingface.co./mmnga/Llama-3-70B-japanese-suzume-vector-v0.1", + "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1", + "downloads": 3228, 
"source": "Hugging Face", - "score": -0.05610090473295233, - "first_commit": "2023-05-15 07:53:34", - "latest_commit": "2023-05-18 01:12:08", + "score": -0.059318315655320024, + "first_commit": "2024-04-28 04:11:49", + "latest_commit": "2024-04-28 07:46:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 }, { "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-fast-instruct", "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct", - "downloads": 3358, + "downloads": 3220, "source": "Hugging Face", - "score": -0.0572321145682768, + "score": -0.05943517931470391, "first_commit": "2023-08-28 13:36:19", "latest_commit": "2023-08-29 03:47:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "rinna/japanese-hubert-base Overview This is a Japanese HuBERT Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-hubert-base", - "project_name": "japanese-hubert-base", - "downloads": 3314, + "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", + "url": "https://huggingface.co./fishaudio/fish-speech-1.2-sft", + "project_name": "fish-speech-1.2-sft", + "downloads": 3164, "source": "Hugging Face", - "score": -0.05785427997770526, - "first_commit": "2023-04-28 07:39:44", - "latest_commit": "2024-07-20 08:55:38", + "score": -0.060253224930391105, + "first_commit": "2024-07-18 08:00:29", + "latest_commit": "2024-08-02 08:13:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Model ID 実験モデルです /", - "url": "https://huggingface.co./mmnga/Llama-3-70B-japanese-suzume-vector-v0.1", - "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1", - "downloads": 3283, + "description": "OpenCALM-7B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-7b", + "project_name": "open-calm-7b", + "downloads": 3152, "source": "Hugging Face", - "score": -0.05829262378889349, - "first_commit": "2024-04-28 04:11:49", - "latest_commit": "2024-04-28 07:46:32", + "score": -0.06042852041946693, + "first_commit": "2023-05-15 07:53:34", + "latest_commit": "2023-05-18 01:12:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF Original Model elyza/ELYZA-japanese-Llama-2-13b-fast-instruct Run with LlamaEdge LlamaEdge version: v0.2.8 and above Prompt template Prompt type: llama-2-chat Prompt string <s>[INST] <<SYS>> {{ system_prompt }} <</SYS>> {{ user_msg_1 }}", + "url": "https://huggingface.co./second-state/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "downloads": 3134, + "source": "Hugging Face", + "score": -0.060691463653080674, + "first_commit": "2024-01-06 03:33:53", + "latest_commit": "2024-03-20 07:21:25", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.1 }, { "description": "BERT base Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", "url": 
"https://huggingface.co./tohoku-nlp/bert-base-japanese-char-v3", "project_name": "bert-base-japanese-char-v3", - "downloads": 3182, + "downloads": 3112, "source": "Hugging Face", - "score": -0.059720776205990624, + "score": -0.06101283871638635, "first_commit": "2023-05-19 00:33:09", "latest_commit": "2023-05-19 00:39:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Japanese DeBERTa V2 tiny Model description", - "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese", - "project_name": "deberta-v2-tiny-japanese", - "downloads": 3068, + "description": "Japanese SimCSE (BERT-base)", + "url": "https://huggingface.co./pkshatech/simcse-ja-bert-base-clcmlp", + "project_name": "simcse-ja-bert-base-clcmlp", + "downloads": 2972, "source": "Hugging Face", - "score": -0.06133275022132799, - "first_commit": "2023-01-18 13:36:09", - "latest_commit": "2023-03-23 16:13:46", + "score": -0.06305795275560434, + "first_commit": "2022-12-26 02:52:03", + "latest_commit": "2023-01-27 06:44:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Kotoba-Whisper-v1.1 Kotoba-Whisper-v1.1 is a Japanese ASR model based on kotoba-tech/kotoba-whisper-v1.0, with additional postprocessing stacks integrated as pipeline.", "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1", "project_name": "kotoba-whisper-v1.1", - "downloads": 2984, + "downloads": 2962, "source": "Hugging Face", - "score": -0.06252052054841868, + "score": -0.0632040323298342, "first_commit": "2024-04-29 14:53:48", "latest_commit": "2024-05-08 15:34:40", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-hf", - "project_name": "Swallow-7b-hf", - "downloads": 2982, - "source": "Hugging Face", - "score": -0.06254880079430179, - "first_commit": "2023-11-25 10:09:49", - "latest_commit": "2024-06-29 08:56:17", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.756 }, { "description": "haqishen-Llama-3-8B-Japanese-Instruct-gguf haqishenさんが公開しているLlama-3-8B-Japanese-Instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/haqishen-Llama-3-8B-Japanese-Instruct-gguf", "project_name": "haqishen-Llama-3-8B-Japanese-Instruct-gguf", - "downloads": 2954, + "downloads": 2962, "source": "Hugging Face", - "score": -0.06294472423666535, + "score": -0.0632040323298342, "first_commit": "2024-04-23 13:55:17", "latest_commit": "2024-04-23 14:54:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 + }, + { + "description": "rinna/japanese-hubert-base Overview This is a Japanese HuBERT Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-hubert-base", + "project_name": "japanese-hubert-base", + "downloads": 2946, + "source": "Hugging Face", + "score": -0.06343775964860196, + "first_commit": "2023-04-28 07:39:44", + "latest_commit": "2024-07-20 08:55:38", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.09440000000000001 + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": 
"https://huggingface.co./tokyotech-llm/Swallow-7b-hf", + "project_name": "Swallow-7b-hf", + "downloads": 2921, + "source": "Hugging Face", + "score": -0.0638029585841766, + "first_commit": "2023-11-25 10:09:49", + "latest_commit": "2024-06-29 08:56:17", + "languages": [], + "model_or_dataset": "model", + "model_size": 6.83 }, { "description": "Japanese Company Lexicon (JCLdic)", @@ -3053,7 +3138,7 @@ "project_name": "Japanese-Company-Lexicon", "stargazers_count": 94, "source": "GitHub", - "score": -0.06383236405529288, + "score": -0.0638068971805432, "first_commit": "2020-01-16 15:25:09", "latest_commit": "2023-01-21 14:50:18", "languages": [ @@ -3067,7 +3152,7 @@ "project_name": "genshin-dict", "stargazers_count": 94, "source": "GitHub", - "score": -0.06383236405529288, + "score": -0.0638068971805432, "first_commit": "2021-05-05 00:02:08", "latest_commit": "2024-07-15 16:47:41", "languages": [ @@ -3076,136 +3161,120 @@ "model_or_dataset": "dataset" }, { - "description": "Japanese SimCSE (BERT-base)", - "url": "https://huggingface.co./pkshatech/simcse-ja-bert-base-clcmlp", - "project_name": "simcse-ja-bert-base-clcmlp", - "downloads": 2889, + "description": "Gemma-Mling: Multilingual Gemma Update @ 2024.04.15: First release of Gemma-Mling 7B model Original Gemma Model Page:", + "url": "https://huggingface.co./beomi/gemma-mling-7b", + "project_name": "gemma-mling-7b", + "downloads": 2727, "source": "Hugging Face", - "score": -0.06386383222786648, - "first_commit": "2022-12-26 02:52:03", - "latest_commit": "2023-01-27 06:44:23", + "score": -0.0666369023242358, + "first_commit": "2024-04-15 05:37:05", + "latest_commit": "2024-04-18 14:28:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.54 }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-instruct-hf", - "project_name": "Swallow-7b-NVE-instruct-hf", - "downloads": 2856, - "source": "Hugging Face", - "score": -0.06433045628493783, - "first_commit": "2023-12-07 02:08:59", - "latest_commit": "2024-07-06 15:18:11", + "description": "Japanese Morphological Analyzer written in Rust", + "url": "https://github.com/togatoga/kanpyo", + "project_name": "kanpyo", + "stargazers_count": 93, + "source": "GitHub", + "score": -0.0667299376782655, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "model" + "model_or_dataset": null }, { "description": "OpenCALM-Small Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", "url": "https://huggingface.co./cyberagent/open-calm-small", "project_name": "open-calm-small", - "downloads": 2781, + "downloads": 2648, "source": "Hugging Face", - "score": -0.06539096550555451, + "score": -0.06779093096065167, "first_commit": "2023-05-15 06:40:15", "latest_commit": "2023-05-18 01:10:33", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Gemma-Mling: Multilingual Gemma Update @ 2024.04.15: First release of Gemma-Mling 7B model Original Gemma Model Page:", - "url": "https://huggingface.co./beomi/gemma-mling-7b", - "project_name": "gemma-mling-7b", - "downloads": 2762, - "source": "Hugging Face", - "score": -0.06565962784144408, - "first_commit": "2024-04-15 05:37:05", - "latest_commit": "2024-04-18 14:28:20", - "languages": [], - "model_or_dataset": "model" - }, - { - 
"description": "Chatbot Arena Conversationsの質問文から、aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2を使用して応答文を作成しました 質問文は、以下のモデルのPrompt部分を使用しました Chatbot Arena Conversations JA (calm2) 以下引用です。 ", - "url": "https://huggingface.co./datasets/aixsatoshi/Swallow-MX-chatbot-DPO", - "project_name": "Swallow-MX-chatbot-DPO", - "downloads": 2750, - "source": "Hugging Face", - "score": -0.06582930931674275, - "first_commit": "2024-03-31 06:42:39", - "latest_commit": "2024-03-31 08:16:43", - "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "BERT base Japanese (character tokenization, whole word masking enabled)", - "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-whole-word-masking", - "project_name": "bert-base-japanese-char-whole-word-masking", - "downloads": 2738, + "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with LlamaEdge LlamaEdge version: v0.10.1 and above Prompt template Prompt type: llama-3-chat Prompt string <|begin_of_text|><|start_header_id|>system<|end_header_id|> {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> {{ model_answer_1 }}<|eot_id|><|start_header", + "url": "https://huggingface.co./second-state/Llama-3-8B-Japanese-Instruct-GGUF", + "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", + "downloads": 2630, "source": "Hugging Face", - "score": -0.06599899079204141, - "first_commit": "2020-04-28 21:34:13", - "latest_commit": "2024-02-22 00:58:18", + "score": -0.06805387419426541, + "first_commit": "2024-05-14 05:37:53", + "latest_commit": "2024-05-14 06:42:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "bert-base-japanese-upos Model Description", "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-upos", "project_name": "bert-base-japanese-upos", - "downloads": 2688, + "downloads": 2629, "source": "Hugging Face", - "score": -0.0667059969391192, + "score": -0.0680684821516884, "first_commit": "2021-08-26 23:02:50", "latest_commit": "2022-09-18 19:43:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "FINGU-AI/FinguAI-Chat-v1 Overview The FINGU-AI/FinguAI-Chat-v1 model offers a specialized curriculum tailored to English, Korean, and Japanese speakers interested in finance, investment, and legal frameworks.", "url": "https://huggingface.co./FINGU-AI/FinguAI-Chat-v1", "project_name": "FinguAI-Chat-v1", - "downloads": 2618, + "downloads": 2588, "source": "Hugging Face", - "score": -0.06769580554502812, + "score": -0.0686674084060308, "first_commit": "2024-03-21 07:08:05", "latest_commit": "2024-03-22 09:36:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.464 }, { - "description": "clip-japanese-base This is a Japanese CLIP (Contrastive Language-Image Pre-training) model developed by LY Corporation.", - "url": "https://huggingface.co./line-corporation/clip-japanese-base", - "project_name": "clip-japanese-base", - "downloads": 2548, + "description": "Llama-3-ELYZA-JP-8B-AWQ Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", + "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-AWQ", + "project_name": "Llama-3-ELYZA-JP-8B-AWQ", + "downloads": 2557, "source": "Hugging Face", - 
"score": -0.06868561415093702, - "first_commit": "2024-04-24 01:36:22", - "latest_commit": "2024-05-10 03:07:04", + "score": -0.06912025508614336, + "first_commit": "2024-06-25 04:31:31", + "latest_commit": "2024-06-26 02:56:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.98 }, { - "description": "bert-finetuned-japanese-sentiment This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on product amazon reviews japanese dataset.", - "url": "https://huggingface.co./christian-phu/bert-finetuned-japanese-sentiment", - "project_name": "bert-finetuned-japanese-sentiment", - "downloads": 2505, + "description": "BERT base Japanese (character tokenization, whole word masking enabled)", + "url": "https://huggingface.co./tohoku-nlp/bert-base-japanese-char-whole-word-masking", + "project_name": "bert-base-japanese-char-whole-word-masking", + "downloads": 2537, "source": "Hugging Face", - "score": -0.06929363943742392, - "first_commit": "2023-04-06 16:43:51", - "latest_commit": "2023-04-07 17:27:53", + "score": -0.06941241423460306, + "first_commit": "2020-04-28 21:34:13", + "latest_commit": "2024-02-22 00:58:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "nlp-waseda/roberta-base-japanese Model description This is a Japanese RoBERTa base model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese", - "project_name": "roberta-base-japanese", - "downloads": 2500, + "description": "Chatbot Arena Conversationsの質問文から、aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2を使用して応答文を作成しました 質問文は、以下のモデルのPrompt部分を使用しました Chatbot Arena Conversations JA (calm2) 以下引用です。 ", + "url": "https://huggingface.co./datasets/aixsatoshi/Swallow-MX-chatbot-DPO", + "project_name": "Swallow-MX-chatbot-DPO", + "downloads": 2533, "source": "Hugging Face", - "score": -0.0693643400521317, - "first_commit": "2021-12-20 05:12:06", - "latest_commit": "2022-10-21 14:46:36", + "score": -0.06947084606429502, + "first_commit": "2024-03-31 06:42:39", + "latest_commit": "2024-03-31 08:16:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "50k English-Japanese Parallel Corpus for Machine Translation Benchmark.", @@ -3213,71 +3282,89 @@ "project_name": "small_parallel_enja", "stargazers_count": 92, "source": "GitHub", - "score": -0.06966857597706529, + "score": -0.0696529781759878, "first_commit": "2016-10-27 12:27:03", "latest_commit": "2019-09-11 14:00:17", "languages": [], "model_or_dataset": "dataset" }, { - "description": "Llama-3-ELYZA-JP-8B-AWQ Model Description Llama-3-ELYZA-JP-8B is a large language model trained by ELYZA, Inc.", - "url": "https://huggingface.co./elyza/Llama-3-ELYZA-JP-8B-AWQ", - "project_name": "Llama-3-ELYZA-JP-8B-AWQ", - "downloads": 2468, + "description": "bert-large-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-upos", + "project_name": "bert-large-japanese-upos", + "downloads": 2496, "source": "Hugging Face", - "score": -0.06981682398626149, - "first_commit": "2024-06-25 04:31:31", - "latest_commit": "2024-06-26 02:56:39", + "score": -0.07001134048894547, + "first_commit": "2021-08-19 10:39:38", + "latest_commit": "2022-09-18 19:43:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - 
"description": "Kotoba-Whisper Kotoba-Whisper is a collection of distilled Whisper models for Japanese ASR, developed through the collaboration bewteen Asahi Ushio and Kotoba Technologies.", - "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.0", - "project_name": "kotoba-whisper-v1.0", - "downloads": 2393, + "description": "HODACHI-EZO-Common-9B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-9B-gemma-2-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-9B-gemma-2-it-gguf", + "project_name": "HODACHI-EZO-Common-9B-gemma-2-it-gguf", + "downloads": 2472, "source": "Hugging Face", - "score": -0.07087733320687817, - "first_commit": "2024-04-14 08:53:48", - "latest_commit": "2024-05-08 12:40:53", + "score": -0.07036193146709713, + "first_commit": "2024-07-15 15:42:39", + "latest_commit": "2024-07-15 16:20:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.24 }, { "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-hf", - "project_name": "Swallow-70b-hf", - "downloads": 2390, + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-instruct-hf", + "project_name": "Swallow-7b-NVE-instruct-hf", + "downloads": 2456, "source": "Hugging Face", - "score": -0.07091975357570283, - "first_commit": "2023-11-25 02:13:04", - "latest_commit": "2024-06-29 08:56:23", + "score": -0.0705956587858649, + "first_commit": "2023-12-07 02:08:59", + "latest_commit": "2024-07-06 15:18:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "rinna/youri-7b Overview We conduct continual pre-training of llama2-7b on 40B tokens from a mixture of Japanese and English datasets.", - "url": "https://huggingface.co./rinna/youri-7b", - "project_name": "youri-7b", - "downloads": 2370, + "description": "bert-finetuned-japanese-sentiment This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on product amazon reviews japanese dataset.", + "url": "https://huggingface.co./christian-phu/bert-finetuned-japanese-sentiment", + "project_name": "bert-finetuned-japanese-sentiment", + "downloads": 2385, "source": "Hugging Face", - "score": -0.07120255603453396, - "first_commit": "2023-10-30 15:12:17", - "latest_commit": "2024-07-22 08:01:22", + "score": -0.07163282376289688, + "first_commit": "2023-04-06 16:43:51", + "latest_commit": "2023-04-07 17:27:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "japanese-gpt-neox-3.6b-instruction-ppo Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-ppo", "project_name": "japanese-gpt-neox-3.6b-instruction-ppo", - "downloads": 2337, + "downloads": 2338, "source": "Hugging Face", - "score": -0.0716691800916053, + "score": -0.0723193977617772, "first_commit": "2023-05-30 01:50:48", "latest_commit": "2024-07-20 07:58:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.76 + }, + { + "description": "Model Card for Japanese character-level DeBERTa V2 tiny Model description This is a Japanese DeBERTa V2 tiny model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": 
"https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese-char-wwm", + "project_name": "deberta-v2-tiny-japanese-char-wwm", + "downloads": 2321, + "source": "Hugging Face", + "score": -0.07256773303796796, + "first_commit": "2023-01-05 08:48:29", + "latest_commit": "2023-03-23 07:31:19", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.0101 }, { "description": "natto-py combines the Python programming language with MeCab, the part-of-speech and morphological analyzer for the Japanese language.", @@ -3285,7 +3372,7 @@ "project_name": "natto-py", "stargazers_count": 91, "source": "GitHub", - "score": -0.0725866819379515, + "score": -0.07257601867371008, "first_commit": "2014-10-24 20:56:40", "latest_commit": "2023-11-04 21:25:21", "languages": [ @@ -3301,7 +3388,7 @@ "project_name": "depccg", "stargazers_count": 91, "source": "GitHub", - "score": -0.0725866819379515, + "score": -0.07257601867371008, "first_commit": "2016-10-06 14:39:12", "latest_commit": "2023-08-26 16:03:23", "languages": [ @@ -3316,7 +3403,7 @@ "project_name": "oseti", "stargazers_count": 91, "source": "GitHub", - "score": -0.0725866819379515, + "score": -0.07257601867371008, "first_commit": "2019-02-12 02:03:26", "latest_commit": "2024-01-12 07:14:53", "languages": [ @@ -3325,100 +3412,147 @@ "model_or_dataset": null }, { - "description": "ELYZA-tasks-100: 日本語instructionモデル評価データセット Data Description 本データセットはinstruction-tuningを行ったモデルの評価用データセットです。", - "url": "https://huggingface.co./datasets/elyza/ELYZA-tasks-100", - "project_name": "ELYZA-tasks-100", - "downloads": 2257, + "description": "Kotoba-Whisper Kotoba-Whisper is a collection of distilled Whisper models for Japanese ASR, developed through the collaboration bewteen Asahi Ushio and Kotoba Technologies.", + "url": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.0", + "project_name": "kotoba-whisper-v1.0", + "downloads": 2318, "source": "Hugging Face", - "score": -0.07280038992692976, - "first_commit": "2023-08-28 09:01:44", - "latest_commit": "2023-12-27 18:17:36", + "score": -0.07261155691023691, + "first_commit": "2024-04-14 08:53:48", + "latest_commit": "2024-05-08 12:40:53", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.756 }, { - "description": "stockmark/stockmark-13b Stockmark-13b is a 13 billion parameter LLM pretrained from scratch based on Japanese corpus of about 220B tokens.", - "url": "https://huggingface.co./stockmark/stockmark-13b", - "project_name": "stockmark-13b", - "downloads": 2254, + "description": "このモデルはLuke-japanese-large-liteをファインチューニングしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-large-sentiment-analysis-wrime", + "project_name": "luke-japanese-large-sentiment-analysis-wrime", + "downloads": 2285, "source": "Hugging Face", - "score": -0.07284281029575443, - "first_commit": "2023-10-21 06:53:06", - "latest_commit": "2024-05-17 06:15:56", + "score": -0.07309361950519544, + "first_commit": "2023-03-13 12:40:08", + "latest_commit": "2023-05-15 12:58:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.41400000000000003 }, { - "description": "HODACHI-EZO-Common-9B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-9B-gemma-2-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-9B-gemma-2-it-gguf", - "project_name": "HODACHI-EZO-Common-9B-gemma-2-it-gguf", - "downloads": 2245, + "description": "Llama3 Swallow", + "url": 
"https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-Instruct-v0.1", + "project_name": "Llama-3-Swallow-70B-Instruct-v0.1", + "downloads": 2273, "source": "Hugging Face", - "score": -0.07297007140222844, - "first_commit": "2024-07-15 15:42:39", - "latest_commit": "2024-07-15 16:20:33", + "score": -0.07326891499427127, + "first_commit": "2024-06-28 16:17:32", + "latest_commit": "2024-07-19 08:08:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 }, { - "description": "Phi-3-mini-128k-instruct-gguf microsoftさんが公開しているPhi-3-mini-128k-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Phi-3-mini-128k-instruct-gguf", - "project_name": "Phi-3-mini-128k-instruct-gguf", - "downloads": 2236, + "description": "Stockmark-13b-instruct Stockmark-13b-instruct is an instruction-tuned version of Stockmark-13b, a 13 billion parameter Japanese LLM.", + "url": "https://huggingface.co./stockmark/stockmark-13b-instruct", + "project_name": "stockmark-13b-instruct", + "downloads": 2264, "source": "Hugging Face", - "score": -0.07309733250870244, - "first_commit": "2024-04-24 13:50:51", - "latest_commit": "2024-04-24 14:24:09", + "score": -0.07340038661107813, + "first_commit": "2023-11-08 16:56:34", + "latest_commit": "2023-11-08 17:02:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.2 }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-v0.1", - "project_name": "Swallow-13b-instruct-v0.1", - "downloads": 2215, + "description": "clip-japanese-base This is a Japanese CLIP (Contrastive Language-Image Pre-training) model developed by LY Corporation.", + "url": "https://huggingface.co./line-corporation/clip-japanese-base", + "project_name": "clip-japanese-base", + "downloads": 2244, "source": "Hugging Face", - "score": -0.07339427509047511, - "first_commit": "2024-03-04 11:30:28", - "latest_commit": "2024-06-29 09:00:15", + "score": -0.07369254575953785, + "first_commit": "2024-04-24 01:36:22", + "latest_commit": "2024-05-10 03:07:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.197 }, { "description": "llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset 「大規模言語モデル入門」の第6章で紹介している固有表現認識のモデルです。 ", "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-crf-ner-wikipedia-dataset", "project_name": "bert-base-japanese-v3-crf-ner-wikipedia-dataset", - "downloads": 2208, + "downloads": 2216, "source": "Hugging Face", - "score": -0.073493255951066, + "score": -0.07410156856738144, "first_commit": "2023-05-28 08:19:43", "latest_commit": "2023-07-25 15:04:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはLuke-japanese-large-liteをファインチューニングしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-large-sentiment-analysis-wrime", - "project_name": "luke-japanese-large-sentiment-analysis-wrime", - "downloads": 2170, + "description": "stockmark/stockmark-13b Stockmark-13b is a 13 billion parameter LLM pretrained from scratch based on Japanese corpus of about 220B tokens.", + "url": "https://huggingface.co./stockmark/stockmark-13b", + "project_name": "stockmark-13b", + "downloads": 2209, "source": "Hugging Face", - "score": -0.07403058062284512, - "first_commit": 
"2023-03-13 12:40:08", - "latest_commit": "2023-05-15 12:58:08", + "score": -0.07420382426934234, + "first_commit": "2023-10-21 06:53:06", + "latest_commit": "2024-05-17 06:15:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.2 }, { - "description": "Llama-3-Swallow-8B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-8B-Instruct-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3-Swallow-8B-Instruct-v0.1-gguf", - "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-gguf", - "downloads": 2114, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-instruct-v0.1", + "project_name": "Swallow-13b-instruct-v0.1", + "downloads": 2208, "source": "Hugging Face", - "score": -0.07482242750757225, - "first_commit": "2024-07-01 16:42:54", - "latest_commit": "2024-07-02 10:43:55", + "score": -0.07421843222676533, + "first_commit": "2024-03-04 11:30:28", + "latest_commit": "2024-06-29 09:00:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 + }, + { + "description": "nlp-waseda/roberta-base-japanese Model description This is a Japanese RoBERTa base model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./nlp-waseda/roberta-base-japanese", + "project_name": "roberta-base-japanese", + "downloads": 2160, + "source": "Hugging Face", + "score": -0.07491961418306864, + "first_commit": "2021-12-20 05:12:06", + "latest_commit": "2022-10-21 14:46:36", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Phi-3-mini-128k-instruct-gguf microsoftさんが公開しているPhi-3-mini-128k-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Phi-3-mini-128k-instruct-gguf", + "project_name": "Phi-3-mini-128k-instruct-gguf", + "downloads": 2156, + "source": "Hugging Face", + "score": -0.07497804601276058, + "first_commit": "2024-04-24 13:50:51", + "latest_commit": "2024-04-24 14:24:09", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.82 + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-hf", + "project_name": "Swallow-70b-hf", + "downloads": 2122, + "source": "Hugging Face", + "score": -0.07547471656514208, + "first_commit": "2023-11-25 02:13:04", + "latest_commit": "2024-06-29 08:56:23", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Pytorch implementation and pre-trained Japanese model for CANINE, the efficient character-level transformer.", @@ -3426,7 +3560,7 @@ "project_name": "shiba", "stargazers_count": 90, "source": "GitHub", - "score": -0.07550478789883769, + "score": -0.07549905917143238, "first_commit": "2021-06-24 20:17:27", "latest_commit": "2023-11-03 10:01:53", "languages": [ @@ -3440,7 +3574,7 @@ "project_name": "open2ch-dialogue-corpus", "stargazers_count": 90, "source": "GitHub", - "score": -0.07550478789883769, + "score": -0.07549905917143238, "first_commit": "2019-09-13 11:21:53", "latest_commit": "2021-06-07 00:06:23", "languages": [ @@ -3449,328 +3583,264 @@ "model_or_dataset": "dataset" }, { - "description": "Swallow Our Swallow model has undergone continual 
pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-hf", - "project_name": "Swallow-70b-instruct-hf", - "downloads": 2052, - "source": "Hugging Face", - "score": -0.07569911512994872, - "first_commit": "2023-12-11 07:23:47", - "latest_commit": "2024-06-29 08:56:31", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with LlamaEdge LlamaEdge version: v0.10.1 and above Prompt template Prompt type: llama-3-chat Prompt string <|begin_of_text|><|start_header_id|>system<|end_header_id|> {{ system_prompt }}<|eot_id|><|start_header_id|>user<|end_header_id|> {{ user_message_1 }}<|eot_id|><|start_header_id|>assistant<|end_header_id|> {{ model_answer_1 }}<|eot_id|><|start_header", - "url": "https://huggingface.co./second-state/Llama-3-8B-Japanese-Instruct-GGUF", - "project_name": "Llama-3-8B-Japanese-Instruct-GGUF", - "downloads": 2051, + "description": "ELYZA-tasks-100: 日本語instructionモデル評価データセット Data Description 本データセットはinstruction-tuningを行ったモデルの評価用データセットです。", + "url": "https://huggingface.co./datasets/elyza/ELYZA-tasks-100", + "project_name": "ELYZA-tasks-100", + "downloads": 2113, "source": "Hugging Face", - "score": -0.07571325525289027, - "first_commit": "2024-05-14 05:37:53", - "latest_commit": "2024-05-14 06:42:38", + "score": -0.07560618818194896, + "first_commit": "2023-08-28 09:01:44", + "latest_commit": "2023-12-27 18:17:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Llama3 Swallow", - "url": "https://huggingface.co./tokyotech-llm/Llama-3-Swallow-70B-Instruct-v0.1", - "project_name": "Llama-3-Swallow-70B-Instruct-v0.1", - "downloads": 2018, + "description": "rinna/youri-7b Overview We conduct continual pre-training of llama2-7b on 40B tokens from a mixture of Japanese and English datasets.", + "url": "https://huggingface.co./rinna/youri-7b", + "project_name": "youri-7b", + "downloads": 2108, "source": "Hugging Face", - "score": -0.0761798793099616, - "first_commit": "2024-06-28 16:17:32", - "latest_commit": "2024-07-19 08:08:59", + "score": -0.07567922796906389, + "first_commit": "2023-10-30 15:12:17", + "latest_commit": "2024-07-22 08:01:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "🎈 FlexDreamHK FlexDreamHKはリークされたNovelAIモデルの入っていない、あるいはそのリスクを可能な限り低くしたモデルを目指して作成しました。 ", - "url": "https://huggingface.co./den2nova/FlexDreamHK", - "project_name": "FlexDreamHK", - "downloads": 1995, + "description": "Llama-3-Swallow-8B-Instruct-v0.1-gguf tokyotech-llmさんが公開しているLlama-3-Swallow-8B-Instruct-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3-Swallow-8B-Instruct-v0.1-gguf", + "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-gguf", + "downloads": 2038, "source": "Hugging Face", - "score": -0.0765051021376174, - "first_commit": "2023-07-06 10:11:45", - "latest_commit": "2023-07-29 04:21:29", + "score": -0.07670178498867287, + "first_commit": "2024-07-01 16:42:54", + "latest_commit": "2024-07-02 10:43:55", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "rinna/nekomata-7b Overview We conduct continual pre-training of qwen-7b on 30B tokens from a mixture of Japanese and English datasets.", - "url": 
"https://huggingface.co./rinna/nekomata-7b", - "project_name": "nekomata-7b", - "downloads": 1986, + "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jsts", + "project_name": "bert-base-japanese-v3-jsts", + "downloads": 2035, "source": "Hugging Face", - "score": -0.0766323632440914, - "first_commit": "2023-12-19 06:58:44", - "latest_commit": "2024-07-20 08:35:21", + "score": -0.07674560886094184, + "first_commit": "2023-06-11 15:27:32", + "latest_commit": "2023-07-29 11:27:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "ELYZA-japanese-Llama-2-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-instruct-gguf", "project_name": "ELYZA-japanese-Llama-2-7b-instruct-gguf", - "downloads": 1976, + "downloads": 2022, "source": "Hugging Face", - "score": -0.07677376447350695, + "score": -0.07693551230744065, "first_commit": "2023-08-29 05:33:45", "latest_commit": "2023-11-16 14:27:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 + }, + { + "description": "Llama-3.1-8B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-8B-EZO-1.1-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-8B-EZO-1.1-it-gguf", + "project_name": "Llama-3.1-8B-EZO-1.1-it-gguf", + "downloads": 2011, + "source": "Hugging Face", + "score": -0.0770961998390935, + "first_commit": "2024-07-31 11:06:36", + "latest_commit": "2024-07-31 12:47:45", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "Ninja-v1-NSFW-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFW-128kのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-128k-gguf", "project_name": "Ninja-v1-NSFW-128k-gguf", - "downloads": 1973, + "downloads": 1969, "source": "Hugging Face", - "score": -0.07681618484233162, + "score": -0.07770973405085889, "first_commit": "2024-05-01 17:45:52", "latest_commit": "2024-05-04 13:25:47", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-small-short", - "project_name": "t5-small-short", - "downloads": 1970, - "source": "Hugging Face", - "score": -0.07685860521115628, - "first_commit": "2023-04-25 04:37:20", - "latest_commit": "2023-05-10 09:55:39", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jsts", - "project_name": "bert-base-japanese-v3-jsts", - "downloads": 1953, + "description": "rinna/nekomata-7b Overview We conduct continual pre-training of qwen-7b on 30B tokens from a mixture of Japanese and English datasets.", + "url": "https://huggingface.co./rinna/nekomata-7b", + "project_name": "nekomata-7b", + "downloads": 1952, "source": "Hugging Face", - "score": -0.07709898730116274, - "first_commit": "2023-06-11 15:27:32", - "latest_commit": "2023-07-29 11:27:18", + "score": -0.07795806932704964, + "first_commit": "2023-12-19 06:58:44", + "latest_commit": "2024-07-20 08:35:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.72 }, { - "description": 
"このモデルはluke-japanese-baseをファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-ner", - "project_name": "luke-japanese-base-finetuned-ner", - "downloads": 1946, + "description": "ElanMT ElanMT-BT-ja-en is a Japanese to English translation model developed by ELAN MITSUA Project / Abstract Engine.", + "url": "https://huggingface.co./Mitsua/elan-mt-bt-ja-en", + "project_name": "elan-mt-bt-ja-en", + "downloads": 1950, "source": "Hugging Face", - "score": -0.07719796816175363, - "first_commit": "2023-01-17 23:36:52", - "latest_commit": "2023-05-12 00:36:17", + "score": -0.0779872852418956, + "first_commit": "2024-05-20 01:56:12", + "latest_commit": "2024-05-20 01:56:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.0606 }, { - "description": "bert-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-upos", - "project_name": "bert-large-japanese-upos", + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-hf", + "project_name": "Swallow-70b-instruct-hf", "downloads": 1929, "source": "Hugging Face", - "score": -0.07743835025176007, - "first_commit": "2021-08-19 10:39:38", - "latest_commit": "2022-09-18 19:43:53", + "score": -0.07829405234777831, + "first_commit": "2023-12-11 07:23:47", + "latest_commit": "2024-06-29 08:56:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.2 }, { - "description": "ElanMT ElanMT-BT-ja-en is a Japanese to English translation model developed by ELAN MITSUA Project / Abstract Engine.", - "url": "https://huggingface.co./Mitsua/elan-mt-bt-ja-en", - "project_name": "elan-mt-bt-ja-en", - "downloads": 1925, + "description": "🎈 FlexDreamHK FlexDreamHKはリークされたNovelAIモデルの入っていない、あるいはそのリスクを可能な限り低くしたモデルを目指して作成しました。 ", + "url": "https://huggingface.co./den2nova/FlexDreamHK", + "project_name": "FlexDreamHK", + "downloads": 1845, "source": "Hugging Face", - "score": -0.0774949107435263, - "first_commit": "2024-05-20 01:56:12", - "latest_commit": "2024-05-20 01:56:57", + "score": -0.0795211207713091, + "first_commit": "2023-07-06 10:11:45", + "latest_commit": "2023-07-29 04:21:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "japanese-large-lm-3.6b", "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b", "project_name": "japanese-large-lm-3.6b", - "downloads": 1909, + "downloads": 1835, "source": "Hugging Face", - "score": -0.0777211527105912, + "score": -0.07966720034553895, "first_commit": "2023-07-21 00:48:05", "latest_commit": "2023-08-17 01:06:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.68 }, { - "description": "japanese-gpt2-small This repository provides a small-sized Japanese GPT-2 model.", - "url": "https://huggingface.co./rinna/japanese-gpt2-small", - "project_name": "japanese-gpt2-small", - "downloads": 1899, + "description": "sbert-jsnli-luke-japanese-base-lite This is a sentence-transformers model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.", + "url": "https://huggingface.co./oshizo/sbert-jsnli-luke-japanese-base-lite", + "project_name": 
"sbert-jsnli-luke-japanese-base-lite", + "downloads": 1830, "source": "Hugging Face", - "score": -0.07786255394000675, - "first_commit": "2021-06-15 06:32:27", - "latest_commit": "2024-07-20 07:49:31", + "score": -0.07974024013265388, + "first_commit": "2023-01-10 11:53:15", + "latest_commit": "2023-01-10 12:36:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-hf", - "project_name": "Swallow-13b-hf", - "downloads": 1888, + "description": "ELYZA-japanese-CodeLlama-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-gguf", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-gguf", + "downloads": 1828, "source": "Hugging Face", - "score": -0.07801809529236386, - "first_commit": "2023-11-16 15:40:49", - "latest_commit": "2024-06-29 08:56:21", + "score": -0.07976945604749985, + "first_commit": "2023-11-15 09:48:32", + "latest_commit": "2023-11-16 14:28:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "Llama-3.1-8B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-8B-EZO-1.1-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-8B-EZO-1.1-it-gguf", - "project_name": "Llama-3.1-8B-EZO-1.1-it-gguf", - "downloads": 1876, + "description": "This dataset was created by automatically translating \"databricks-dolly-15k\" into Japanese.", + "url": "https://huggingface.co./datasets/kunishou/databricks-dolly-15k-ja", + "project_name": "databricks-dolly-15k-ja", + "downloads": 1812, "source": "Hugging Face", - "score": -0.07818777676766253, - "first_commit": "2024-07-31 11:06:36", - "latest_commit": "2024-07-31 12:47:45", + "score": -0.08000318336626762, + "first_commit": "2023-04-13 08:31:08", + "latest_commit": "2024-04-01 17:26:37", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "sbert-jsnli-luke-japanese-base-lite This is a sentence-transformers model: It maps sentences & paragraphs to a 768 dimensional dense vector space and can be used for tasks like clustering or semantic search.", - "url": "https://huggingface.co./oshizo/sbert-jsnli-luke-japanese-base-lite", - "project_name": "sbert-jsnli-luke-japanese-base-lite", - "downloads": 1857, - "source": "Hugging Face", - "score": -0.0784564391035521, - "first_commit": "2023-01-10 11:53:15", - "latest_commit": "2023-01-10 12:36:12", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "OpenCALM-Large Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-large", - "project_name": "open-calm-large", - "downloads": 1856, - "source": "Hugging Face", - "score": -0.07847057922649366, - "first_commit": "2023-05-15 06:50:24", - "latest_commit": "2023-05-18 01:11:13", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "このモデルは何? 
", "url": "https://huggingface.co./Lasorco/lametta", "project_name": "lametta", - "downloads": 1848, + "downloads": 1775, "source": "Hugging Face", - "score": -0.0785837002100261, + "score": -0.08054367779091809, "first_commit": "2023-03-28 14:29:55", "latest_commit": "2023-11-08 07:37:12", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This dataset was created by automatically translating \"databricks-dolly-15k\" into Japanese.", - "url": "https://huggingface.co./datasets/kunishou/databricks-dolly-15k-ja", - "project_name": "databricks-dolly-15k-ja", - "downloads": 1838, - "source": "Hugging Face", - "score": -0.07872510143944166, - "first_commit": "2023-04-13 08:31:08", - "latest_commit": "2024-04-01 17:26:37", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "ELYZA-japanese-CodeLlama-7b-instruct-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-gguf", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-gguf", - "downloads": 1833, - "source": "Hugging Face", - "score": -0.07879580205414943, - "first_commit": "2023-11-15 09:48:32", - "latest_commit": "2023-11-16 14:28:24", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF Original Model elyza/ELYZA-japanese-Llama-2-13b-fast-instruct Run with LlamaEdge LlamaEdge version: v0.2.8 and above Prompt template Prompt type: llama-2-chat Prompt string <s>[INST] <<SYS>> {{ system_prompt }} <</SYS>> {{ user_msg_1 }}", - "url": "https://huggingface.co./second-state/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", - "downloads": 1774, - "source": "Hugging Face", - "score": -0.07963006930770124, - "first_commit": "2024-01-06 03:33:53", - "latest_commit": "2024-03-20 07:21:25", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama-3-ELYZA-JP-8B-gguf elyzaさんが公開しているLlama-3-ELYZA-JP-8Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3-ELYZA-JP-8B-gguf", - "project_name": "Llama-3-ELYZA-JP-8B-gguf", - "downloads": 1747, - "source": "Hugging Face", - "score": -0.08001185262712324, - "first_commit": "2024-06-26 16:36:04", - "latest_commit": "2024-06-26 17:55:35", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "bilingual-gpt-neox-4b Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", - "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b", - "project_name": "bilingual-gpt-neox-4b", - "downloads": 1697, + "description": "OpenCALM-Large Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-large", + "project_name": "open-calm-large", + "downloads": 1756, "source": "Hugging Face", - "score": -0.08071885877420103, - "first_commit": "2023-07-31 02:34:03", - "latest_commit": "2024-07-20 08:02:07", + "score": -0.08082122898195482, + "first_commit": "2023-05-15 06:50:24", + "latest_commit": "2023-05-18 01:11:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", - "url": "https://huggingface.co./rinna/japanese-gpt-1b", - 
"project_name": "japanese-gpt-1b", - "downloads": 1674, + "description": "japanese-gpt2-small This repository provides a small-sized Japanese GPT-2 model.", + "url": "https://huggingface.co./rinna/japanese-gpt2-small", + "project_name": "japanese-gpt2-small", + "downloads": 1739, "source": "Hugging Face", - "score": -0.08104408160185682, - "first_commit": "2022-01-20 02:30:19", - "latest_commit": "2024-07-20 07:52:31", + "score": -0.08106956425814557, + "first_commit": "2021-06-15 06:32:27", + "latest_commit": "2024-07-20 07:49:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.123 }, { - "description": "Wav2Vec2-Large-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice, JSUT, TEDxJP and some other data.", - "url": "https://huggingface.co./NTQAI/wav2vec2-large-japanese", - "project_name": "wav2vec2-large-japanese", - "downloads": 1671, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-hf", + "project_name": "Swallow-13b-hf", + "downloads": 1731, "source": "Hugging Face", - "score": -0.08108650197068148, - "first_commit": "2021-07-05 02:44:40", - "latest_commit": "2023-02-17 13:07:47", + "score": -0.08118642791752945, + "first_commit": "2023-11-16 15:40:49", + "latest_commit": "2024-06-29 08:56:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese E5 Mixtral 7B Slerp GGUF GGUF conversion of oshizo/japanese-e5-mistral-7b_slerp Avaiable formats: Q2_K.gguf Q3_K.gguf Q4_K.gguf Q5_K.gguf", - "url": "https://huggingface.co./mm/japanese-e5-mistral-7b_slerp_gguf", - "project_name": "japanese-e5-mistral-7b_slerp_gguf", - "downloads": 1661, + "description": "このモデルはluke-japanese-baseをファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-ner", + "project_name": "luke-japanese-base-finetuned-ner", + "downloads": 1725, "source": "Hugging Face", - "score": -0.08122790320009704, - "first_commit": "2024-06-09 08:34:37", - "latest_commit": "2024-06-14 16:12:17", + "score": -0.08127407566206736, + "first_commit": "2023-01-17 23:36:52", + "latest_commit": "2023-05-12 00:36:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.279 }, { "description": "A Python Module for JUMAN++/KNP", @@ -3778,7 +3848,7 @@ "project_name": "pyknp", "stargazers_count": 88, "source": "GitHub", - "score": -0.08134099982061009, + "score": -0.08134514016687697, "first_commit": "2015-04-08 15:25:47", "latest_commit": "2024-07-06 15:16:48", "languages": [ @@ -3792,7 +3862,7 @@ "project_name": "STAIR-captions", "stargazers_count": 88, "source": "GitHub", - "score": -0.08134099982061009, + "score": -0.08134514016687697, "first_commit": "2017-02-21 16:49:14", "latest_commit": "2018-07-04 18:24:35", "languages": [], @@ -3804,7 +3874,7 @@ "project_name": "NMeCab", "stargazers_count": 88, "source": "GitHub", - "score": -0.08134099982061009, + "score": -0.08134514016687697, "first_commit": "2014-04-24 17:34:29", "latest_commit": "2024-03-31 03:51:55", "languages": [ @@ -3813,328 +3883,419 @@ "model_or_dataset": "dataset" }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": 
"https://huggingface.co./tokyotech-llm/Swallow-7b-plus-hf", - "project_name": "Swallow-7b-plus-hf", - "downloads": 1652, + "description": "Model Card for Japanese DeBERTa V2 tiny Model description", + "url": "https://huggingface.co./ku-nlp/deberta-v2-tiny-japanese", + "project_name": "deberta-v2-tiny-japanese", + "downloads": 1711, "source": "Hugging Face", - "score": -0.08135516430657104, - "first_commit": "2024-02-29 11:28:52", - "latest_commit": "2024-06-29 08:56:19", + "score": -0.08147858706598916, + "first_commit": "2023-01-18 13:36:09", + "latest_commit": "2023-03-23 16:13:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.013900000000000001 + }, + { + "description": "OpenCALM-3B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-3b", + "project_name": "open-calm-3b", + "downloads": 1699, + "source": "Hugging Face", + "score": -0.08165388255506499, + "first_commit": "2023-05-15 07:14:36", + "latest_commit": "2023-05-18 01:11:50", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Llama-3-ELYZA-JP-8B-gguf elyzaさんが公開しているLlama-3-ELYZA-JP-8Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3-ELYZA-JP-8B-gguf", + "project_name": "Llama-3-ELYZA-JP-8B-gguf", + "downloads": 1690, + "source": "Hugging Face", + "score": -0.08178535417187187, + "first_commit": "2024-06-26 16:36:04", + "latest_commit": "2024-06-26 17:55:35", + "languages": [], + "model_or_dataset": "model", + "model_size": 8.03 + }, + { + "description": "Japanese E5 Mixtral 7B Slerp GGUF GGUF conversion of oshizo/japanese-e5-mistral-7b_slerp Avaiable formats: Q2_K.gguf Q3_K.gguf Q4_K.gguf Q5_K.gguf", + "url": "https://huggingface.co./mm/japanese-e5-mistral-7b_slerp_gguf", + "project_name": "japanese-e5-mistral-7b_slerp_gguf", + "downloads": 1672, + "source": "Hugging Face", + "score": -0.08204829740548561, + "first_commit": "2024-06-09 08:34:37", + "latest_commit": "2024-06-14 16:12:17", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 }, { "description": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf HODACHI��んが公開しているEZO-Humanities-9B-gemma-2-itのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", "project_name": "HODACHI-EZO-Humanities-9B-gemma-2-it-gguf", - "downloads": 1643, + "downloads": 1658, "source": "Hugging Face", - "score": -0.08148242541304504, + "score": -0.0822528088094074, "first_commit": "2024-07-15 15:43:00", "latest_commit": "2024-07-15 17:01:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.24 }, { - "description": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "project_name": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", - "downloads": 1634, + "description": "Wav2Vec2-Large-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice, JSUT, TEDxJP and some other data.", + "url": "https://huggingface.co./NTQAI/wav2vec2-large-japanese", + "project_name": "wav2vec2-large-japanese", + "downloads": 1637, "source": "Hugging Face", - "score": -0.08160968651951905, - "first_commit": "2024-01-29 12:52:31", - "latest_commit": "2024-02-07 
19:49:25", + "score": -0.0825595759152901, + "first_commit": "2021-07-05 02:44:40", + "latest_commit": "2023-02-17 13:07:47", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-large-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-large-v1", - "project_name": "japanese-reranker-cross-encoder-large-v1", + "description": "Japanese-StableLM-Base-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-7b", + "project_name": "japanese-stablelm-base-beta-7b", "downloads": 1620, "source": "Hugging Face", - "score": -0.08180764824070083, - "first_commit": "2024-03-28 20:53:30", - "latest_commit": "2024-04-01 02:39:45", + "score": -0.08280791119148086, + "first_commit": "2023-10-30 07:43:36", + "latest_commit": "2023-12-19 06:43:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "japanese-gpt-neox-3.6b-instruction-sft Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft", - "project_name": "japanese-gpt-neox-3.6b-instruction-sft", - "downloads": 1614, + "description": "Japanese-StableLM-Base-Alpha-7B \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-base-alpha-7b is a 7B-parameter decoder-only language model pre-trained on a diverse collection of Japanese and English datasets which focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-alpha-7b", + "project_name": "japanese-stablelm-base-alpha-7b", + "downloads": 1610, "source": "Hugging Face", - "score": -0.08189248897835016, - "first_commit": "2023-05-17 02:16:28", - "latest_commit": "2024-07-20 07:56:34", + "score": -0.08295399076571071, + "first_commit": "2023-08-09 14:30:09", + "latest_commit": "2023-08-22 09:36:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "stockmark/gpt-neox-japanese-1.4b This repository provides a GPT-NeoX based model with 1.4B parameters pre-trained on Japanese corpus of about 20B tokens.", - "url": "https://huggingface.co./stockmark/gpt-neox-japanese-1.4b", - "project_name": "gpt-neox-japanese-1.4b", - "downloads": 1574, + "description": "bilingual-gpt-neox-4b Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", + "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b", + "project_name": "bilingual-gpt-neox-4b", + "downloads": 1595, "source": "Hugging Face", - "score": -0.0824580938960124, - "first_commit": "2023-08-06 07:37:38", - "latest_commit": "2023-09-07 03:44:19", + "score": -0.0831731101270555, + "first_commit": "2023-07-31 02:34:03", + "latest_commit": "2024-07-20 08:02:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": 
"model", + "model_size": 3.95 + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-plus-hf", + "project_name": "Swallow-7b-plus-hf", + "downloads": 1587, + "source": "Hugging Face", + "score": -0.08328997378643938, + "first_commit": "2024-02-29 11:28:52", + "latest_commit": "2024-06-29 08:56:19", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", + "url": "https://huggingface.co./rinna/japanese-gpt-1b", + "project_name": "japanese-gpt-1b", + "downloads": 1530, + "source": "Hugging Face", + "score": -0.08412262735954956, + "first_commit": "2022-01-20 02:30:19", + "latest_commit": "2024-07-20 07:52:31", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.33 }, { "description": "Japanese Stable LM Instruct Gamma 7B Model Description", "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-gamma-7b", "project_name": "japanese-stablelm-instruct-gamma-7b", - "downloads": 1572, + "downloads": 1523, "source": "Hugging Face", - "score": -0.08248637414189551, + "score": -0.08422488306151045, "first_commit": "2023-10-16 08:55:06", "latest_commit": "2024-01-24 05:54:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "alpacaデータセットを日本語化したものです", + "url": "https://github.com/shi3z/alpaca_ja", + "project_name": "alpaca_ja", + "stargazers_count": 87, + "source": "GitHub", + "score": -0.08426818066459926, + "first_commit": "2023-04-01 07:03:33", + "latest_commit": "2023-05-17 15:43:50", + "languages": [], + "model_or_dataset": "dataset" }, { "description": "Model Card for Japanese DeBERTa V3 base Model description This is a Japanese DeBERTa V3 base model pre-trained on LLM-jp corpus v1.0.", "url": "https://huggingface.co./ku-nlp/deberta-v3-base-japanese", "project_name": "deberta-v3-base-japanese", - "downloads": 1559, + "downloads": 1514, "source": "Hugging Face", - "score": -0.08267019574013573, + "score": -0.08435635467831733, "first_commit": "2024-04-23 05:08:21", "latest_commit": "2024-04-28 06:08:55", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "roberta-small-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-luw-upos", + "project_name": "roberta-small-japanese-luw-upos", + "downloads": 1482, + "source": "Hugging Face", + "score": -0.08482380931585287, + "first_commit": "2021-11-03 05:51:58", + "latest_commit": "2022-09-18 19:45:09", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "ELYZA-japanese-Llama-2-7b Model Description ELYZA-japanese-Llama-2-7b は、 Llama2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-7b-fast", "project_name": "ELYZA-japanese-Llama-2-7b-fast", - "downloads": 1554, + "downloads": 1466, "source": "Hugging Face", - "score": -0.08274089635484351, + "score": -0.08505753663462064, "first_commit": "2023-08-28 13:17:58", "latest_commit": "2023-08-29 03:46:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-StableLM-Base-Alpha-7B \"A parrot able to speak Japanese, 
ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-base-alpha-7b is a 7B-parameter decoder-only language model pre-trained on a diverse collection of Japanese and English datasets which focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-alpha-7b", - "project_name": "japanese-stablelm-base-alpha-7b", - "downloads": 1515, + "description": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "project_name": "llm-jp-13b-instruct-full-dolly_en-dolly_ja-ichikara_003_001-oasst_en-oasst_ja-v1.1", + "downloads": 1458, "source": "Hugging Face", - "score": -0.08329236114956419, - "first_commit": "2023-08-09 14:30:09", - "latest_commit": "2023-08-22 09:36:29", + "score": -0.08517440029400451, + "first_commit": "2024-01-29 12:52:31", + "latest_commit": "2024-02-07 19:49:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 12.9 + }, + { + "description": "hotchpotch/japanese-reranker-cross-encoder-large-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-large-v1", + "project_name": "japanese-reranker-cross-encoder-large-v1", + "downloads": 1451, + "source": "Hugging Face", + "score": -0.08527665599596541, + "first_commit": "2024-03-28 20:53:30", + "latest_commit": "2024-04-01 02:39:45", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.337 }, { "description": "Japanese-StableLM-Base-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-70b is a 70B-parameter decoder-only language model based on Llama-2-70b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-70b", "project_name": "japanese-stablelm-base-beta-70b", - "downloads": 1513, + "downloads": 1445, "source": "Hugging Face", - "score": -0.0833206413954473, + "score": -0.08536430374050333, "first_commit": "2023-10-30 07:46:28", "latest_commit": "2023-12-19 06:44:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.0 }, { - "description": "roberta-small-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-luw-upos", - "project_name": "roberta-small-japanese-luw-upos", - "downloads": 1503, - "source": "Hugging Face", - "score": -0.08346204262486286, - "first_commit": "2021-11-03 05:51:58", - "latest_commit": "2022-09-18 19:45:09", + "description": "Ninja-v1-NSFW-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFWのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-gguf", + "project_name": "Ninja-v1-NSFW-gguf", + "downloads": 1432, + "source": "Hugging Face", + "score": -0.08555420718700214, + "first_commit": "2024-05-03 14:03:23", + "latest_commit": "2024-05-04 13:26:52", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Japanese-StableLM-Instruct-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model 
Description japanese-stablelm-instruct-beta-70b is a 70B-parameter decoder-only language model based on japanese-stablelm-base-beta-70b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-70b", - "project_name": "japanese-stablelm-instruct-beta-70b", - "downloads": 1491, + "description": "Mistral-Nemo-Instruct-2407-gguf mistralaiさんが公開しているMistral-Nemo-Instruct-2407のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Mistral-Nemo-Instruct-2407-gguf", + "project_name": "Mistral-Nemo-Instruct-2407-gguf", + "downloads": 1418, "source": "Hugging Face", - "score": -0.08363172410016154, - "first_commit": "2023-10-30 07:47:31", - "latest_commit": "2023-12-19 06:45:10", + "score": -0.08575871859092395, + "first_commit": "2024-07-22 13:28:13", + "latest_commit": "2024-07-22 17:25:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Description llava-calm2-siglip is an experimental Vision Language Model that can answer questions in Japanese about images.", - "url": "https://huggingface.co./cyberagent/llava-calm2-siglip", - "project_name": "llava-calm2-siglip", - "downloads": 1477, + "description": "OpenCALM-1B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-1b", + "project_name": "open-calm-1b", + "downloads": 1412, "source": "Hugging Face", - "score": -0.08382968582134331, - "first_commit": "2024-06-12 19:35:20", - "latest_commit": "2024-06-12 19:40:39", + "score": -0.08584636633546186, + "first_commit": "2023-05-15 07:00:18", + "latest_commit": "2023-05-18 01:11:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "ELYZA-japanese-Llama-2-13b-fast-instruct Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-fast-instruct", "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct", - "downloads": 1465, + "downloads": 1410, "source": "Hugging Face", - "score": -0.08399936729664198, + "score": -0.08587558225030782, "first_commit": "2023-12-25 18:14:10", "latest_commit": "2023-12-27 01:41:51", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", - "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b", - "project_name": "ELYZA-japanese-Llama-2-13b", - "downloads": 1456, - "source": "Hugging Face", - "score": -0.08412662840311598, - "first_commit": "2023-12-25 16:38:08", - "latest_commit": "2023-12-27 01:40:43", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "alpacaデータセットを日本語化したものです", - "url": "https://github.com/shi3z/alpaca_ja", - "project_name": "alpaca_ja", - "stargazers_count": 87, - "source": "GitHub", - "score": -0.0842591057814963, - "first_commit": "2023-04-01 07:03:33", - "latest_commit": "2023-05-17 15:43:50", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Ninja-v1-NSFW-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-NSFWのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-NSFW-gguf", - "project_name": "Ninja-v1-NSFW-gguf", - "downloads": 1429, - "source": "Hugging Face", - 
"score": -0.084508411722538, - "first_commit": "2024-05-03 14:03:23", - "latest_commit": "2024-05-04 13:26:52", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "OpenCALM-1B Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-1b", - "project_name": "open-calm-1b", - "downloads": 1388, - "source": "Hugging Face", - "score": -0.08508815676314178, - "first_commit": "2023-05-15 07:00:18", - "latest_commit": "2023-05-18 01:11:30", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Mistral-Nemo-Instruct-2407-gguf mistralaiさんが公開しているMistral-Nemo-Instruct-2407のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Mistral-Nemo-Instruct-2407-gguf", - "project_name": "Mistral-Nemo-Instruct-2407-gguf", - "downloads": 1381, + "description": "stockmark/gpt-neox-japanese-1.4b This repository provides a GPT-NeoX based model with 1.4B parameters pre-trained on Japanese corpus of about 20B tokens.", + "url": "https://huggingface.co./stockmark/gpt-neox-japanese-1.4b", + "project_name": "gpt-neox-japanese-1.4b", + "downloads": 1410, "source": "Hugging Face", - "score": -0.08518713762373267, - "first_commit": "2024-07-22 13:28:13", - "latest_commit": "2024-07-22 17:25:48", + "score": -0.08587558225030782, + "first_commit": "2023-08-06 07:37:38", + "latest_commit": "2023-09-07 03:44:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.44 }, { - "description": "OpenCALM-Medium Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", - "url": "https://huggingface.co./cyberagent/open-calm-medium", - "project_name": "open-calm-medium", - "downloads": 1380, + "description": "Japanese-StableLM-Instruct-Beta-70B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-70b is a 70B-parameter decoder-only language model based on japanese-stablelm-base-beta-70b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-70b", + "project_name": "japanese-stablelm-instruct-beta-70b", + "downloads": 1397, "source": "Hugging Face", - "score": -0.08520127774667423, - "first_commit": "2023-05-15 06:44:47", - "latest_commit": "2023-05-18 01:10:54", + "score": -0.08606548569680664, + "first_commit": "2023-10-30 07:47:31", + "latest_commit": "2023-12-19 06:45:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.0 }, { - "description": "HODACHI様の Llama-3-EZO-8b-Common-it をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama-3-EZO-8b-Common-it-GGUF", - "project_name": "Llama-3-EZO-8b-Common-it-GGUF", - "downloads": 1342, + "description": "ELYZA-japanese-Llama-2-13b Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。", + "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b", + "project_name": "ELYZA-japanese-Llama-2-13b", + "downloads": 1387, "source": "Hugging Face", - "score": -0.08573860241845335, - "first_commit": "2024-07-15 11:58:12", - "latest_commit": "2024-07-15 20:08:22", + "score": -0.0862115652710365, + "first_commit": "2023-12-25 16:38:08", + "latest_commit": "2023-12-27 01:40:43", 
"languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-StableLM-Base-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-beta-7b", - "project_name": "japanese-stablelm-base-beta-7b", - "downloads": 1333, + "description": "japanese-gpt-neox-3.6b-instruction-sft Overview This repository provides a Japanese GPT-NeoX model of 3.6 billion parameters.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft", + "project_name": "japanese-gpt-neox-3.6b-instruction-sft", + "downloads": 1376, "source": "Hugging Face", - "score": -0.08586586352492735, - "first_commit": "2023-10-30 07:43:36", - "latest_commit": "2023-12-19 06:43:01", + "score": -0.08637225280268934, + "first_commit": "2023-05-17 02:16:28", + "latest_commit": "2024-07-20 07:56:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.76 }, { - "description": "rinna-llama-3-youko-70b-instruct-gguf rinnaさんが公開しているllama-3-youko-70b-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-70b-instruct-gguf", - "project_name": "rinna-llama-3-youko-70b-instruct-gguf", - "downloads": 1314, + "description": "Model Description llava-calm2-siglip is an experimental Vision Language Model that can answer questions in Japanese about images.", + "url": "https://huggingface.co./cyberagent/llava-calm2-siglip", + "project_name": "llava-calm2-siglip", + "downloads": 1375, "source": "Hugging Face", - "score": -0.08613452586081691, - "first_commit": "2024-07-27 09:04:09", - "latest_commit": "2024-07-31 14:35:52", + "score": -0.08638686076011233, + "first_commit": "2024-06-12 19:35:20", + "latest_commit": "2024-06-12 19:40:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.46 }, { "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fast-instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", - "downloads": 1301, + "downloads": 1353, "source": "Hugging Face", - "score": -0.08631834745905714, + "score": -0.08670823582341801, "first_commit": "2023-12-27 09:46:04", "latest_commit": "2023-12-27 11:39:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { "description": "This dataset contains a diverse set of natural Japanese speech, collected from terrestrial television streams.", "url": "https://huggingface.co./datasets/reazon-research/reazonspeech", "project_name": "reazonspeech", - "downloads": 1296, + "downloads": 1341, "source": "Hugging Face", - "score": -0.08638904807376492, + "score": -0.08688353131249384, "first_commit": null, "latest_commit": null, "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Llama-3.1-70B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-70B-EZO-1.1-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Llama-3.1-70B-EZO-1.1-it-gguf", 
- "project_name": "Llama-3.1-70B-EZO-1.1-it-gguf", - "downloads": 1268, + "description": "Vecteus-v1-gguf Local-Novel-LLM-projectさんが公開しているVecteus-v1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Vecteus-v1-gguf", + "project_name": "Vecteus-v1-gguf", + "downloads": 1337, "source": "Hugging Face", - "score": -0.08678497151612848, - "first_commit": "2024-07-31 12:12:13", - "latest_commit": "2024-07-31 21:47:25", + "score": -0.08694196314218577, + "first_commit": "2024-05-01 17:49:42", + "latest_commit": "2024-05-01 18:37:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "rinna-llama-3-youko-70b-instruct-gguf rinnaさんが公開しているllama-3-youko-70b-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-70b-instruct-gguf", + "project_name": "rinna-llama-3-youko-70b-instruct-gguf", + "downloads": 1322, + "source": "Hugging Face", + "score": -0.08716108250353057, + "first_commit": "2024-07-27 09:04:09", + "latest_commit": "2024-07-31 14:35:52", + "languages": [], + "model_or_dataset": "model", + "model_size": 70.6 }, { "description": "GUIで動作する文書校正ツール GUI tool for textlinting.", @@ -4142,7 +4303,7 @@ "project_name": "proofreading-tool", "stargazers_count": 86, "source": "GitHub", - "score": -0.0871772117423825, + "score": -0.08719122116232154, "first_commit": "2021-04-08 12:10:36", "latest_commit": "2024-06-22 20:18:09", "languages": [ @@ -4151,2870 +4312,3062 @@ "model_or_dataset": null }, { - "description": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf pfnetさんが公開しているLlama3-Preferred-MedSwallow-70Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/pfnet-Llama3-Preferred-MedSwallow-70B-gguf", - "project_name": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf", - "downloads": 1218, + "description": "OpenCALM-Medium Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by", + "url": "https://huggingface.co./cyberagent/open-calm-medium", + "project_name": "open-calm-medium", + "downloads": 1299, "source": "Hugging Face", - "score": -0.08749197766320627, - "first_commit": "2024-07-18 15:45:16", - "latest_commit": "2024-07-19 09:14:38", + "score": -0.08749706552425923, + "first_commit": "2023-05-15 06:44:47", + "latest_commit": "2023-05-18 01:10:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Llama-3.1-70B-EZO-1.1-it-gguf HODACHIさんが公開しているLlama-3.1-70B-EZO-1.1-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Llama-3.1-70B-EZO-1.1-it-gguf", + "project_name": "Llama-3.1-70B-EZO-1.1-it-gguf", + "downloads": 1299, + "source": "Hugging Face", + "score": -0.08749706552425923, + "first_commit": "2024-07-31 12:12:13", + "latest_commit": "2024-07-31 21:47:25", + "languages": [], + "model_or_dataset": "model", + "model_size": 70.6 }, { "description": "ELYZA-japanese-Llama-2-13b-fast Model Description ELYZA-japanese-Llama-2-13b は、 Llama 2をベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", "url": "https://huggingface.co./elyza/ELYZA-japanese-Llama-2-13b-fast", "project_name": "ELYZA-japanese-Llama-2-13b-fast", - "downloads": 1209, + "downloads": 1124, "source": "Hugging Face", - "score": -0.08761923876968028, + "score": -0.0900534580732817, "first_commit": "2023-12-25 17:14:45", "latest_commit": "2023-12-27 01:41:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Corpus of 
Annual Reports in Japan", + "url": "https://github.com/chakki-works/CoARiJ", + "project_name": "CoARiJ", + "stargazers_count": 85, + "source": "GitHub", + "score": -0.09011426166004384, + "first_commit": "2019-09-02 14:12:48", + "latest_commit": "2020-12-19 14:00:34", + "languages": [ + "Python" + ], + "model_or_dataset": "dataset" }, { "description": "ODEX is an Open-Domain EXecution-based NL-to-Code generation data benchmark.", "url": "https://huggingface.co./datasets/neulab/odex", "project_name": "odex", - "downloads": 1208, + "downloads": 1108, "source": "Hugging Face", - "score": -0.08763337889262184, + "score": -0.09028718539204948, "first_commit": "2023-01-06 14:30:00", "latest_commit": "2023-02-10 18:01:34", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Vecteus-v1-gguf Local-Novel-LLM-projectさんが公開しているVecteus-v1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Vecteus-v1-gguf", - "project_name": "Vecteus-v1-gguf", - "downloads": 1204, - "source": "Hugging Face", - "score": -0.08768993938438806, - "first_commit": "2024-05-01 17:49:42", - "latest_commit": "2024-05-01 18:37:01", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-base-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-base-v1", - "project_name": "japanese-reranker-cross-encoder-base-v1", - "downloads": 1180, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-small-short", + "project_name": "t5-small-short", + "downloads": 1103, "source": "Hugging Face", - "score": -0.08802930233498539, - "first_commit": "2024-03-29 07:07:38", - "latest_commit": "2024-04-01 02:39:31", + "score": -0.09036022517916441, + "first_commit": "2023-04-25 04:37:20", + "latest_commit": "2023-05-10 09:55:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "bilingual-gpt-neox-4b-instruction-ppo Overview This repository provides an English-Japanese bilingual GPT-NeoX model of 3.8 billion parameters.", "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-instruction-ppo", "project_name": "bilingual-gpt-neox-4b-instruction-ppo", - "downloads": 1158, + "downloads": 1042, "source": "Hugging Face", - "score": -0.08834038503969963, + "score": -0.09125131058196652, "first_commit": "2023-08-02 05:56:07", "latest_commit": "2024-07-20 08:05:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.95 }, { - "description": "japanese-large-lm-3.6b-instruction-sft", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft", - "project_name": "japanese-large-lm-3.6b-instruction-sft", - "downloads": 1119, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat", + "project_name": "Orion-14B-Chat", + "downloads": 1040, "source": "Hugging Face", - "score": -0.0888918498344203, - "first_commit": "2023-08-14 17:18:09", - "latest_commit": "2023-08-24 10:08:28", + "score": -0.0912805264968125, + "first_commit": "2024-01-16 06:03:30", + "latest_commit": "2024-04-11 10:48:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.5 + }, + { + "description": "GitHub リポジトリ ids-cv/wrime で公開されているデータセットを利用しています。 ", + "url": 
"https://huggingface.co./datasets/llm-book/wrime-sentiment", + "project_name": "wrime-sentiment", + "downloads": 1019, + "source": "Hugging Face", + "score": -0.0915872936026952, + "first_commit": "2023-07-29 06:38:26", + "latest_commit": "2023-10-06 00:56:38", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "wav2vec2-base-asr", "url": "https://huggingface.co./TKU410410103/wav2vec2-base-japanese-asr", "project_name": "wav2vec2-base-japanese-asr", - "downloads": 1076, + "downloads": 1012, "source": "Hugging Face", - "score": -0.08949987512090721, + "score": -0.09168954930465609, "first_commit": "2024-04-14 10:22:21", "latest_commit": "2024-04-14 14:00:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.0945 }, { - "description": "Japanese-StableLM-Instruct-Alpha-7B-v2 \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-instruct-alpha-7b-v2 is a 7B parameter decoder-only language models pre-trained built on top of the Japanese-StableLM-Base-Alpha-7B model and further fine-tuned on various instruction-following datasets.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b-v2", - "project_name": "japanese-stablelm-instruct-alpha-7b-v2", - "downloads": 1036, + "description": "japanese-large-lm-3.6b-instruction-sft", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft", + "project_name": "japanese-large-lm-3.6b-instruction-sft", + "downloads": 989, "source": "Hugging Face", - "score": -0.09006548003856943, - "first_commit": "2023-10-06 08:40:24", - "latest_commit": "2023-10-06 08:40:24", + "score": -0.09202553232538475, + "first_commit": "2023-08-14 17:18:09", + "latest_commit": "2023-08-24 10:08:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.68 }, { - "description": "Corpus of Annual Reports in Japan", - "url": "https://github.com/chakki-works/CoARiJ", - "project_name": "CoARiJ", - "stargazers_count": 85, - "source": "GitHub", - "score": -0.0900953177032687, - "first_commit": "2019-09-02 14:12:48", - "latest_commit": "2020-12-19 14:00:34", - "languages": [ - "Python" - ], - "model_or_dataset": "dataset" + "description": "PLaMo-13B-Instruct Model Description PLaMo-13B-Instruct is an instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", + "url": "https://huggingface.co./pfnet/plamo-13b-instruct", + "project_name": "plamo-13b-instruct", + "downloads": 986, + "source": "Hugging Face", + "score": -0.09206935619765372, + "first_commit": "2023-10-26 02:11:24", + "latest_commit": "2024-01-25 07:46:09", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat", - "project_name": "Orion-14B-Chat", - "downloads": 1008, + "description": "old? 
", + "url": "https://huggingface.co./Lasorco/lametta_old", + "project_name": "lametta_old", + "downloads": 968, "source": "Hugging Face", - "score": -0.090461403480933, - "first_commit": "2024-01-16 06:03:30", - "latest_commit": "2024-04-11 10:48:51", + "score": -0.09233229943126746, + "first_commit": "2023-05-21 11:16:50", + "latest_commit": "2024-07-23 07:24:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Dataset Summary RealPersonaChat は,話者本人のペルソナと性格特性を含む,約14,000件の日本語雑談対話からなるコーパスです.", - "url": "https://huggingface.co./datasets/nu-dialogue/real-persona-chat", - "project_name": "real-persona-chat", - "downloads": 980, + "description": "hubert-base-jtube This repo provides model weights for the hubert-base model trained on the JTubeSpeech corpus. ", + "url": "https://huggingface.co./sarulab-speech/hubert-base-jtube", + "project_name": "hubert-base-jtube", + "downloads": 964, "source": "Hugging Face", - "score": -0.09085732692329657, - "first_commit": "2024-03-09 22:52:22", - "latest_commit": "2024-03-13 10:26:42", + "score": -0.0923907312609594, + "first_commit": "2024-02-02 04:15:22", + "latest_commit": "2024-02-05 11:49:57", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "old? ", - "url": "https://huggingface.co./Lasorco/lametta_old", - "project_name": "lametta_old", - "downloads": 962, + "description": "hotchpotch/japanese-reranker-cross-encoder-base-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-base-v1", + "project_name": "japanese-reranker-cross-encoder-base-v1", + "downloads": 957, "source": "Hugging Face", - "score": -0.09111184913624457, - "first_commit": "2023-05-21 11:16:50", - "latest_commit": "2024-07-23 07:24:33", + "score": -0.0924929869629203, + "first_commit": "2024-03-29 07:07:38", + "latest_commit": "2024-04-01 02:39:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 + }, + { + "description": "Japanese-StableLM-Instruct-Alpha-7B-v2 \"A parrot able to speak Japanese, ukiyoe, edo period\" — Stable Diffusion XL Model Description japanese-stablelm-instruct-alpha-7b-v2 is a 7B parameter decoder-only language models pre-trained built on top of the Japanese-StableLM-Base-Alpha-7B model and further fine-tuned on various instruction-following datasets.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b-v2", + "project_name": "japanese-stablelm-instruct-alpha-7b-v2", + "downloads": 944, + "source": "Hugging Face", + "score": -0.09268289040941911, + "first_commit": "2023-10-06 08:40:24", + "latest_commit": "2023-10-06 08:40:24", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.01 }, { "description": "This is a Japanese translated version of HumanEval, an evaluation harness for the HumanEval problem solving dataset described in the paper \"Evaluating Large Language Models Trained on Code\".", "url": "https://huggingface.co./datasets/kogi-jwu/jhumaneval", "project_name": "jhumaneval", - "downloads": 944, + "downloads": 940, "source": "Hugging Face", - "score": -0.09136637134919257, + "score": -0.09274132223911105, "first_commit": "2023-10-21 08:20:14", "latest_commit": "2024-01-10 21:52:35", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": 
"hotchpotch/japanese-bge-reranker-v2-m3-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-bge-reranker-v2-m3-v1", - "project_name": "japanese-bge-reranker-v2-m3-v1", - "downloads": 940, + "description": "Leia-Swallow-7B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", + "url": "https://huggingface.co./leia-llm/Leia-Swallow-7b", + "project_name": "Leia-Swallow-7b", + "downloads": 938, "source": "Hugging Face", - "score": -0.09142293184095879, - "first_commit": "2024-03-28 20:45:16", - "latest_commit": "2024-04-01 02:40:22", + "score": -0.09277053815395703, + "first_commit": "2024-04-17 07:12:28", + "latest_commit": "2024-04-17 10:29:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.83 }, { - "description": "GitHub リポジトリ ids-cv/wrime で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/wrime-sentiment", - "project_name": "wrime-sentiment", - "downloads": 937, + "description": "ELYZA-japanese-Llama-2-7b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fastのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-fast-gguf", + "downloads": 936, "source": "Hugging Face", - "score": -0.09146535220978345, - "first_commit": "2023-07-29 06:38:26", - "latest_commit": "2023-10-06 00:56:38", + "score": -0.092799754068803, + "first_commit": "2023-08-29 07:23:20", + "latest_commit": "2023-11-16 14:27:36", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 6.85 + }, + { + "description": "このツールは、複数のデータセットを横断して日本語の大規模言語モデルを自動評価するものです.", + "url": "https://github.com/llm-jp/llm-jp-eval", + "project_name": "llm-jp-eval", + "stargazers_count": 84, + "source": "GitHub", + "score": -0.09303730215776614, + "first_commit": "2023-10-19 19:36:10", + "latest_commit": "2024-07-18 21:31:53", + "languages": [ + "Python" + ], + "model_or_dataset": null }, { "description": "stockmark-gpt-neox-japanese-1.4b-gguf stockmarkさんが公開しているgpt-neox-japanese-1.4bのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/stockmark-gpt-neox-japanese-1.4b-gguf", "project_name": "stockmark-gpt-neox-japanese-1.4b-gguf", - "downloads": 934, + "downloads": 908, "source": "Hugging Face", - "score": -0.09150777257860813, + "score": -0.0932087768766466, "first_commit": "2023-08-22 12:45:18", "latest_commit": "2023-09-08 22:00:37", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ELYZA-japanese-Llama-2-7b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7b-fastのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-fast-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-fast-gguf", - "downloads": 925, - "source": "Hugging Face", - "score": -0.09163503368508213, - "first_commit": "2023-08-29 07:23:20", - "latest_commit": "2023-11-16 14:27:36", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.41 }, { "description": "japanese-stablelm-2-instruct-1_6b-gguf stabilityaiさんが公開しているjapanese-stablelm-2-instruct-1_6bのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/japanese-stablelm-2-instruct-1_6b-gguf", "project_name": "japanese-stablelm-2-instruct-1_6b-gguf", - "downloads": 899, + "downloads": 898, "source": "Hugging Face", - 
"score": -0.09200267688156258, + "score": -0.09335485645087645, "first_commit": "2024-05-11 07:26:43", "latest_commit": "2024-05-11 09:56:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.64 }, { - "description": "Dataset Preprocessing Supported Tasks and Leaderboards Languages 注釈はすべて日本語を主要言語としています。 ", - "url": "https://huggingface.co./datasets/shunk031/jsnli", - "project_name": "jsnli", - "downloads": 899, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-v0.1", + "project_name": "Swallow-70b-instruct-v0.1", + "downloads": 892, "source": "Hugging Face", - "score": -0.09200267688156258, - "first_commit": "2022-12-01 01:31:32", - "latest_commit": "2022-12-12 16:36:58", + "score": -0.09344250419541436, + "first_commit": "2024-03-06 14:39:34", + "latest_commit": "2024-06-29 09:00:17", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 69.2 }, { - "description": "PLaMo-13B-Instruct Model Description PLaMo-13B-Instruct is an instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", - "url": "https://huggingface.co./pfnet/plamo-13b-instruct", - "project_name": "plamo-13b-instruct", + "description": "Leia-Swallow-13B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.", + "url": "https://huggingface.co./leia-llm/Leia-Swallow-13b", + "project_name": "Leia-Swallow-13b", "downloads": 886, "source": "Hugging Face", - "score": -0.09218649847980281, - "first_commit": "2023-10-26 02:11:24", - "latest_commit": "2024-01-25 07:46:09", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "hubert-base-jtube This repo provides model weights for the hubert-base model trained on the JTubeSpeech corpus. 
", - "url": "https://huggingface.co./sarulab-speech/hubert-base-jtube", - "project_name": "hubert-base-jtube", - "downloads": 878, - "source": "Hugging Face", - "score": -0.09229961946333526, - "first_commit": "2024-02-02 04:15:22", - "latest_commit": "2024-02-05 11:49:57", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Stockmark-13b-instruct Stockmark-13b-instruct is an instruction-tuned version of Stockmark-13b, a 13 billion parameter Japanese LLM.", - "url": "https://huggingface.co./stockmark/stockmark-13b-instruct", - "project_name": "stockmark-13b-instruct", - "downloads": 871, - "source": "Hugging Face", - "score": -0.09239860032392615, - "first_commit": "2023-11-08 16:56:34", - "latest_commit": "2023-11-08 17:02:17", + "score": -0.09353015193995227, + "first_commit": "2024-04-17 07:32:11", + "latest_commit": "2024-04-18 05:21:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", - "url": "https://huggingface.co./fishaudio/fish-speech-1.2", - "project_name": "fish-speech-1.2", - "downloads": 852, + "description": "Dataset Summary RealPersonaChat は,話者本人のペルソナと性格特性を含む,約14,000件の日本語雑談対話からなるコーパスです.", + "url": "https://huggingface.co./datasets/nu-dialogue/real-persona-chat", + "project_name": "real-persona-chat", + "downloads": 855, "source": "Hugging Face", - "score": -0.0926672626598157, - "first_commit": "2024-07-02 04:24:09", - "latest_commit": "2024-07-02 04:31:26", + "score": -0.09398299862006483, + "first_commit": "2024-03-09 22:52:22", + "latest_commit": "2024-03-13 10:26:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-instruct-v0.1", - "project_name": "Swallow-70b-instruct-v0.1", - "downloads": 829, + "description": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-T2-2B-gemma-2-itのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", + "project_name": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", + "downloads": 851, "source": "Hugging Face", - "score": -0.0929924854874715, - "first_commit": "2024-03-06 14:39:34", - "latest_commit": "2024-06-29 09:00:17", + "score": -0.09404143044975677, + "first_commit": "2024-08-01 17:32:31", + "latest_commit": "2024-08-01 18:38:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.61 }, { "description": "rinna-llama-3-youko-8b-gguf rinnaさんが公開しているllama-3-youko-8bのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/rinna-llama-3-youko-8b-gguf", "project_name": "rinna-llama-3-youko-8b-gguf", - "downloads": 829, + "downloads": 838, "source": "Hugging Face", - "score": -0.0929924854874715, + "score": -0.09423133389625558, "first_commit": "2024-05-01 14:17:53", "latest_commit": "2024-05-01 15:11:21", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "このツールは、複数のデータセットを横断して日本語の大規模言語モデルを自動評価するものです.", - "url": "https://github.com/llm-jp/llm-jp-eval", - "project_name": "llm-jp-eval", - "stargazers_count": 84, - "source": "GitHub", - "score": -0.0930134236641549, - "first_commit": "2023-10-19 
19:36:10", - "latest_commit": "2024-07-18 21:31:53", - "languages": [ - "Python" - ], - "model_or_dataset": null + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf HODACHIさんが公開しているEZO-Common-T2-2B-gemma-2-itのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", - "project_name": "HODACHI-EZO-Common-T2-2B-gemma-2-it-gguf", - "downloads": 813, + "description": "hotchpotch/japanese-bge-reranker-v2-m3-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-bge-reranker-v2-m3-v1", + "project_name": "japanese-bge-reranker-v2-m3-v1", + "downloads": 833, "source": "Hugging Face", - "score": -0.09321872745453638, - "first_commit": "2024-08-01 17:32:31", - "latest_commit": "2024-08-01 18:38:31", + "score": -0.09430437368337051, + "first_commit": "2024-03-28 20:45:16", + "latest_commit": "2024-04-01 02:40:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.5680000000000001 }, { "description": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf aixsatoshiさんが公開しているLlama-3-8b-Cosmopedia-japaneseのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", "project_name": "aixsatoshi-Llama-3-8b-Cosmopedia-japanese-gguf", - "downloads": 813, + "downloads": 822, "source": "Hugging Face", - "score": -0.09321872745453638, + "score": -0.09446506121502335, "first_commit": "2024-05-01 12:36:43", "latest_commit": "2024-05-19 08:27:21", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Phi-3-medium-128k-instruct-gguf microsoftさんが公開しているPhi-3-medium-128k-instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Phi-3-medium-128k-instruct-gguf", - "project_name": "Phi-3-medium-128k-instruct-gguf", - "downloads": 807, - "source": "Hugging Face", - "score": -0.09330356819218572, - "first_commit": "2024-05-22 15:27:33", - "latest_commit": "2024-05-22 16:56:55", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", - "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-v0.1", - "project_name": "karakuri-lm-70b-v0.1", - "downloads": 784, + "description": "BERT Base Japanese for Irony", + "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-irony", + "project_name": "bert-base-japanese-sentiment-irony", + "downloads": 820, "source": "Hugging Face", - "score": -0.0936287910198415, - "first_commit": "2024-01-26 10:49:53", - "latest_commit": "2024-05-07 09:00:06", + "score": -0.09449427712986933, + "first_commit": "2022-11-07 06:29:21", + "latest_commit": "2022-11-08 04:23:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "rinna/nekomata-14b Overview We conduct continual pre-training of qwen-14b on 66B tokens from a mixture of Japanese and English datasets.", - "url": "https://huggingface.co./rinna/nekomata-14b", - "project_name": "nekomata-14b", + "description": "recruit-jp/japanese-clip-vit-b-32-roberta-base Overview Developed by: Recruit Co.", + "url": "https://huggingface.co./recruit-jp/japanese-clip-vit-b-32-roberta-base", + "project_name": "japanese-clip-vit-b-32-roberta-base", "downloads": 781, "source": "Hugging Face", - "score": -0.09367121138866617, - "first_commit": "2023-12-19 
08:09:53", - "latest_commit": "2024-07-22 07:58:40", + "score": -0.09506398746936576, + "first_commit": "2023-12-20 06:06:12", + "latest_commit": "2024-01-22 07:41:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.198 }, { "description": "(简体中文|English|日本語) Introduction github repo : https://github.com/FunAudioLLM/SenseVoice SenseVoice is a speech foundation model with multiple speech understanding capabilities, including automatic speech recognition (ASR), spoken language identification (LID), speech emotion recognition (SER), and audio event detection (AED).", "url": "https://huggingface.co./FunAudioLLM/SenseVoiceSmall", "project_name": "SenseVoiceSmall", - "downloads": 777, + "downloads": 756, "source": "Hugging Face", - "score": -0.0937277718804324, + "score": -0.0954291864049404, "first_commit": "2024-07-03 03:56:49", "latest_commit": "2024-07-31 05:47:48", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "bilingual-gpt-neox-4b-8k Overview Notice: This model requires transformers>=4.31.0 to work properly.", - "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-8k", - "project_name": "bilingual-gpt-neox-4b-8k", - "downloads": 776, - "source": "Hugging Face", - "score": -0.09374191200337395, - "first_commit": "2023-07-31 02:34:21", - "latest_commit": "2024-07-20 08:03:16", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "hubert-base-asr", - "url": "https://huggingface.co./TKU410410103/hubert-base-japanese-asr", - "project_name": "hubert-base-japanese-asr", - "downloads": 774, - "source": "Hugging Face", - "score": -0.09377019224925706, - "first_commit": "2024-04-09 06:01:43", - "latest_commit": "2024-04-14 13:20:43", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-large-lm-1.7b This repository provides a 1.7B parameters Japanese language model, trained by LINE Corporation.", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b", - "project_name": "japanese-large-lm-1.7b", - "downloads": 769, + "description": "Phi-3-medium-128k-instruct-gguf microsoftさんが公開しているPhi-3-medium-128k-instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Phi-3-medium-128k-instruct-gguf", + "project_name": "Phi-3-medium-128k-instruct-gguf", + "downloads": 756, "source": "Hugging Face", - "score": -0.09384089286396484, - "first_commit": "2023-07-21 00:46:33", - "latest_commit": "2023-08-17 01:06:37", + "score": -0.0954291864049404, + "first_commit": "2024-05-22 15:27:33", + "latest_commit": "2024-05-22 16:56:55", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.0 }, { "description": "shisa-base-7b-v1 shisa-base-7b-v1 takes Mistral 7B and adds an additional 8B tokens of primarily Japanese pre-training.", "url": "https://huggingface.co./augmxnt/shisa-base-7b-v1", "project_name": "shisa-base-7b-v1", - "downloads": 768, + "downloads": 754, "source": "Hugging Face", - "score": -0.0938550329869064, + "score": -0.09545840231978638, "first_commit": "2023-11-19 09:44:36", "latest_commit": "2023-12-09 10:34:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.96 }, { "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", "url": "https://huggingface.co./augmxnt/shisa-7b-v1", "project_name": "shisa-7b-v1", - "downloads": 765, + "downloads": 753, "source": "Hugging Face", - "score": -0.09389745335573106, + 
"score": -0.09547301027720936, "first_commit": "2023-11-27 17:55:31", "latest_commit": "2023-12-20 18:11:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.96 }, { - "description": "BERT Base Japanese for Irony", - "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-irony", - "project_name": "bert-base-japanese-sentiment-irony", - "downloads": 764, + "description": "bilingual-gpt-neox-4b-8k Overview Notice: This model requires transformers>=4.31.0 to work properly.", + "url": "https://huggingface.co./rinna/bilingual-gpt-neox-4b-8k", + "project_name": "bilingual-gpt-neox-4b-8k", + "downloads": 753, "source": "Hugging Face", - "score": -0.09391159347867262, - "first_commit": "2022-11-07 06:29:21", - "latest_commit": "2022-11-08 04:23:27", + "score": -0.09547301027720936, + "first_commit": "2023-07-31 02:34:21", + "latest_commit": "2024-07-20 08:03:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.95 }, { "description": "Japanese StableLM-3B-4E1T Instruct Model Description", "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-instruct", "project_name": "japanese-stablelm-3b-4e1t-instruct", - "downloads": 757, + "downloads": 751, "source": "Hugging Face", - "score": -0.0940105743392635, + "score": -0.09550222619205533, "first_commit": "2023-10-16 07:50:31", "latest_commit": "2024-04-26 03:20:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.8 + }, + { + "description": "hubert-base-asr", + "url": "https://huggingface.co./TKU410410103/hubert-base-japanese-asr", + "project_name": "hubert-base-japanese-asr", + "downloads": 748, + "source": "Hugging Face", + "score": -0.09554605006432429, + "first_commit": "2024-04-09 06:01:43", + "latest_commit": "2024-04-14 13:20:43", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.0945 }, { "description": "Model Card for Model ID Original model elyza/ELYZA-japanese-Llama-2-7b-fast-instruct which is based on Meta's \"Llama 2\" and has undergone additional pre-training in Japanese, and thier original post-training and speed up tuning.", "url": "https://huggingface.co./dahara1/ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", "project_name": "ELYZA-japanese-Llama-2-7b-fast-instruct-GPTQ", - "downloads": 756, + "downloads": 740, "source": "Hugging Face", - "score": -0.09402471446220506, + "score": -0.09566291372370818, "first_commit": "2023-08-30 09:18:50", "latest_commit": "2023-11-14 00:10:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.24 }, { - "description": "Japanese StableLM-3B-4E1T Base Model Description This is a 3B-parameter decoder-only language model with a focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-base", - "project_name": "japanese-stablelm-3b-4e1t-base", - "downloads": 754, + "description": "weblab-10b-instruction-sft-GPTQ Original model weblab-10b-instruction-sft which is a Japanese-centric multilingual GPT-NeoX model of 10 billion parameters created by matsuo-lab Takeshi Kojima.", + "url": "https://huggingface.co./dahara1/weblab-10b-instruction-sft-GPTQ", + "project_name": "weblab-10b-instruction-sft-GPTQ", + "downloads": 728, "source": "Hugging Face", - "score": -0.09405299470808817, - "first_commit": "2023-10-16 06:04:58", - "latest_commit": "2024-04-26 03:20:34", + 
"score": -0.09583820921278399, + "first_commit": "2023-08-21 05:45:35", + "latest_commit": "2023-11-14 00:24:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.86 }, { - "description": "recruit-jp/japanese-clip-vit-b-32-roberta-base Overview Developed by: Recruit Co.", - "url": "https://huggingface.co./recruit-jp/japanese-clip-vit-b-32-roberta-base", - "project_name": "japanese-clip-vit-b-32-roberta-base", - "downloads": 750, + "description": "Fish Speech V1.2 Fish Speech V1.2 is a leading text-to-speech (TTS) model trained on 300k hours of English, Chinese, and Japanese audio data.", + "url": "https://huggingface.co./fishaudio/fish-speech-1.2", + "project_name": "fish-speech-1.2", + "downloads": 724, "source": "Hugging Face", - "score": -0.0941095551998544, - "first_commit": "2023-12-20 06:06:12", - "latest_commit": "2024-01-22 07:41:59", + "score": -0.09589664104247594, + "first_commit": "2024-07-02 04:24:09", + "latest_commit": "2024-07-02 04:31:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-hf", - "project_name": "Swallow-70b-NVE-hf", - "downloads": 746, + "description": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf umiyukiさんが公開しているJapanese-Chat-Umievo-itr001-7bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", + "project_name": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", + "downloads": 724, "source": "Hugging Face", - "score": -0.09416611569162063, - "first_commit": "2023-12-07 07:34:35", - "latest_commit": "2024-06-29 08:56:25", + "score": -0.09589664104247594, + "first_commit": "2024-04-27 09:55:39", + "latest_commit": "2024-04-27 10:52:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "japanese-gpt-neox-3.6b-instruction-sft-v2 Overview", - "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft-v2", - "project_name": "japanese-gpt-neox-3.6b-instruction-sft-v2", - "downloads": 745, + "description": "Ninja-v1-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Ninja-v1-gguf", + "project_name": "Ninja-v1-gguf", + "downloads": 721, "source": "Hugging Face", - "score": -0.09418025581456219, - "first_commit": "2023-05-30 01:50:25", - "latest_commit": "2024-07-20 07:57:35", + "score": -0.0959404649147449, + "first_commit": "2024-05-03 14:03:22", + "latest_commit": "2024-05-04 13:26:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "weblab-10b-instruction-sft-GPTQ Original model weblab-10b-instruction-sft which is a Japanese-centric multilingual GPT-NeoX model of 10 billion parameters created by matsuo-lab Takeshi Kojima.", - "url": "https://huggingface.co./dahara1/weblab-10b-instruction-sft-GPTQ", - "project_name": "weblab-10b-instruction-sft-GPTQ", - "downloads": 743, + "description": "Please feel free to open an issue or pull request. 
", + "url": "https://huggingface.co./datasets/kumapo/JAQKET", + "project_name": "JAQKET", + "downloads": 721, "source": "Hugging Face", - "score": -0.0942085360604453, - "first_commit": "2023-08-21 05:45:35", - "latest_commit": "2023-11-14 00:24:22", + "score": -0.0959404649147449, + "first_commit": "2023-06-21 13:04:38", + "latest_commit": "2023-10-09 06:44:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf umiyukiさんが公開しているJapanese-Chat-Umievo-itr001-7bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", - "project_name": "umiyuki-Japanese-Chat-Umievo-itr001-7b-gguf", - "downloads": 737, + "description": "※llama.cpp Releases b3428(7/21)", + "url": "https://huggingface.co./MCZK/EZO-Common-9B-gemma-2-it-GGUF", + "project_name": "EZO-Common-9B-gemma-2-it-GGUF", + "downloads": 720, "source": "Hugging Face", - "score": -0.09429337679809463, - "first_commit": "2024-04-27 09:55:39", - "latest_commit": "2024-04-27 10:52:17", + "score": -0.09595507287216788, + "first_commit": "2024-07-10 11:12:59", + "latest_commit": "2024-07-21 11:26:08", "languages": [], + "model_or_dataset": "model", + "model_size": 9.24 + }, + { + "description": "llm-lora-classification", + "url": "https://github.com/hppRC/llm-lora-classification", + "project_name": "llm-lora-classification", + "stargazers_count": 83, + "source": "GitHub", + "score": -0.09596034265548843, + "first_commit": "2023-07-17 12:42:57", + "latest_commit": "2023-07-22 19:46:45", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, + { + "description": "Word2vec (word to vectors) approach for Japanese language using Gensim and Mecab.", + "url": "https://github.com/philipperemy/japanese-words-to-vectors", + "project_name": "japanese-words-to-vectors", + "stargazers_count": 83, + "source": "GitHub", + "score": -0.09596034265548843, + "first_commit": "2016-09-04 09:43:00", + "latest_commit": "2020-08-09 19:48:23", + "languages": [ + "Python" + ], "model_or_dataset": "model" }, { - "description": "Japanese-StableLM-Instruct-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-7b is a 7B-parameter decoder-only language model based on", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-7b", - "project_name": "japanese-stablelm-instruct-beta-7b", - "downloads": 728, + "description": "Parakeet TDT-CTC 0.6B (ja) | | parakeet-tdt_ctc-0.6b-ja is an ASR model that transcribes Japanese speech with Punctuations.", + "url": "https://huggingface.co./nvidia/parakeet-tdt_ctc-0.6b-ja", + "project_name": "parakeet-tdt_ctc-0.6b-ja", + "downloads": 718, "source": "Hugging Face", - "score": -0.09442063790456863, - "first_commit": "2023-10-30 07:47:09", - "latest_commit": "2023-12-19 06:43:49", + "score": -0.09598428878701386, + "first_commit": "2024-05-13 15:39:30", + "latest_commit": "2024-05-17 17:20:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-long", - "project_name": "t5-base-long", - "downloads": 723, + "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", + "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-v0.1", + "project_name": 
"karakuri-lm-70b-v0.1", + "downloads": 715, "source": "Hugging Face", - "score": -0.09449133851927641, - "first_commit": "2023-04-26 08:30:59", - "latest_commit": "2023-05-10 10:00:00", + "score": -0.09602811265928281, + "first_commit": "2024-01-26 10:49:53", + "latest_commit": "2024-05-07 09:00:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.2 + }, + { + "description": "japanese-large-lm-1.7b This repository provides a 1.7B parameters Japanese language model, trained by LINE Corporation.", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b", + "project_name": "japanese-large-lm-1.7b", + "downloads": 710, + "source": "Hugging Face", + "score": -0.09610115244639773, + "first_commit": "2023-07-21 00:46:33", + "latest_commit": "2023-08-17 01:06:37", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.75 + }, + { + "description": "japanese-gpt-neox-3.6b-instruction-sft-v2 Overview", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-3.6b-instruction-sft-v2", + "project_name": "japanese-gpt-neox-3.6b-instruction-sft-v2", + "downloads": 706, + "source": "Hugging Face", + "score": -0.09615958427608969, + "first_commit": "2023-05-30 01:50:25", + "latest_commit": "2024-07-20 07:57:35", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.76 + }, + { + "description": "lightblue-suzume-llama-3-8B-multilingual-gguf lightblueさんが公開しているsuzume-llama-3-8B-multilingualのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-multilingual-gguf", + "project_name": "lightblue-suzume-llama-3-8B-multilingual-gguf", + "downloads": 703, + "source": "Hugging Face", + "score": -0.09620340814835864, + "first_commit": "2024-05-06 16:31:55", + "latest_commit": "2024-05-07 12:59:57", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "bert-base-japanese-v3-unsup-simcse-jawiki 「大規模言語モデル入門」の第8章で紹介している教師なしSimCSEのモデルです。 ", "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-unsup-simcse-jawiki", "project_name": "bert-base-japanese-v3-unsup-simcse-jawiki", - "downloads": 720, + "downloads": 701, "source": "Hugging Face", - "score": -0.09453375888810107, + "score": -0.09623262406320461, "first_commit": "2023-06-21 10:52:27", "latest_commit": "2023-07-24 07:07:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-StableLM-Base-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-ja_vocab-beta-7b", - "project_name": "japanese-stablelm-base-ja_vocab-beta-7b", - "downloads": 719, + "description": "Dataset Preprocessing Supported Tasks and Leaderboards Languages 注釈はすべて日本語を主要言語としています。 ", + "url": "https://huggingface.co./datasets/shunk031/jsnli", + "project_name": "jsnli", + "downloads": 689, "source": "Hugging Face", - "score": -0.09454789901104263, - "first_commit": "2023-10-30 07:49:15", - "latest_commit": "2023-12-19 06:45:58", + "score": -0.09640791955228044, + "first_commit": "2022-12-01 01:31:32", + "latest_commit": "2022-12-12 16:36:58", "languages": [], - 
"model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Sarashina1-65B", - "url": "https://huggingface.co./sbintuitions/sarashina1-65b", - "project_name": "sarashina1-65b", - "downloads": 716, + "description": "Japanese-StableLM-Instruct-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-beta-7b is a 7B-parameter decoder-only language model based on", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-beta-7b", + "project_name": "japanese-stablelm-instruct-beta-7b", + "downloads": 686, "source": "Hugging Face", - "score": -0.0945903193798673, - "first_commit": "2024-06-07 11:57:56", - "latest_commit": "2024-06-27 06:56:36", + "score": -0.09645174342454939, + "first_commit": "2023-10-30 07:47:09", + "latest_commit": "2023-12-19 06:43:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "lightblue-suzume-llama-3-8B-multilingual-gguf lightblueさんが公開しているsuzume-llama-3-8B-multilingualのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-multilingual-gguf", - "project_name": "lightblue-suzume-llama-3-8B-multilingual-gguf", - "downloads": 714, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base", + "project_name": "Orion-14B-Base", + "downloads": 684, "source": "Hugging Face", - "score": -0.09461859962575041, - "first_commit": "2024-05-06 16:31:55", - "latest_commit": "2024-05-07 12:59:57", + "score": -0.09648095933939536, + "first_commit": "2024-01-16 06:07:42", + "latest_commit": "2024-03-26 09:21:52", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "ELYZA-japanese-Llama-2-13b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fastのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-gguf", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-gguf", + "downloads": 674, + "source": "Hugging Face", + "score": -0.09662703891362522, + "first_commit": "2023-12-27 10:40:52", + "latest_commit": "2023-12-27 13:18:46", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.1 }, { "description": "KARAKURI LM KARAKURI LM is a pretrained language model that builds upon Llama 2.", "url": "https://huggingface.co./karakuri-ai/karakuri-lm-70b-chat-v0.1", "project_name": "karakuri-lm-70b-chat-v0.1", - "downloads": 710, + "downloads": 673, "source": "Hugging Face", - "score": -0.09467516011751663, + "score": -0.0966416468710482, "first_commit": "2024-01-26 09:08:09", "latest_commit": "2024-05-07 09:00:17", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "gpt2-large-japanese This repository provides a large sized Japanese GPT-2 model.", - "url": "https://huggingface.co./abeja/gpt2-large-japanese", - "project_name": "gpt2-large-japanese", - "downloads": 708, - "source": "Hugging Face", - "score": -0.09470344036339974, - "first_commit": "2022-08-29 05:17:36", - "latest_commit": "2022-08-29 16:10:11", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.2 }, { "description": "Polyglot-math-4x7b-24b Polyglot-4x7b is a Mixture of Experts approach to a multilingual model.", "url": "https://huggingface.co./macadeliccc/polyglot-math-4x7b", "project_name": "polyglot-math-4x7b", - "downloads": 695, + 
"downloads": 672, "source": "Hugging Face", - "score": -0.09488726196163998, + "score": -0.09665625482847119, "first_commit": "2024-01-13 03:05:44", "latest_commit": "2024-03-04 19:25:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 24.2 }, { - "description": "Ninja-v1-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Ninja-v1-gguf", - "project_name": "Ninja-v1-gguf", - "downloads": 694, + "description": "JaCWIR: Japanese Casual Web IR - 日本語情報検索評価のための小規模でカジュアルなWebタイトルと概要のデータセット 近年、大規模言語モデル(LLM)の台頭により、一般的な日本語を用いた自然な検索クエリで質問するユースケースが増えています。", + "url": "https://huggingface.co./datasets/hotchpotch/JaCWIR", + "project_name": "JaCWIR", + "downloads": 672, "source": "Hugging Face", - "score": -0.09490140208458153, - "first_commit": "2024-05-03 14:03:22", - "latest_commit": "2024-05-04 13:26:22", + "score": -0.09665625482847119, + "first_commit": "2024-03-23 05:57:58", + "latest_commit": "2024-04-01 02:34:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "PLaMo-13B-Instruct-NC Model Description PLaMo-13B-Instruct-NC is a noncommercial instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", - "url": "https://huggingface.co./pfnet/plamo-13b-instruct-nc", - "project_name": "plamo-13b-instruct-nc", - "downloads": 694, + "description": "hotchpotch/japanese-reranker-cross-encoder-small-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", + "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-small-v1", + "project_name": "japanese-reranker-cross-encoder-small-v1", + "downloads": 670, "source": "Hugging Face", - "score": -0.09490140208458153, - "first_commit": "2023-10-26 05:36:26", - "latest_commit": "2024-01-25 07:46:45", + "score": -0.09668547074331717, + "first_commit": "2024-03-28 04:31:45", + "latest_commit": "2024-04-01 02:39:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.11800000000000001 }, { - "description": "※llama.cpp Releases b3428(7/21)", - "url": "https://huggingface.co./MCZK/EZO-Common-9B-gemma-2-it-GGUF", - "project_name": "EZO-Common-9B-gemma-2-it-GGUF", - "downloads": 691, + "description": "ELYZA-japanese-Llama-2-7b-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-gguf", + "project_name": "ELYZA-japanese-Llama-2-7b-gguf", + "downloads": 669, "source": "Hugging Face", - "score": -0.0949438224534062, - "first_commit": "2024-07-10 11:12:59", - "latest_commit": "2024-07-21 11:26:08", + "score": -0.09670007870074016, + "first_commit": "2023-08-29 06:32:01", + "latest_commit": "2023-11-16 14:27:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GGUF", - "project_name": "japanese-stablelm-instruct-gamma-7B-GGUF", - "downloads": 675, + "description": "rinna/nekomata-14b Overview We conduct continual pre-training of qwen-14b on 66B tokens from a mixture of Japanese and English datasets.", + "url": "https://huggingface.co./rinna/nekomata-14b", + "project_name": "nekomata-14b", + "downloads": 667, "source": "Hugging Face", - "score": -0.09517006442047109, - "first_commit": "2023-10-28 19:03:17", - 
"latest_commit": "2023-10-28 19:07:41", + "score": -0.09672929461558612, + "first_commit": "2023-12-19 08:09:53", + "latest_commit": "2024-07-22 07:58:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.2 }, { - "description": "japanese-large-lm-1.7b-instruction-sft This repository provides a 1.7B parameters Japanese language model, fine-tuned and trained by LINE Corporation.", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft", - "project_name": "japanese-large-lm-1.7b-instruction-sft", - "downloads": 674, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-hf", + "project_name": "Swallow-70b-NVE-hf", + "downloads": 667, "source": "Hugging Face", - "score": -0.09518420454341264, - "first_commit": "2023-08-14 17:19:11", - "latest_commit": "2023-08-14 17:19:11", + "score": -0.09672929461558612, + "first_commit": "2023-12-07 07:34:35", + "latest_commit": "2024-06-29 08:56:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base", - "project_name": "Orion-14B-Base", - "downloads": 670, + "description": "DataPilot-ArrowPro-7B-KUJIRA-gguf DataPilotさんが公開しているArrowPro-7B-KUJIRAのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-KUJIRA-gguf", + "project_name": "DataPilot-ArrowPro-7B-KUJIRA-gguf", + "downloads": 666, "source": "Hugging Face", - "score": -0.09524076503517886, - "first_commit": "2024-01-16 06:07:42", - "latest_commit": "2024-03-26 09:21:52", + "score": -0.0967439025730091, + "first_commit": "2024-05-09 13:21:27", + "latest_commit": "2024-05-11 07:24:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-hf", - "project_name": "Swallow-7b-NVE-hf", - "downloads": 668, + "description": "Japanese StableLM-3B-4E1T Base Model Description This is a 3B-parameter decoder-only language model with a focus on maximizing Japanese language modeling performance and Japanese downstream task performance.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-3b-4e1t-base", + "project_name": "japanese-stablelm-3b-4e1t-base", + "downloads": 663, "source": "Hugging Face", - "score": -0.09526904528106198, - "first_commit": "2023-11-30 09:02:26", - "latest_commit": "2024-06-29 08:56:18", + "score": -0.09678772644527807, + "first_commit": "2023-10-16 06:04:58", + "latest_commit": "2024-04-26 03:20:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.8 }, { - "description": "Meta-Llama-3-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3-8B-Instructのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Meta-Llama-3-8B-Instruct-gguf", - "project_name": "Meta-Llama-3-8B-Instruct-gguf", - "downloads": 668, + "description": "※llama.cpp Releases b3428(7/21)", + "url": "https://huggingface.co./MCZK/EZO-Humanities-9B-gemma-2-it-GGUF", + "project_name": "EZO-Humanities-9B-gemma-2-it-GGUF", + "downloads": 663, "source": "Hugging Face", - 
"score": -0.09526904528106198, - "first_commit": "2024-05-12 07:18:00", - "latest_commit": "2024-05-12 08:08:38", + "score": -0.09678772644527807, + "first_commit": "2024-07-10 22:02:03", + "latest_commit": "2024-07-21 18:11:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.24 }, { - "description": "Please feel free to open an issue or pull request. ", - "url": "https://huggingface.co./datasets/kumapo/JAQKET", - "project_name": "JAQKET", - "downloads": 668, + "description": "hubert-large-asr", + "url": "https://huggingface.co./TKU410410103/hubert-large-japanese-asr", + "project_name": "hubert-large-japanese-asr", + "downloads": 661, "source": "Hugging Face", - "score": -0.09526904528106198, - "first_commit": "2023-06-21 13:04:38", - "latest_commit": "2023-10-09 06:44:28", + "score": -0.09681694236012403, + "first_commit": "2024-04-09 03:01:08", + "latest_commit": "2024-04-14 13:21:01", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.316 }, { - "description": "JaCWIR: Japanese Casual Web IR - 日本語情報検索評価のための小規模でカジュアルなWebタイトルと概要のデータセット 近年、大規模言語モデル(LLM)の台頭により、一般的な日本語を用いた自然な検索クエリで質問するユースケースが増えています。", - "url": "https://huggingface.co./datasets/hotchpotch/JaCWIR", - "project_name": "JaCWIR", - "downloads": 667, + "description": "gpt2-large-japanese This repository provides a large sized Japanese GPT-2 model.", + "url": "https://huggingface.co./abeja/gpt2-large-japanese", + "project_name": "gpt2-large-japanese", + "downloads": 657, "source": "Hugging Face", - "score": -0.09528318540400353, - "first_commit": "2024-03-23 05:57:58", - "latest_commit": "2024-04-01 02:34:34", + "score": -0.09687537418981598, + "first_commit": "2022-08-29 05:17:36", + "latest_commit": "2022-08-29 16:10:11", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "※llama.cpp Releases b3428(7/21)", - "url": "https://huggingface.co./MCZK/EZO-Humanities-9B-gemma-2-it-GGUF", - "project_name": "EZO-Humanities-9B-gemma-2-it-GGUF", - "downloads": 664, + "description": "Meta-Llama-3-8B-Instruct-gguf meta-llamaさんが公開しているMeta-Llama-3-8B-Instructのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Meta-Llama-3-8B-Instruct-gguf", + "project_name": "Meta-Llama-3-8B-Instruct-gguf", + "downloads": 656, "source": "Hugging Face", - "score": -0.0953256057728282, - "first_commit": "2024-07-10 22:02:03", - "latest_commit": "2024-07-21 18:11:21", + "score": -0.09688998214723896, + "first_commit": "2024-05-12 07:18:00", + "latest_commit": "2024-05-12 08:08:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "lightblue-suzume-llama-3-8B-japanese-gguf lightblueさんが公開しているsuzume-llama-3-8B-japaneseのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/lightblue-suzume-llama-3-8B-japanese-gguf", "project_name": "lightblue-suzume-llama-3-8B-japanese-gguf", - "downloads": 661, + "downloads": 656, "source": "Hugging Face", - "score": -0.09536802614165286, + "score": -0.09688998214723896, "first_commit": "2024-04-23 13:30:08", "latest_commit": "2024-05-07 12:58:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "DataPilot-ArrowPro-7B-KUJIRA-gguf DataPilotさんが公開しているArrowPro-7B-KUJIRAのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-KUJIRA-gguf", - "project_name": "DataPilot-ArrowPro-7B-KUJIRA-gguf", - 
"downloads": 660, + "description": "aya-23-35B-gguf CohereForAIさんが公開しているaya-23-35Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/aya-23-35B-gguf", + "project_name": "aya-23-35B-gguf", + "downloads": 650, "source": "Hugging Face", - "score": -0.09538216626459442, - "first_commit": "2024-05-09 13:21:27", - "latest_commit": "2024-05-11 07:24:16", + "score": -0.09697762989177687, + "first_commit": "2024-05-26 16:32:27", + "latest_commit": "2024-05-27 00:47:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 35.0 }, { - "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", - "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-instruct-hf", - "project_name": "Swallow-70b-NVE-instruct-hf", - "downloads": 660, + "description": "PLaMo-13B-Instruct-NC Model Description PLaMo-13B-Instruct-NC is a noncommercial instruct fine-tuned model built upon the 8192 context length version of PLaMo-13B text generation model.", + "url": "https://huggingface.co./pfnet/plamo-13b-instruct-nc", + "project_name": "plamo-13b-instruct-nc", + "downloads": 646, "source": "Hugging Face", - "score": -0.09538216626459442, - "first_commit": "2023-12-13 03:56:30", - "latest_commit": "2024-07-06 15:18:24", + "score": -0.09703606172146882, + "first_commit": "2023-10-26 05:36:26", + "latest_commit": "2024-01-25 07:46:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "hubert-large-asr", - "url": "https://huggingface.co./TKU410410103/hubert-large-japanese-asr", - "project_name": "hubert-large-japanese-asr", - "downloads": 659, + "description": "luke-japanese-large-lite luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": "https://huggingface.co./studio-ousia/luke-japanese-large-lite", + "project_name": "luke-japanese-large-lite", + "downloads": 645, "source": "Hugging Face", - "score": -0.09539630638753598, - "first_commit": "2024-04-09 03:01:08", - "latest_commit": "2024-04-14 13:21:01", + "score": -0.09705066967889181, + "first_commit": "2022-11-07 14:26:40", + "latest_commit": "2022-11-09 11:19:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Japanese character-level GPT-2 Small Model description This is a Japanese character-level GPT-2 Small (90M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/gpt2-small-japanese-char", - "project_name": "gpt2-small-japanese-char", - "downloads": 657, + "description": "Japanese-StableLM-Base-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-base-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on Llama-2-7b that has been fine-tuned on a diverse collection of Japanese data, with the intent of maximizing downstream performance on Japanese language tasks.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-base-ja_vocab-beta-7b", + "project_name": "japanese-stablelm-base-ja_vocab-beta-7b", + "downloads": 642, "source": "Hugging Face", - "score": 
-0.09542458663341909, - "first_commit": "2023-04-18 08:24:55", - "latest_commit": "2023-05-08 10:08:13", + "score": -0.09709449355116076, + "first_commit": "2023-10-30 07:49:15", + "latest_commit": "2023-12-19 06:45:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.88 }, { - "description": "ELYZA-japanese-Llama-2-7b-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-7bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-7b-gguf", - "project_name": "ELYZA-japanese-Llama-2-7b-gguf", - "downloads": 656, + "description": "japanese-gpt-neox-small This repository provides a small-sized Japanese GPT-NeoX model.", + "url": "https://huggingface.co./rinna/japanese-gpt-neox-small", + "project_name": "japanese-gpt-neox-small", + "downloads": 636, "source": "Hugging Face", - "score": -0.09543872675636064, - "first_commit": "2023-08-29 06:32:01", - "latest_commit": "2023-11-16 14:27:12", + "score": -0.09718214129569867, + "first_commit": "2022-08-31 05:58:25", + "latest_commit": "2024-07-20 07:53:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.20400000000000001 }, { - "description": "Parakeet TDT-CTC 0.6B (ja) | | parakeet-tdt_ctc-0.6b-ja is an ASR model that transcribes Japanese speech with Punctuations.", - "url": "https://huggingface.co./nvidia/parakeet-tdt_ctc-0.6b-ja", - "project_name": "parakeet-tdt_ctc-0.6b-ja", - "downloads": 655, + "description": "Sarashina1-65B", + "url": "https://huggingface.co./sbintuitions/sarashina1-65b", + "project_name": "sarashina1-65b", + "downloads": 630, "source": "Hugging Face", - "score": -0.09545286687930221, - "first_commit": "2024-05-13 15:39:30", - "latest_commit": "2024-05-17 17:20:17", + "score": -0.09726978904023659, + "first_commit": "2024-06-07 11:57:56", + "latest_commit": "2024-06-27 06:56:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "hotchpotch/japanese-reranker-cross-encoder-small-v1 日本語で学習させた Reranker (CrossEncoder) シリーズです。 ", - "url": "https://huggingface.co./hotchpotch/japanese-reranker-cross-encoder-small-v1", - "project_name": "japanese-reranker-cross-encoder-small-v1", - "downloads": 653, + "description": "HODACHI様の Llama-3.1-8B-EZO-1.1-it をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama-3.1-8B-EZO-1.1-it-GGUF", + "project_name": "Llama-3.1-8B-EZO-1.1-it-GGUF", + "downloads": 630, "source": "Hugging Face", - "score": -0.09548114712518532, - "first_commit": "2024-03-28 04:31:45", - "latest_commit": "2024-04-01 02:39:19", + "score": -0.09726978904023659, + "first_commit": "2024-07-31 12:12:01", + "latest_commit": "2024-07-31 18:13:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "aya-23-35B-gguf CohereForAIさんが公開しているaya-23-35Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/aya-23-35B-gguf", - "project_name": "aya-23-35B-gguf", - "downloads": 641, + "description": "japanese-large-lm-1.7b-instruction-sft This repository provides a 1.7B parameters Japanese language model, fine-tuned and trained by LINE Corporation.", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft", + "project_name": "japanese-large-lm-1.7b-instruction-sft", + "downloads": 628, "source": "Hugging Face", - "score": -0.09565082860048399, - "first_commit": "2024-05-26 16:32:27", - "latest_commit": "2024-05-27 00:47:56", + "score": 
-0.09729900495508256, + "first_commit": "2023-08-14 17:19:11", + "latest_commit": "2023-08-14 17:19:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-StableLM-Instruct-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on japanese-stablelm-ja_vocab-beta-7b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-ja_vocab-beta-7b", - "project_name": "japanese-stablelm-instruct-ja_vocab-beta-7b", - "downloads": 640, + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-70b-NVE-instruct-hf", + "project_name": "Swallow-70b-NVE-instruct-hf", + "downloads": 621, "source": "Hugging Face", - "score": -0.09566496872342554, - "first_commit": "2023-10-30 07:49:38", - "latest_commit": "2023-12-19 06:46:01", + "score": -0.09740126065704345, + "first_commit": "2023-12-13 03:56:30", + "latest_commit": "2024-07-06 15:18:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.0 + }, + { + "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.", + "url": "https://huggingface.co./tokyotech-llm/Swallow-7b-NVE-hf", + "project_name": "Swallow-7b-NVE-hf", + "downloads": 619, + "source": "Hugging Face", + "score": -0.09743047657188943, + "first_commit": "2023-11-30 09:02:26", + "latest_commit": "2024-06-29 08:56:18", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF This is quantized version of DataPilot/Llama3.1-ArrowSE-v0.4 created using llama.cpp Original Model Card 概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", + "url": "https://huggingface.co./QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF", + "project_name": "Llama3.1-ArrowSE-v0.4-GGUF", + "downloads": 606, + "source": "Hugging Face", + "score": -0.09762038001838824, + "first_commit": "2024-07-28 06:17:48", + "latest_commit": "2024-07-28 06:57:40", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "uniTKU-hubert-japanese-asr", + "url": "https://huggingface.co./TKU410410103/uniTKU-hubert-japanese-asr", + "project_name": "uniTKU-hubert-japanese-asr", + "downloads": 602, + "source": "Hugging Face", + "score": -0.09767881184808018, + "first_commit": "2024-04-20 14:59:52", + "latest_commit": "2024-04-22 18:37:33", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.0945 }, { "description": "JQaRA : Japanese Question Answering with Retrieval Augmentation - 検索拡張(RAG)評価のための日本語 Q&A データセット 高性能な LLM の台頭に伴い、LLM を用いた質疑応答のユースケースが増加しています。", "url": "https://huggingface.co./datasets/hotchpotch/JQaRA", "project_name": "JQaRA", - "downloads": 630, + "downloads": 602, "source": "Hugging Face", - "score": -0.0958063699528411, + "score": -0.09767881184808018, "first_commit": "2024-03-03 01:58:34", "latest_commit": "2024-08-10 02:56:05", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null 
 },
 {
- "description": "japanese-gpt-neox-small This repository provides a small-sized Japanese GPT-NeoX model.",
- "url": "https://huggingface.co./rinna/japanese-gpt-neox-small",
- "project_name": "japanese-gpt-neox-small",
- "downloads": 628,
+ "description": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf pfnetさんが公開しているLlama3-Preferred-MedSwallow-70Bのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/pfnet-Llama3-Preferred-MedSwallow-70B-gguf",
+ "project_name": "pfnet-Llama3-Preferred-MedSwallow-70B-gguf",
+ "downloads": 586,
 "source": "Hugging Face",
- "score": -0.09583465019872421,
- "first_commit": "2022-08-31 05:58:25",
- "latest_commit": "2024-07-20 07:53:40",
+ "score": -0.09791253916684796,
+ "first_commit": "2024-07-18 15:45:16",
+ "latest_commit": "2024-07-19 09:14:38",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 70.6
 },
 {
- "description": "This repository is publicly accessible, but you have to accept the conditions to access its files and content.",
- "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b",
- "project_name": "japanese-stablelm-instruct-alpha-7b",
- "downloads": 622,
+ "description": "Sarashina1-13B",
+ "url": "https://huggingface.co./sbintuitions/sarashina1-13b",
+ "project_name": "sarashina1-13b",
+ "downloads": 583,
 "source": "Hugging Face",
- "score": -0.09591949093637354,
- "first_commit": null,
- "latest_commit": null,
+ "score": -0.09795636303911691,
+ "first_commit": "2024-06-07 11:56:53",
+ "latest_commit": "2024-06-27 06:56:06",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "llm-lora-classification",
- "url": "https://github.com/hppRC/llm-lora-classification",
- "project_name": "llm-lora-classification",
- "stargazers_count": 83,
- "source": "GitHub",
- "score": -0.0959315296250411,
- "first_commit": "2023-07-17 12:42:57",
- "latest_commit": "2023-07-22 19:46:45",
- "languages": [
- "Python"
- ],
- "model_or_dataset": null
- },
- {
- "description": "Word2vec (word to vectors) approach for Japanese language using Gensim and Mecab.",
- "url": "https://github.com/philipperemy/japanese-words-to-vectors",
- "project_name": "japanese-words-to-vectors",
- "stargazers_count": 83,
- "source": "GitHub",
- "score": -0.0959315296250411,
- "first_commit": "2016-09-04 09:43:00",
- "latest_commit": "2020-08-09 19:48:23",
- "languages": [
- "Python"
- ],
- "model_or_dataset": "model"
- },
- {
- "description": "Sarashina1-13B",
- "url": "https://huggingface.co./sbintuitions/sarashina1-13b",
- "project_name": "sarashina1-13b",
- "downloads": 620,
- "source": "Hugging Face",
- "score": -0.09594777118225666,
- "first_commit": "2024-06-07 11:56:53",
- "latest_commit": "2024-06-27 06:56:06",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "HODACHI様の Llama-3.1-8B-EZO-1.1-it をGGUF形式に変換したものです。 ",
- "url": "https://huggingface.co./MCZK/Llama-3.1-8B-EZO-1.1-it-GGUF",
- "project_name": "Llama-3.1-8B-EZO-1.1-it-GGUF",
- "downloads": 612,
- "source": "Hugging Face",
- "score": -0.0960608921657891,
- "first_commit": "2024-07-31 12:12:01",
- "latest_commit": "2024-07-31 18:13:59",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "Model Card for Japanese BART base Model description This is a Japanese BART base model pre-trained on Japanese Wikipedia.",
- "url": "https://huggingface.co./ku-nlp/bart-base-japanese",
- "project_name": "bart-base-japanese",
- "downloads": 611,
- "source": "Hugging Face",
- "score": -0.09607503228873066,
- "first_commit": "2023-05-09 07:00:51",
- "latest_commit": "2023-05-12 11:03:20",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "ELYZA-japanese-Llama-2-13b-fast-gguf ELYZAさんが公開しているELYZA-japanese-Llama-2-13b-fastのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/ELYZA-japanese-Llama-2-13b-fast-gguf",
- "project_name": "ELYZA-japanese-Llama-2-13b-fast-gguf",
- "downloads": 606,
- "source": "Hugging Face",
- "score": -0.09614573290343845,
- "first_commit": "2023-12-27 10:40:52",
- "latest_commit": "2023-12-27 13:18:46",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "uniTKU-hubert-japanese-asr",
- "url": "https://huggingface.co./TKU410410103/uniTKU-hubert-japanese-asr",
- "project_name": "uniTKU-hubert-japanese-asr",
- "downloads": 597,
+ "description": "Chat & support: TheBloke's Discord server Want to contribute?",
+ "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GGUF",
+ "project_name": "japanese-stablelm-instruct-beta-70B-GGUF",
+ "downloads": 581,
 "source": "Hugging Face",
- "score": -0.09627299400991245,
- "first_commit": "2024-04-20 14:59:52",
- "latest_commit": "2024-04-22 18:37:33",
+ "score": -0.09798557895396288,
+ "first_commit": "2023-11-02 15:45:24",
+ "latest_commit": "2023-11-02 18:22:05",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 69.0
 },
 {
 "description": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-70b-instruct-v0.1のggufフォーマット変換版です。 ",
 "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-70b-instruct-v0.1-gguf",
 "project_name": "tokyotech-llm-Swallow-70b-instruct-v0.1-gguf",
- "downloads": 594,
+ "downloads": 580,
 "source": "Hugging Face",
- "score": -0.09631541437873711,
+ "score": -0.09800018691138587,
 "first_commit": "2024-05-03 09:00:00",
 "latest_commit": "2024-05-04 06:52:16",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 69.2
 },
 {
- "description": "Sarashina1-7B This repository provides Japanese language models trained by SB Intuitions.",
- "url": "https://huggingface.co./sbintuitions/sarashina1-7b",
- "project_name": "sarashina1-7b",
- "downloads": 593,
+ "description": "Model Card for Japanese character-level GPT-2 Small Model description This is a Japanese character-level GPT-2 Small (90M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.",
+ "url": "https://huggingface.co./ku-nlp/gpt2-small-japanese-char",
+ "project_name": "gpt2-small-japanese-char",
+ "downloads": 574,
 "source": "Hugging Face",
- "score": -0.09632955450167867,
- "first_commit": "2024-06-07 10:13:21",
- "latest_commit": "2024-06-27 06:55:38",
+ "score": -0.09808783465592379,
+ "first_commit": "2023-04-18 08:24:55",
+ "latest_commit": "2023-05-08 10:08:13",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 0.10300000000000001
 },
 {
- "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.",
- "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-NVE-hf",
- "project_name": "Swallow-13b-NVE-hf",
- "downloads": 590,
+ "description": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-13b-instruct-v0.1のggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-13b-instruct-v0.1-gguf",
+ "project_name": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf",
+ "downloads": 572,
 "source": "Hugging Face",
- "score": -0.09637197487050334,
- "first_commit": "2024-01-30 11:39:05",
- "latest_commit": "2024-06-29 08:56:22",
+ "score": -0.09811705057076975,
+ "first_commit": "2024-05-02 14:18:27",
+ "latest_commit": "2024-05-03 04:36:24",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 13.1
 },
 {
- "description": "QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF This is quantized version of DataPilot/Llama3.1-ArrowSE-v0.4 created using llama.cpp Original Model Card 概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ",
- "url": "https://huggingface.co./QuantFactory/Llama3.1-ArrowSE-v0.4-GGUF",
- "project_name": "Llama3.1-ArrowSE-v0.4-GGUF",
- "downloads": 587,
+ "description": "Converted from clu-ling/whisper-large-v2-japanese-5k-steps using CTranslate2.",
+ "url": "https://huggingface.co./zh-plus/faster-whisper-large-v2-japanese-5k-steps",
+ "project_name": "faster-whisper-large-v2-japanese-5k-steps",
+ "downloads": 571,
 "source": "Hugging Face",
- "score": -0.096414395239328,
- "first_commit": "2024-07-28 06:17:48",
- "latest_commit": "2024-07-28 06:57:40",
+ "score": -0.09813165852819274,
+ "first_commit": "2023-07-03 08:29:37",
+ "latest_commit": "2023-07-03 18:42:31",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
 "description": "albert-base-japanese-v1 日本語事前学習済みALBERTモデルです How to use ファインチューニング このモデルはPreTrainedモデルです基本的には各種タスク用にファインチューニングして使用されることを想定しています Fill-Mask このモデルではTokenizerにSentencepieceを利用していますそのままでは[MASK]トークンのあとに余計なトークンが混入する問題があるので、利用する際には以下のようにする必要があります for PyTorch from transformers import ( AlbertForMaskedLM, AlbertTokenizerFast ) import torch tokenizer = AlbertTokenizerFast.from_pretrained(\"ken11/albert-base-japanese-v1\")",
 "url": "https://huggingface.co./ken11/albert-base-japanese-v1",
 "project_name": "albert-base-japanese-v1",
- "downloads": 582,
+ "downloads": 569,
 "source": "Hugging Face",
- "score": -0.09648509585403578,
+ "score": -0.09816087444303871,
 "first_commit": "2021-12-19 17:07:14",
 "latest_commit": "2021-12-22 03:04:30",
 "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "Chat & support: TheBloke's Discord server Want to contribute?",
- "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GGUF",
- "project_name": "japanese-stablelm-instruct-beta-70B-GGUF",
- "downloads": 581,
- "source": "Hugging Face",
- "score": -0.09649923597697734,
- "first_commit": "2023-11-02 15:45:24",
- "latest_commit": "2023-11-02 18:22:05",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-MS-7b-instruct-v0.1のggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf",
- "project_name": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf",
- "downloads": 578,
- "source": "Hugging Face",
- "score": -0.096541656345802,
- "first_commit": "2024-05-02 13:37:22",
- "latest_commit": "2024-05-03 04:35:34",
- "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "tokyotech-llm様の Llama-3-Swallow-8B-Instruct-v0.1 をGGUF形式に変換したものです。 ",
- "url": "https://huggingface.co./MCZK/Llama-3-Swallow-8B-Instruct-v0.1-GGUF",
- "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-GGUF",
- "downloads": 577,
- "source": "Hugging Face",
- "score": -0.09655579646874356,
- "first_commit": "2024-07-01 11:45:22",
- "latest_commit": "2024-07-01 17:54:05",
- "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
 "description": "Overview This dataset provides a convenient and user-friendly format of data from Aozora Bunko (青空文庫), a website that compiles public-domain books in Japan, ideal for Machine Learning applications.",
 "url": "https://huggingface.co./datasets/globis-university/aozorabunko-clean",
 "project_name": "aozorabunko-clean",
- "downloads": 575,
+ "downloads": 562,
 "source": "Hugging Face",
- "score": -0.09658407671462667,
+ "score": -0.09826313014499961,
 "first_commit": "2023-06-26 13:31:28",
 "latest_commit": "2023-10-27 13:22:32",
 "languages": [],
- "model_or_dataset": "dataset"
+ "model_or_dataset": "dataset",
+ "model_size": null
 },
 {
- "description": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-13b-instruct-v0.1のggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-13b-instruct-v0.1-gguf",
- "project_name": "tokyotech-llm-Swallow-13b-instruct-v0.1-gguf",
- "downloads": 573,
+ "description": "alabnii/jmedroberta-base-manbyo-wordpiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).",
+ "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece",
+ "project_name": "jmedroberta-base-manbyo-wordpiece",
+ "downloads": 561,
 "source": "Hugging Face",
- "score": -0.09661235696050978,
- "first_commit": "2024-05-02 14:18:27",
- "latest_commit": "2024-05-03 04:36:24",
+ "score": -0.09827773810242259,
+ "first_commit": "2022-12-22 17:17:03",
+ "latest_commit": "2023-03-08 01:44:36",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "Leia-Swallow-7B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.",
- "url": "https://huggingface.co./leia-llm/Leia-Swallow-7b",
- "project_name": "Leia-Swallow-7b",
- "downloads": 570,
+ "description": "Chat & support: TheBloke's Discord server Want to contribute?",
+ "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GGUF",
+ "project_name": "japanese-stablelm-instruct-gamma-7B-GGUF",
+ "downloads": 560,
 "source": "Hugging Face",
- "score": -0.09665477732933445,
- "first_commit": "2024-04-17 07:12:28",
- "latest_commit": "2024-04-17 10:29:56",
+ "score": -0.09829234605984558,
+ "first_commit": "2023-10-28 19:03:17",
+ "latest_commit": "2023-10-28 19:07:41",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.24
 },
 {
- "description": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-7b-instruct-v0.1のggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-7b-instruct-v0.1-gguf",
- "project_name": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf",
- "downloads": 561,
+ "description": "This repository is publicly accessible, but you have to accept the conditions to access its files and content.",
+ "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-alpha-7b",
+ "project_name": "japanese-stablelm-instruct-alpha-7b",
+ "downloads": 557,
 "source": "Hugging Face",
- "score": -0.09678203843580846,
- "first_commit": "2024-05-03 04:09:27",
- "latest_commit": "2024-05-03 04:53:43",
+ "score": -0.09833616993211454,
+ "first_commit": null,
+ "latest_commit": null,
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "bert-base-japanese-v3-jnli 「大規模言語モデル入門」の第5章で紹介している(自然言語推論)のモデルです。 ",
- "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jnli",
- "project_name": "bert-base-japanese-v3-jnli",
- "downloads": 559,
+ "description": "Japanese-Starling-ChatV-7B-GGUF GGUF conversion of \"Japanese-Starling-ChatV-7B\" \"Japanese-Starling-ChatV-7B\" is a Japanese chat model built on top of \"chatntq-ja-7b-v1.0\", originally based on Mistral-7B-v0.1.",
+ "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B-GGUF",
+ "project_name": "Japanese-Starling-ChatV-7B-GGUF",
+ "downloads": 555,
 "source": "Hugging Face",
- "score": -0.09681031868169157,
- "first_commit": "2023-06-12 14:15:16",
- "latest_commit": "2023-07-24 06:49:14",
+ "score": -0.0983653858469605,
+ "first_commit": "2024-04-14 12:42:01",
+ "latest_commit": "2024-04-20 01:23:10",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.24
 },
 {
- "description": "fio-base-japanese-v0.1 日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!",
- "url": "https://huggingface.co./bclavie/fio-base-japanese-v0.1",
- "project_name": "fio-base-japanese-v0.1",
- "downloads": 545,
+ "description": "tokyotech-llm様の Llama-3-Swallow-8B-Instruct-v0.1 をGGUF形式に変換したものです。 ",
+ "url": "https://huggingface.co./MCZK/Llama-3-Swallow-8B-Instruct-v0.1-GGUF",
+ "project_name": "Llama-3-Swallow-8B-Instruct-v0.1-GGUF",
+ "downloads": 553,
 "source": "Hugging Face",
- "score": -0.09700828040287335,
- "first_commit": "2023-12-18 11:01:07",
- "latest_commit": "2023-12-19 10:28:16",
+ "score": -0.09839460176180648,
+ "first_commit": "2024-07-01 11:45:22",
+ "latest_commit": "2024-07-01 17:54:05",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
 },
 {
- "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。",
- "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct",
- "project_name": "Fugaku-LLM-13B-instruct",
+ "description": "Sarashina1-7B This repository provides Japanese language models trained by SB Intuitions.",
+ "url": "https://huggingface.co./sbintuitions/sarashina1-7b",
+ "project_name": "sarashina1-7b",
 "downloads": 543,
 "source": "Hugging Face",
- "score": -0.09703656064875646,
- "first_commit": null,
- "latest_commit": null,
+ "score": -0.09854068133603633,
+ "first_commit": "2024-06-07 10:13:21",
+ "latest_commit": "2024-06-27 06:55:38",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "Japanese-Starling-ChatV-7B-GGUF GGUF conversion of \"Japanese-Starling-ChatV-7B\" \"Japanese-Starling-ChatV-7B\" is a Japanese chat model built on top of \"chatntq-ja-7b-v1.0\", originally based on Mistral-7B-v0.1.",
- "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B-GGUF",
- "project_name": "Japanese-Starling-ChatV-7B-GGUF",
- "downloads": 541,
+ "description": "Japanese-StableLM-Instruct-JAVocab-Beta-7B A cute robot wearing a kimono writes calligraphy with one single brush — Stable Diffusion XL Model Description japanese-stablelm-instruct-ja_vocab-beta-7b is a 7B-parameter decoder-only language model based on japanese-stablelm-ja_vocab-beta-7b and further fine tuned on Databricks Dolly-15k, Anthropic HH, and other public data.",
+ "url": "https://huggingface.co./stabilityai/japanese-stablelm-instruct-ja_vocab-beta-7b",
+ "project_name": "japanese-stablelm-instruct-ja_vocab-beta-7b",
+ "downloads": 543,
 "source": "Hugging Face",
- "score": -0.09706484089463957,
- "first_commit": "2024-04-14 12:42:01",
- "latest_commit": "2024-04-20 01:23:10",
+ "score": -0.09854068133603633,
+ "first_commit": "2023-10-30 07:49:38",
+ "latest_commit": "2023-12-19 06:46:01",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 6.88
 },
 {
- "description": "Leia-Swallow-13B LEIA is a training technique for autoregressive LLMs that effectively improves their performance in languages other than English by enhancing cross-lingual knowledge transfer from English to a target language.",
- "url": "https://huggingface.co./leia-llm/Leia-Swallow-13b",
- "project_name": "Leia-Swallow-13b",
- "downloads": 538,
+ "description": "Model Card for Japanese BART base Model description This is a Japanese BART base model pre-trained on Japanese Wikipedia.",
+ "url": "https://huggingface.co./ku-nlp/bart-base-japanese",
+ "project_name": "bart-base-japanese",
+ "downloads": 537,
 "source": "Hugging Face",
- "score": -0.09710726126346424,
- "first_commit": "2024-04-17 07:32:11",
- "latest_commit": "2024-04-18 05:21:10",
+ "score": -0.09862832908057424,
+ "first_commit": "2023-05-09 07:00:51",
+ "latest_commit": "2023-05-12 11:03:20",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf ryota39さんが公開しているPhi-3-mini-4k-instruct-dpoのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/ryota39-Phi-3-mini-4k-instruct-dpo-gguf",
- "project_name": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf",
- "downloads": 531,
+ "description": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-MS-7b-instruct-v0.1のggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf",
+ "project_name": "tokyotech-llm-Swallow-MS-7b-instruct-v0.1-gguf",
+ "downloads": 535,
 "source": "Hugging Face",
- "score": -0.09720624212405513,
- "first_commit": "2024-04-29 14:27:31",
- "latest_commit": "2024-04-29 16:53:45",
+ "score": -0.09865754499542022,
+ "first_commit": "2024-05-02 13:37:22",
+ "latest_commit": "2024-05-03 04:35:34",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.33
 },
 {
- "description": "alabnii/jmedroberta-base-manbyo-wordpiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).",
- "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece",
- "project_name": "jmedroberta-base-manbyo-wordpiece",
- "downloads": 527,
+ "description": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf tokyotech-llmさんが公開しているSwallow-7b-instruct-v0.1のggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/tokyotech-llm-Swallow-7b-instruct-v0.1-gguf",
+ "project_name": "tokyotech-llm-Swallow-7b-instruct-v0.1-gguf",
+ "downloads": 528,
 "source": "Hugging Face",
- "score": -0.09726280261582135,
- "first_commit": "2022-12-22 17:17:03",
- "latest_commit": "2023-03-08 01:44:36",
+ "score": -0.09875980069738112,
+ "first_commit": "2024-05-03 04:09:27",
+ "latest_commit": "2024-05-03 04:53:43",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 6.83
 },
 {
- "description": "This is a model for named entity recognition of Japanese medical documents.",
- "url": "https://huggingface.co./sociocom/MedNER-CR-JA",
- "project_name": "MedNER-CR-JA",
- "downloads": 525,
+ "description": "c4ai-command-r-plus-gguf CohereForAIさんが公開しているc4ai-command-r-plusのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/c4ai-command-r-plus-gguf",
+ "project_name": "c4ai-command-r-plus-gguf",
+ "downloads": 524,
 "source": "Hugging Face",
- "score": -0.09729108286170446,
- "first_commit": "2022-08-23 03:30:43",
- "latest_commit": "2024-07-31 07:44:00",
+ "score": -0.09881823252707306,
+ "first_commit": "2024-04-22 14:46:41",
+ "latest_commit": "2024-04-23 16:13:37",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 104.0
 },
 {
- "description": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf pfnetさんが公開しているnekomata-14b-pfn-qfin-inst-mergeのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf",
- "project_name": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf",
- "downloads": 522,
- "source": "Hugging Face",
- "score": -0.09733350323052913,
- "first_commit": "2024-04-23 14:53:08",
- "latest_commit": "2024-04-24 14:39:32",
+ "description": "External dictionary importer for Yomichan.",
+ "url": "https://github.com/FooSoft/yomichan-import",
+ "project_name": "yomichan-import",
+ "stargazers_count": 82,
+ "source": "GitHub",
+ "score": -0.09888338315321071,
+ "first_commit": "2016-07-26 20:24:33",
+ "latest_commit": "2023-02-25 12:43:03",
+ "languages": [
+ "Go"
+ ],
+ "model_or_dataset": null
+ },
+ {
+ "description": "日英変換・英語略語展開のための IME 追加辞書 orange_book 日本語から英語への和英変換や英語略語の展開を Google 日本語入力や ATOK などで可能にする IME 拡張辞書",
+ "url": "https://github.com/peaceiris/google-ime-dictionary",
+ "project_name": "google-ime-dictionary",
+ "stargazers_count": 82,
+ "source": "GitHub",
+ "score": -0.09888338315321071,
+ "first_commit": "2018-09-13 01:54:32",
+ "latest_commit": "2023-01-16 10:47:31",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "dataset"
 },
 {
 "description": "Chat & support: TheBloke's Discord server Want to contribute?",
 "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-GGUF",
 "project_name": "japanese-stablelm-instruct-beta-7B-GGUF",
- "downloads": 522,
+ "downloads": 519,
 "source": "Hugging Face",
- "score": -0.09733350323052913,
+ "score": -0.09889127231418798,
 "first_commit": "2023-11-03 01:04:31",
 "latest_commit": "2023-11-03 12:54:55",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 6.74
 },
 {
- "description": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、約10000件の日本語instruction tuning用データセットです。 ",
- "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-Conversations-Magpie-Nemotron-4-10k",
- "project_name": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k",
- "downloads": 508,
- "source": "Hugging Face",
- "score": -0.09753146495171092,
- "first_commit": "2024-07-05 13:53:45",
- "latest_commit": "2024-07-05 13:57:08",
- "languages": [],
- "model_or_dataset": "dataset"
- },
- {
- "description": "c4ai-command-r-plus-gguf CohereForAIさんが公開しているc4ai-command-r-plusのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/c4ai-command-r-plus-gguf",
- "project_name": "c4ai-command-r-plus-gguf",
- "downloads": 507,
+ "description": "Swallow Our Swallow model has undergone continual pre-training from the Llama 2 family, primarily with the addition of Japanese language data.",
+ "url": "https://huggingface.co./tokyotech-llm/Swallow-13b-NVE-hf",
+ "project_name": "Swallow-13b-NVE-hf",
+ "downloads": 518,
 "source": "Hugging Face",
- "score": -0.09754560507465247,
- "first_commit": "2024-04-22 14:46:41",
- "latest_commit": "2024-04-23 16:13:37",
+ "score": -0.09890588027161097,
+ "first_commit": "2024-01-30 11:39:05",
+ "latest_commit": "2024-06-29 08:56:22",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf umiyukiさんが公開しているUmievo-itr012-Gleipnir-7Bのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf",
- "project_name": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf",
- "downloads": 501,
+ "description": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf pfnetさんが公開しているnekomata-14b-pfn-qfin-inst-mergeのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf",
+ "project_name": "pfnet-nekomata-14b-pfn-qfin-inst-merge-gguf",
+ "downloads": 516,
 "source": "Hugging Face",
- "score": -0.0976304458123018,
- "first_commit": "2024-05-29 15:05:32",
- "latest_commit": "2024-05-29 15:53:40",
+ "score": -0.09893509618645695,
+ "first_commit": "2024-04-23 14:53:08",
+ "latest_commit": "2024-04-24 14:39:32",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "Llama3-ArrowSE-8B-v0.3-gguf DataPilotさんが公開しているLlama3-ArrowSE-8B-v0.3のggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/Llama3-ArrowSE-8B-v0.3-gguf",
- "project_name": "Llama3-ArrowSE-8B-v0.3-gguf",
- "downloads": 497,
+ "description": "mt5_summarize_japanese (Japanese caption : 日本語の要約のモデル)",
+ "url": "https://huggingface.co./tsmatz/mt5_summarize_japanese",
+ "project_name": "mt5_summarize_japanese",
+ "downloads": 510,
 "source": "Hugging Face",
- "score": -0.09768700630406803,
- "first_commit": "2024-07-07 07:27:12",
- "latest_commit": "2024-07-07 09:30:16",
+ "score": -0.09902274393099486,
+ "first_commit": "2022-11-26 10:51:27",
+ "latest_commit": "2024-07-12 00:01:31",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 0.3
 },
 {
 "description": "YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf yuisekiさんが公開しているYuisekinAIEvol-Mistral-7B-ja-math-v0.1.1のggufフォーマット変換版です。 ",
 "url": "https://huggingface.co./mmnga/YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf",
 "project_name": "YuisekinAIEvol-Mistral-7B-ja-math-v0.1.1-gguf",
- "downloads": 497,
+ "downloads": 503,
 "source": "Hugging Face",
- "score": -0.09768700630406803,
+ "score": -0.09912499963295576,
 "first_commit": "2024-04-29 14:18:07",
 "latest_commit": "2024-04-29 15:52:08",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.24
 },
 {
- "description": "mt5_summarize_japanese (Japanese caption : 日本語の要約のモデル)",
- "url": "https://huggingface.co./tsmatz/mt5_summarize_japanese",
- "project_name": "mt5_summarize_japanese",
- "downloads": 496,
+ "description": "rinna/japanese-hubert-large Overview This is a Japanese HuBERT Large model trained by rinna Co.",
+ "url": "https://huggingface.co./rinna/japanese-hubert-large",
+ "project_name": "japanese-hubert-large",
+ "downloads": 500,
 "source": "Hugging Face",
- "score": -0.09770114642700958,
- "first_commit": "2022-11-26 10:51:27",
- "latest_commit": "2024-07-12 00:01:31",
+ "score": -0.09916882350522471,
+ "first_commit": "2024-03-05 10:24:37",
+ "latest_commit": "2024-07-22 08:12:21",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 0.315
 },
 {
- "description": "luke-japanese-large-lite luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.",
- "url": "https://huggingface.co./studio-ousia/luke-japanese-large-lite",
- "project_name": "luke-japanese-large-lite",
- "downloads": 492,
+ "description": "bert-base-japanese-v3-jnli 「大規模言語モデル入門」の第5章で紹介している(自然言語推論)のモデルです。 ",
+ "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jnli",
+ "project_name": "bert-base-japanese-v3-jnli",
+ "downloads": 495,
 "source": "Hugging Face",
- "score": -0.0977577069187758,
- "first_commit": "2022-11-07 14:26:40",
- "latest_commit": "2022-11-09 11:19:36",
+ "score": -0.09924186329233964,
+ "first_commit": "2023-06-12 14:15:16",
+ "latest_commit": "2023-07-24 06:49:14",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf alfredplplさんが公開しているLlama-3-8B-Instruct-Jaのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/alfredplpl-Llama-3-8B-Instruct-Ja-gguf",
- "project_name": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf",
- "downloads": 483,
+ "description": "hh-rlhf-12k-ja This repository provides a human preference dataset developed by LLM-jp, a collaborative project launched in Japan.",
+ "url": "https://huggingface.co./datasets/llm-jp/hh-rlhf-12k-ja",
+ "project_name": "hh-rlhf-12k-ja",
+ "downloads": 492,
 "source": "Hugging Face",
- "score": -0.0978849680252498,
- "first_commit": "2024-04-23 14:18:57",
- "latest_commit": "2024-04-23 15:24:47",
+ "score": -0.0992856871646086,
+ "first_commit": "2024-02-04 21:19:53",
+ "latest_commit": "2024-02-04 21:45:59",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "dataset",
+ "model_size": null
 },
 {
 "description": "aya-23-8B-gguf CohereForAIさんが公開しているaya-23-8Bのggufフォーマット変換版です。 ",
 "url": "https://huggingface.co./mmnga/aya-23-8B-gguf",
 "project_name": "aya-23-8B-gguf",
- "downloads": 481,
+ "downloads": 485,
 "source": "Hugging Face",
- "score": -0.09791324827113292,
+ "score": -0.0993879428665695,
 "first_commit": "2024-05-26 16:32:53",
 "latest_commit": "2024-05-27 00:54:36",
 "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "日本語版CLIPモデル This is a CLIP text/image encoder model for Japanese. ",
- "url": "https://huggingface.co./sonoisa/clip-vit-b-32-japanese-v1",
- "project_name": "clip-vit-b-32-japanese-v1",
- "downloads": 478,
- "source": "Hugging Face",
- "score": -0.09795566863995758,
- "first_commit": "2022-02-15 15:47:34",
- "latest_commit": "2022-04-19 14:18:58",
- "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
 },
 {
- "description": "rinna/japanese-gpt-neox-3.6b-instruction-ppo rinnaさんが公開しているjapanese-gpt-neox-3.6b-instruction-ppoのgguf変換版です。 ",
- "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf",
- "project_name": "rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf",
- "downloads": 475,
+ "description": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf alfredplplさんが公開しているLlama-3-8B-Instruct-Jaのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/alfredplpl-Llama-3-8B-Instruct-Ja-gguf",
+ "project_name": "alfredplpl-Llama-3-8B-Instruct-Ja-gguf",
+ "downloads": 480,
 "source": "Hugging Face",
- "score": -0.09799808900878225,
- "first_commit": "2023-09-02 17:52:26",
- "latest_commit": "2023-09-08 02:39:00",
+ "score": -0.09946098265368443,
+ "first_commit": "2024-04-23 14:18:57",
+ "latest_commit": "2024-04-23 15:24:47",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
 },
 {
 "description": "[Llama-3.1-70B-EZO-1.1-it] Model Card モデル情報 / Model Information このモデルは、Meta AI の Llama 3.1 をベースに、日本語タスクでの性能を向上させるためにファインチューニングを行ったものです。",
 "url": "https://huggingface.co./HODACHI/Llama-3.1-70B-EZO-1.1-it",
 "project_name": "Llama-3.1-70B-EZO-1.1-it",
- "downloads": 472,
+ "downloads": 479,
 "source": "Hugging Face",
- "score": -0.09804050937760692,
+ "score": -0.09947559061110742,
 "first_commit": "2024-07-29 01:35:35",
 "latest_commit": "2024-08-04 06:16:58",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 70.6
+ },
+ {
+ "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。",
+ "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct",
+ "project_name": "Fugaku-LLM-13B-instruct",
+ "downloads": 478,
+ "source": "Hugging Face",
+ "score": -0.09949019856853039,
+ "first_commit": null,
+ "latest_commit": null,
+ "languages": [],
+ "model_or_dataset": "model",
+ "model_size": 13.2
 },
 {
 "description": "HODACHI様の EZO-Common-T2-2B-gemma-2-it をGGUF形式に変換したものです。 ",
 "url": "https://huggingface.co./MCZK/EZO-Common-T2-2B-gemma-2-it-GGUF",
 "project_name": "EZO-Common-T2-2B-gemma-2-it-GGUF",
- "downloads": 468,
+ "downloads": 476,
 "source": "Hugging Face",
- "score": -0.09809706986937314,
+ "score": -0.09951941448337637,
 "first_commit": "2024-08-01 11:38:48",
 "latest_commit": "2024-08-01 13:42:20",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 2.61
 },
 {
- "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.",
- "url": "https://huggingface.co./stabilityai/japanese-stable-clip-vit-l-16",
- "project_name": "japanese-stable-clip-vit-l-16",
- "downloads": 464,
+ "description": "日本語版CLIPモデル This is a CLIP text/image encoder model for Japanese. ",
+ "url": "https://huggingface.co./sonoisa/clip-vit-b-32-japanese-v1",
+ "project_name": "clip-vit-b-32-japanese-v1",
+ "downloads": 469,
 "source": "Hugging Face",
- "score": -0.09815363036113937,
- "first_commit": null,
- "latest_commit": null,
+ "score": -0.09962167018533727,
+ "first_commit": "2022-02-15 15:47:34",
+ "latest_commit": "2022-04-19 14:18:58",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")",
- "url": "https://huggingface.co./if001/tiny_mixtral_ja",
- "project_name": "tiny_mixtral_ja",
- "downloads": 459,
+ "description": "Llama3-ArrowSE-8B-v0.3-gguf DataPilotさんが公開しているLlama3-ArrowSE-8B-v0.3のggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/Llama3-ArrowSE-8B-v0.3-gguf",
+ "project_name": "Llama3-ArrowSE-8B-v0.3-gguf",
+ "downloads": 468,
 "source": "Hugging Face",
- "score": -0.09822433097584715,
- "first_commit": "2024-01-22 15:02:21",
- "latest_commit": "2024-01-23 00:42:05",
+ "score": -0.09963627814276026,
+ "first_commit": "2024-07-07 07:27:12",
+ "latest_commit": "2024-07-07 09:30:16",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
 },
 {
- "description": "rinna/japanese-gpt-neox-3.6b rinnaさんが公開しているjapanese-gpt-neox-3.6bのgguf変換版です。 ",
- "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-gguf",
- "project_name": "rinna-japanese-gpt-neox-3.6b-gguf",
- "downloads": 459,
+ "description": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf ryota39さんが公開しているPhi-3-mini-4k-instruct-dpoのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/ryota39-Phi-3-mini-4k-instruct-dpo-gguf",
+ "project_name": "ryota39-Phi-3-mini-4k-instruct-dpo-gguf",
+ "downloads": 463,
 "source": "Hugging Face",
- "score": -0.09822433097584715,
- "first_commit": "2023-09-02 18:46:08",
- "latest_commit": "2023-09-08 02:37:19",
+ "score": -0.09970931792987518,
+ "first_commit": "2024-04-29 14:27:31",
+ "latest_commit": "2024-04-29 16:53:45",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 3.82
 },
 {
- "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.",
- "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese-hiragana",
- "project_name": "wav2vec2-large-xlsr-japanese-hiragana",
- "downloads": 455,
+ "description": "QuantFactory/Llama3-ArrowSE-8B-v0.3-GGUF This is quantized version of DataPilot/Llama3-ArrowSE-8B-v0.3 created using llama.cpp Original Model Card 概要 elyza/Llama-3-ELYZA-JP-8Bを元にchat vectorを用いて改良しAItuberに特化させました。 ",
+ "url": "https://huggingface.co./QuantFactory/Llama3-ArrowSE-8B-v0.3-GGUF",
+ "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF",
+ "downloads": 457,
 "source": "Hugging Face",
- "score": -0.09828089146761337,
- "first_commit": "2021-06-18 07:15:24",
- "latest_commit": "2023-02-08 00:36:47",
+ "score": -0.0997969656744131,
+ "first_commit": "2024-07-28 15:51:47",
+ "latest_commit": "2024-07-28 16:29:51",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
 },
 {
- "description": "Orion-14B 🌐English | 🇨",
- "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-RAG",
- "project_name": "Orion-14B-Chat-RAG",
+ "description": "Model Card for Japanese character-level DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.",
+ "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-char-wwm",
+ "project_name": "deberta-v2-base-japanese-char-wwm",
 "downloads": 453,
 "source": "Hugging Face",
- "score": -0.09830917171349648,
- "first_commit": "2024-01-16 12:19:08",
- "latest_commit": "2024-03-26 10:08:09",
+ "score": -0.09985539750410503,
+ "first_commit": "2023-01-18 13:55:30",
+ "latest_commit": "2023-03-26 03:32:27",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 0.122
 },
 {
- "description": "QuantFactory/Llama3-ArrowSE-8B-v0.3-GGUF This is quantized version of DataPilot/Llama3-ArrowSE-8B-v0.3 created using llama.cpp Original Model Card 概要 elyza/Llama-3-ELYZA-JP-8Bを元にchat vectorを用いて改良しAItuberに特化させました。 ",
- "url": "https://huggingface.co./QuantFactory/Llama3-ArrowSE-8B-v0.3-GGUF",
- "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF",
- "downloads": 453,
+ "description": "rinna/japanese-gpt-neox-3.6b-instruction-ppo rinnaさんが公開しているjapanese-gpt-neox-3.6b-instruction-ppoのgguf変換版です。 ",
+ "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf",
+ "project_name": "rinna-japanese-gpt-neox-3.6b-instruction-ppo-gguf",
+ "downloads": 450,
 "source": "Hugging Face",
- "score": -0.09830917171349648,
- "first_commit": "2024-07-28 15:51:47",
- "latest_commit": "2024-07-28 16:29:51",
+ "score": -0.099899221376374,
+ "first_commit": "2023-09-02 17:52:26",
+ "latest_commit": "2023-09-08 02:39:00",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 3.61
 },
 {
- "description": "Oumuamua-7b-instruct-v2 🚨 If you want to avoid outputs that appear to be literal translations, please prompt this model to role-play as a Japanese person.",
- "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct-v2",
- "project_name": "Oumuamua-7b-instruct-v2",
- "downloads": 448,
+ "description": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf umiyukiさんが公開しているUmievo-itr012-Gleipnir-7Bのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/umiyuki-Umievo-itr012-Gleipnir-7B-gguf",
+ "project_name": "umiyuki-Umievo-itr012-Gleipnir-7B-gguf",
+ "downloads": 446,
 "source": "Hugging Face",
- "score": -0.09837987232820426,
- "first_commit": "2024-06-14 07:08:07",
- "latest_commit": "2024-06-19 22:29:07",
+ "score": -0.09995765320606594,
+ "first_commit": "2024-05-29 15:05:32",
+ "latest_commit": "2024-05-29 15:53:40",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.24
 },
 {
- "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.",
- "url": "https://huggingface.co./studio-ousia/luke-japanese-base",
- "project_name": "luke-japanese-base",
- "downloads": 447,
+ "description": "rinna/japanese-gpt-neox-3.6b rinnaさんが公開しているjapanese-gpt-neox-3.6bのgguf変換版です。 ",
+ "url": "https://huggingface.co./mmnga/rinna-japanese-gpt-neox-3.6b-gguf",
+ "project_name": "rinna-japanese-gpt-neox-3.6b-gguf",
+ "downloads": 445,
 "source": "Hugging Face",
- "score": -0.09839401245114582,
- "first_commit": "2022-10-25 06:30:23",
- "latest_commit": "2022-11-09 15:23:20",
+ "score": -0.09997226116348892,
+ "first_commit": "2023-09-02 18:46:08",
+ "latest_commit": "2023-09-08 02:37:19",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 3.61
 },
 {
- "description": "pfnet-nekomata-14b-pfn-qfin-gguf pfnetさんが公開しているnekomata-14b-pfn-qfinのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-gguf",
- "project_name": "pfnet-nekomata-14b-pfn-qfin-gguf",
- "downloads": 446,
+ "description": "Oumuamua-7b-instruct-v2 🚨 If you want to avoid outputs that appear to be literal translations, please prompt this model to role-play as a Japanese person.",
+ "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct-v2",
+ "project_name": "Oumuamua-7b-instruct-v2",
+ "downloads": 444,
 "source": "Hugging Face",
- "score": -0.09840815257408737,
- "first_commit": "2024-04-24 12:58:10",
- "latest_commit": "2024-04-24 14:46:15",
+ "score": -0.09998686912091191,
+ "first_commit": "2024-06-14 07:08:07",
+ "latest_commit": "2024-06-19 22:29:07",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.33
 },
 {
- "description": "hh-rlhf-12k-ja This repository provides a human preference dataset developed by LLM-jp, a collaborative project launched in Japan.",
- "url": "https://huggingface.co./datasets/llm-jp/hh-rlhf-12k-ja",
- "project_name": "hh-rlhf-12k-ja",
- "downloads": 442,
+ "description": "gemma-2-2b-it-gguf googleさんが公開しているgemma-2-2b-itのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/gemma-2-2b-it-gguf",
+ "project_name": "gemma-2-2b-it-gguf",
+ "downloads": 444,
 "source": "Hugging Face",
- "score": -0.0984647130658536,
- "first_commit": "2024-02-04 21:19:53",
- "latest_commit": "2024-02-04 21:45:59",
+ "score": -0.09998686912091191,
+ "first_commit": "2024-08-01 17:22:58",
+ "latest_commit": "2024-08-01 18:29:08",
 "languages": [],
- "model_or_dataset": "dataset"
+ "model_or_dataset": "model",
+ "model_size": 2.61
 },
 {
 "description": "DataPilot-ArrowPro-7B-RobinHood-gguf DataPilotさんが公開しているArrowPro-7B-RobinHoodのggufフォーマット変換版です。 ",
 "url": "https://huggingface.co./mmnga/DataPilot-ArrowPro-7B-RobinHood-gguf",
 "project_name": "DataPilot-ArrowPro-7B-RobinHood-gguf",
- "downloads": 437,
+ "downloads": 439,
 "source": "Hugging Face",
- "score": -0.09853541368056137,
+ "score": -0.10005990890802684,
 "first_commit": "2024-05-11 07:22:37",
 "latest_commit": "2024-05-11 13:43:09",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 7.24
 },
 {
- "description": "rinna/japanese-hubert-large Overview This is a Japanese HuBERT Large model trained by rinna Co.",
- "url": "https://huggingface.co./rinna/japanese-hubert-large",
- "project_name": "japanese-hubert-large",
- "downloads": 428,
+ "description": "pfnet-nekomata-14b-pfn-qfin-gguf pfnetさんが公開しているnekomata-14b-pfn-qfinのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/pfnet-nekomata-14b-pfn-qfin-gguf",
+ "project_name": "pfnet-nekomata-14b-pfn-qfin-gguf",
+ "downloads": 434,
 "source": "Hugging Face",
- "score": -0.09866267478703537,
- "first_commit": "2024-03-05 10:24:37",
- "latest_commit": "2024-07-22 08:12:21",
+ "score": -0.10013294869514176,
+ "first_commit": "2024-04-24 12:58:10",
+ "latest_commit": "2024-04-24 14:46:15",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 14.2
 },
 {
 "description": "Mistral-Large-Instruct-2407-gguf mistralaiさんが公開しているMistral-Large-Instruct-2407のggufフォーマット変換版です。 ",
 "url": "https://huggingface.co./mmnga/Mistral-Large-Instruct-2407-gguf",
 "project_name": "Mistral-Large-Instruct-2407-gguf",
- "downloads": 426,
+ "downloads": 430,
 "source": "Hugging Face",
- "score": -0.09869095503291848,
+ "score": -0.1001913805248337,
 "first_commit": "2024-07-24 18:59:58",
 "latest_commit": "2024-07-26 12:21:45",
 "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "gemma-2-2b-it-gguf googleさんが公開しているgemma-2-2b-itのggufフォーマット変換版です。 ",
- "url": "https://huggingface.co./mmnga/gemma-2-2b-it-gguf",
- "project_name": "gemma-2-2b-it-gguf",
- "downloads": 424,
- "source": "Hugging Face",
- "score": -0.0987192352788016,
- "first_commit": "2024-08-01 17:22:58",
- "latest_commit": "2024-08-01 18:29:08",
- "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 123.0
 },
 {
 "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.",
 "url": "https://huggingface.co./izumi-lab/bert-small-japanese",
 "project_name": "bert-small-japanese",
- "downloads": 423,
+ "downloads": 427,
 "source": "Hugging Face",
- "score": -0.09873337540174315,
+ "score": -0.10023520439710266,
 "first_commit": "2021-10-04 13:09:36",
 "latest_commit": "2022-12-09 00:40:57",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
+ },
+ {
+ "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.",
+ "url": "https://huggingface.co./studio-ousia/luke-japanese-base",
+ "project_name": "luke-japanese-base",
+ "downloads": 424,
+ "source": "Hugging Face",
+ "score": -0.10027902826937161,
+ "first_commit": "2022-10-25 06:30:23",
+ "latest_commit": "2022-11-09 15:23:20",
+ "languages": [],
+ "model_or_dataset": "model",
+ "model_size": null
+ },
+ {
+ "description": "alabnii/jmedroberta-base-sentencepiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).",
+ "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece",
+ "project_name": "jmedroberta-base-sentencepiece",
+ "downloads": 418,
+ "source": "Hugging Face",
+ "score": -0.10036667601390953,
+ "first_commit": "2022-12-22 17:20:33",
+ "latest_commit": "2023-03-21 23:57:37",
+ "languages": [],
+ "model_or_dataset": "model",
+ "model_size": 0.109
 },
 {
 "description": "Llama-3-8B-Japanese-Instruct-GGUF Original Model haqishen/Llama-3-8B-Japanese-Instruct Run with Gaianet Prompt template: prompt template: llama-3-chat Context size: chat_ctx_size: 4096 Run with GaiaNet:",
 "url": "https://huggingface.co./gaianet/Llama-3-8B-Japanese-Instruct-GGUF",
 "project_name": "Llama-3-8B-Japanese-Instruct-GGUF",
- "downloads": 415,
+ "downloads": 414,
 "source": "Hugging Face",
- "score": -0.09884649638527561,
+ "score": -0.10042510784360148,
 "first_commit": "2024-05-14 05:38:05",
 "latest_commit": "2024-05-16 13:44:53",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 8.03
+ },
+ {
+ "description": "aixsatoshi-Honyaku-13b-gguf aixsatoshiさんが公開しているHonyaku-13bのggufフォーマット変換版です。 ",
+ "url": "https://huggingface.co./mmnga/aixsatoshi-Honyaku-13b-gguf",
+ "project_name": "aixsatoshi-Honyaku-13b-gguf",
+ "downloads": 410,
+ "source": "Hugging Face",
+ "score": -0.10048353967329342,
+ "first_commit": "2024-05-19 08:07:15",
+ "latest_commit": "2024-05-19 09:24:59",
+ "languages": [],
+ "model_or_dataset": "model",
+ "model_size": 13.1
 },
 {
 "description": "line-corporation/japanese-large-lm-1.7b line-corporationさんが公開しているjapanese-large-lm-1.7bのgguf変換版です。 ",
 "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-gguf",
 "project_name": "line-corp-japanese-large-lm-1.7b-gguf",
- "downloads": 415,
+ "downloads": 410,
 "source": "Hugging Face",
- "score": -0.09884649638527561,
+ "score": -0.10048353967329342,
 "first_commit": "2023-09-03 22:35:34",
 "latest_commit": "2024-03-24 05:54:30",
 "languages": [],
- "model_or_dataset": "model"
- },
- {
- "description": "External dictionary importer for Yomichan.",
- "url": "https://github.com/FooSoft/yomichan-import",
- "project_name": "yomichan-import",
- "stargazers_count": 82,
- "source": "GitHub",
- "score": -0.0988496355859273,
- "first_commit": "2016-07-26 20:24:33",
- "latest_commit": "2023-02-25 12:43:03",
- "languages": [
- "Go"
- ],
- "model_or_dataset": null
+ "model_or_dataset": "model",
+ "model_size": 1.77
 },
 {
- "description": "日英変換・英語略語展開のための IME 追加辞書 orange_book 日本語から英語への和英変換や英語略語の展開を Google 日本語入力や ATOK などで可能にする IME 拡張辞書",
- "url": "https://github.com/peaceiris/google-ime-dictionary",
- "project_name": "google-ime-dictionary",
- "stargazers_count": 82,
- "source": "GitHub",
- "score": -0.0988496355859273,
- "first_commit": "2018-09-13 01:54:32",
- "latest_commit": "2023-01-16 10:47:31",
- "languages": [],
- "model_or_dataset": "dataset"
- },
- {
- "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF",
- "url": "https://huggingface.co./QuantFactory/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF",
- "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF",
- "downloads": 414,
+ "description": "Orion-14B 🌐English | 🇨",
+ "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-RAG",
+ "project_name": "Orion-14B-Chat-RAG",
+ "downloads": 403,
 "source": "Hugging Face",
- "score": -0.09886063650821716,
- "first_commit": "2024-07-05 05:56:09",
- "latest_commit": "2024-07-13 13:29:45",
+ "score": -0.10058579537525432,
+ "first_commit": "2024-01-16 12:19:08",
+ "latest_commit": "2024-03-26 10:08:09",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "whisper-large-v2-japanese-5k-steps This model is a fine-tuned version of openai/whisper-large-v2 on the Japanese CommonVoice dataset (v11)..",
- "url": "https://huggingface.co./clu-ling/whisper-large-v2-japanese-5k-steps",
- "project_name": "whisper-large-v2-japanese-5k-steps",
- "downloads": 412,
+ "description": "Japanese GPT2 Lyric Model Model description",
+ "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-small",
+ "project_name": "gpt2-japanese-lyric-small",
+ "downloads": 398,
 "source": "Hugging Face",
- "score": -0.09888891675410028,
- "first_commit": "2023-01-28 22:14:29",
- "latest_commit": "2023-03-03 21:11:39",
+ "score": -0.10065883516236925,
+ "first_commit": "2022-04-21 04:25:18",
+ "latest_commit": "2023-10-23 12:46:36",
 "languages": [],
- "model_or_dataset": "model"
+ "model_or_dataset": "model",
+ "model_size": 0.123
+ },
+ {
+ "description": "Model card for model ID",
+ "url": "https://huggingface.co./retrieva-jp/t5-base-long",
+ "project_name": "t5-base-long",
+ "downloads": 395,
+ "source": "Hugging Face",
+ "score": -0.1007026590346382,
+ "first_commit": "2023-04-26 08:30:59",
+ "latest_commit": "2023-05-10 10:00:00",
+ "languages": [],
+ "model_or_dataset": "model",
+ "model_size": null
 },
 {
- "description": "aixsatoshi-Honyaku-13b-gguf aixsatoshiさんが公開しているHonyaku-13bのggufフォーマット変換版です。 ",
- "url":
"https://huggingface.co./mmnga/aixsatoshi-Honyaku-13b-gguf", - "project_name": "aixsatoshi-Honyaku-13b-gguf", - "downloads": 412, + "description": "This repository contains some GGUF quantizations of the VNTL Gemma 2 27B model.", + "url": "https://huggingface.co./lmg-anon/vntl-gemma2-27b-gguf", + "project_name": "vntl-gemma2-27b-gguf", + "downloads": 394, "source": "Hugging Face", - "score": -0.09888891675410028, - "first_commit": "2024-05-19 08:07:15", - "latest_commit": "2024-05-19 09:24:59", + "score": -0.10071726699206118, + "first_commit": "2024-07-07 00:28:06", + "latest_commit": "2024-07-08 16:13:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 27.2 }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-base-1_6b", - "project_name": "japanese-stablelm-2-base-1_6b", - "downloads": 406, + "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "url": "https://huggingface.co./QuantFactory/ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-GGUF", + "downloads": 394, "source": "Hugging Face", - "score": -0.09897375749174961, - "first_commit": null, - "latest_commit": null, + "score": -0.10071726699206118, + "first_commit": "2024-07-05 05:56:09", + "latest_commit": "2024-07-13 13:29:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.1 }, { "description": "This repository contains some GGUF quantizations of the merge of the VNTL LLaMA 3 8B qlora.", "url": "https://huggingface.co./lmg-anon/vntl-llama3-8b-gguf", "project_name": "vntl-llama3-8b-gguf", - "downloads": 403, + "downloads": 394, "source": "Hugging Face", - "score": -0.09901617786057428, + "score": -0.10071726699206118, "first_commit": "2024-06-13 17:17:30", "latest_commit": "2024-06-15 17:33:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、20000件の日⇔英翻訳データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k", - "project_name": "Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k", - "downloads": 402, + "description": "nlp-waseda/roberta-large-japanese-seq512 Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100 with the maximum sequence length of 512.", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512", + "project_name": "roberta-large-japanese-seq512", + "downloads": 393, "source": "Hugging Face", - "score": -0.09903031798351583, - "first_commit": "2024-07-07 11:08:34", - "latest_commit": "2024-07-07 11:13:47", + "score": -0.10073187494948417, + "first_commit": "2022-06-13 09:46:45", + "latest_commit": "2022-10-21 14:49:40", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { "description": "Ninja-v1-128k-gguf Local-Novel-LLM-projectさんが公開しているNinja-v1-128kのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Ninja-v1-128k-gguf", "project_name": "Ninja-v1-128k-gguf", - "downloads": 393, + "downloads": 392, "source": "Hugging Face", - "score": -0.09915757908998983, + "score": -0.10074648290690716, "first_commit": 
"2024-05-01 17:48:06", "latest_commit": "2024-05-04 13:25:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressive-v2の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-v2-GGUF", + "project_name": "Ninja-v1-RP-expressive-v2-GGUF", + "downloads": 383, + "source": "Hugging Face", + "score": -0.10087795452371402, + "first_commit": "2024-05-26 06:09:57", + "latest_commit": "2024-05-26 15:22:01", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stable-clip-vit-l-16", + "project_name": "japanese-stable-clip-vit-l-16", + "downloads": 382, + "source": "Hugging Face", + "score": -0.10089256248113701, + "first_commit": null, + "latest_commit": null, + "languages": [], + "model_or_dataset": "model", + "model_size": 0.41400000000000003 }, { "description": "line-corporation/japanese-large-lm-1.7b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-1.7b-instruction-sftのgguf変換版です。 ", "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", "project_name": "line-corp-japanese-large-lm-1.7b-instruction-sft-gguf", - "downloads": 389, + "downloads": 382, "source": "Hugging Face", - "score": -0.09921413958175605, + "score": -0.10089256248113701, "first_commit": "2023-09-03 22:30:23", "latest_commit": "2024-03-24 05:54:56", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This repository contains some GGUF quantizations of the VNTL Gemma 2 27B model.", - "url": "https://huggingface.co./lmg-anon/vntl-gemma2-27b-gguf", - "project_name": "vntl-gemma2-27b-gguf", - "downloads": 388, - "source": "Hugging Face", - "score": -0.09922827970469761, - "first_commit": "2024-07-07 00:28:06", - "latest_commit": "2024-07-08 16:13:54", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.77 }, { "description": "ArrowPro-7B-KillerWhale-gguf DataPilotさんが公開しているArrowPro-7B-KillerWhaleのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ArrowPro-7B-KillerWhale-gguf", "project_name": "ArrowPro-7B-KillerWhale-gguf", - "downloads": 384, + "downloads": 378, "source": "Hugging Face", - "score": -0.09928484019646383, + "score": -0.10095099431082896, "first_commit": "2024-05-29 15:06:55", "latest_commit": "2024-05-29 15:53:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Mistral-7B-Instruct-v0.3-gguf mistralaiさんが公開しているMistral-7B-Instruct-v0.3のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Mistral-7B-Instruct-v0.3-gguf", - "project_name": "Mistral-7B-Instruct-v0.3-gguf", - "downloads": 379, + "description": "BERT large Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char-v2", + "project_name": "bert-large-japanese-char-v2", + "downloads": 378, "source": "Hugging Face", - "score": -0.09935554081117161, - "first_commit": "2024-05-23 14:44:25", - "latest_commit": "2024-05-23 15:58:46", + "score": -0.10095099431082896, + "first_commit": "2023-05-19 00:48:06", + "latest_commit": "2023-05-19 00:54:57", "languages": [], - "model_or_dataset": "model" + 
"model_or_dataset": "model", + "model_size": null }, { - "description": "llm-japanese-dataset LLM構築用の日本語インストラクション(チャット)データセット 主に,英語で構築されたLLMモデルなどに対して,チャット(Instruction)応答タスクに関してLoRAなどでチューニングするために使用できます. ", - "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset", - "project_name": "llm-japanese-dataset", - "downloads": 379, + "description": "whisper-large-v2-japanese-5k-steps This model is a fine-tuned version of openai/whisper-large-v2 on the Japanese CommonVoice dataset (v11)..", + "url": "https://huggingface.co./clu-ling/whisper-large-v2-japanese-5k-steps", + "project_name": "whisper-large-v2-japanese-5k-steps", + "downloads": 377, "source": "Hugging Face", - "score": -0.09935554081117161, - "first_commit": "2023-04-30 06:13:24", - "latest_commit": "2024-01-18 13:42:50", + "score": -0.10096560226825194, + "first_commit": "2023-01-28 22:14:29", + "latest_commit": "2023-03-03 21:11:39", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressive-v2の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-v2-GGUF", - "project_name": "Ninja-v1-RP-expressive-v2-GGUF", - "downloads": 376, + "description": "Mistral-7B-Instruct-v0.3-gguf mistralaiさんが公開しているMistral-7B-Instruct-v0.3のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Mistral-7B-Instruct-v0.3-gguf", + "project_name": "Mistral-7B-Instruct-v0.3-gguf", + "downloads": 373, "source": "Hugging Face", - "score": -0.09939796117999627, - "first_commit": "2024-05-26 06:09:57", - "latest_commit": "2024-05-26 15:22:01", + "score": -0.10102403409794389, + "first_commit": "2024-05-23 14:44:25", + "latest_commit": "2024-05-23 15:58:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.25 }, { - "description": "nlp-waseda/roberta-large-japanese-seq512 Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100 with the maximum sequence length of 512.", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese-seq512", - "project_name": "roberta-large-japanese-seq512", - "downloads": 367, + "description": "Oumuamua-7b-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", + "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP", + "project_name": "Oumuamua-7b-RP", + "downloads": 372, "source": "Hugging Face", - "score": -0.09952522228647029, - "first_commit": "2022-06-13 09:46:45", - "latest_commit": "2022-10-21 14:49:40", + "score": -0.10103864205536688, + "first_commit": "2024-06-23 12:30:16", + "latest_commit": "2024-06-23 17:06:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.33 }, { "description": "Dataset.", "url": "https://huggingface.co./datasets/hpprc/jsick", "project_name": "jsick", - "downloads": 365, + "downloads": 357, "source": "Hugging Face", - "score": -0.0995535025323534, + "score": -0.10125776141671165, "first_commit": "2023-04-08 16:02:06", "latest_commit": "2023-04-11 15:18:09", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-title-generation", - "project_name": "t5-base-japanese-title-generation", - "downloads": 364, - "source": "Hugging Face", - "score": -0.09956764265529496, - "first_commit": "2021-04-04 06:57:18", - "latest_commit": "2022-02-21 13:38:09", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Japanese GPT2 Lyric Model Model description", - "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-small", - "project_name": "gpt2-japanese-lyric-small", - "downloads": 359, + "description": "bert-base-japanese-wikipedia-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-wikipedia-ud-head", + "project_name": "bert-base-japanese-wikipedia-ud-head", + "downloads": 349, "source": "Hugging Face", - "score": -0.09963834327000273, - "first_commit": "2022-04-21 04:25:18", - "latest_commit": "2023-10-23 12:46:36", + "score": -0.10137462507609554, + "first_commit": "2022-06-20 21:58:53", + "latest_commit": "2023-03-04 20:16:55", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "BERT large Japanese (character-level tokenization with whole word masking, CC-100 and jawiki-20230102)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char-v2", - "project_name": "bert-large-japanese-char-v2", - "downloads": 358, + "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてQAタスクに用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-QAe", + "project_name": "deberta-v2-base-japanese-finetuned-QAe", + "downloads": 344, "source": "Hugging Face", - "score": -0.09965248339294429, - "first_commit": "2023-05-19 00:48:06", - "latest_commit": "2023-05-19 00:54:57", + "score": -0.10144766486321047, + "first_commit": "2023-01-09 11:59:13", + "latest_commit": "2023-03-27 02:43:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.112 }, { - "description": "bert-base-japanese-wikipedia-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-wikipedia-ud-head", - "project_name": "bert-base-japanese-wikipedia-ud-head", - "downloads": 358, + "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", + "url": 
"https://huggingface.co./studio-ousia/luke-japanese-base-lite", + "project_name": "luke-japanese-base-lite", + "downloads": 342, "source": "Hugging Face", - "score": -0.09965248339294429, - "first_commit": "2022-06-20 21:58:53", - "latest_commit": "2023-03-04 20:16:55", + "score": -0.10147688077805643, + "first_commit": "2022-10-25 09:27:16", + "latest_commit": "2022-11-09 15:22:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "RetrievaBERT Model The RetrievaBERT is the pre-trained Transformer Encoder using Megatron-LM.", - "url": "https://huggingface.co./retrieva-jp/bert-1.3b", - "project_name": "bert-1.3b", - "downloads": 353, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-large-short", + "project_name": "t5-large-short", + "downloads": 340, "source": "Hugging Face", - "score": -0.09972318400765207, - "first_commit": "2024-06-25 06:18:24", - "latest_commit": "2024-07-09 05:36:08", + "score": -0.1015060966929024, + "first_commit": "2023-04-26 08:18:58", + "latest_commit": "2023-05-10 10:00:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "XLNet-japanese Model description This model require Mecab and senetencepiece with XLNetTokenizer.", - "url": "https://huggingface.co./hajime9652/xlnet-japanese", - "project_name": "xlnet-japanese", - "downloads": 351, + "description": "概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", + "url": "https://huggingface.co./DataPilot/Llama3.1-ArrowSE-v0.4", + "project_name": "Llama3.1-ArrowSE-v0.4", + "downloads": 339, "source": "Hugging Face", - "score": -0.09975146425353518, - "first_commit": "2021-04-01 03:12:11", - "latest_commit": "2023-01-05 04:28:36", + "score": -0.1015207046503254, + "first_commit": "2024-07-24 07:37:16", + "latest_commit": "2024-07-24 12:00:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Model Card for Japanese character-level DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-char-wwm", - "project_name": "deberta-v2-base-japanese-char-wwm", - "downloads": 349, + "description": "fio-base-japanese-v0.1 日本語版は近日公開予定です(日本語を勉強中なので、間違いはご容赦ください!", + "url": "https://huggingface.co./bclavie/fio-base-japanese-v0.1", + "project_name": "fio-base-japanese-v0.1", + "downloads": 339, "source": "Hugging Face", - "score": -0.09977974449941829, - "first_commit": "2023-01-18 13:55:30", - "latest_commit": "2023-03-26 03:32:27", + "score": -0.1015207046503254, + "first_commit": "2023-12-18 11:01:07", + "latest_commit": "2023-12-19 10:28:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-large-short", - "project_name": "t5-large-short", - "downloads": 349, + "description": "ku-nlp/roberta-base-japanese-char-wwm Model description This is a Japanese RoBERTa base model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./ku-nlp/roberta-base-japanese-char-wwm", + "project_name": "roberta-base-japanese-char-wwm", + "downloads": 338, "source": "Hugging Face", - 
"score": -0.09977974449941829, - "first_commit": "2023-04-26 08:18:58", - "latest_commit": "2023-05-10 10:00:54", + "score": -0.10153531260774838, + "first_commit": "2022-09-20 05:07:34", + "latest_commit": "2023-03-20 08:05:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.1 }, { "description": "Model card for model ID", "url": "https://huggingface.co./retrieva-jp/t5-large-long", "project_name": "t5-large-long", - "downloads": 348, + "downloads": 338, "source": "Hugging Face", - "score": -0.09979388462235984, + "score": -0.10153531260774838, "first_commit": "2023-04-26 08:33:12", "latest_commit": "2023-05-10 10:00:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてQAタスクに用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-QAe", - "project_name": "deberta-v2-base-japanese-finetuned-QAe", - "downloads": 342, + "description": "llm-japanese-dataset LLM構築用の日本語インストラクション(チャット)データセット 主に,英語で構築されたLLMモデルなどに対して,チャット(Instruction)応答タスクに関してLoRAなどでチューニングするために使用できます. ", + "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset", + "project_name": "llm-japanese-dataset", + "downloads": 337, "source": "Hugging Face", - "score": -0.09987872536000918, - "first_commit": "2023-01-09 11:59:13", - "latest_commit": "2023-03-27 02:43:35", + "score": -0.10154992056517137, + "first_commit": "2023-04-30 06:13:24", + "latest_commit": "2024-01-18 13:42:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Oumuamua-7b-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", - "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP", - "project_name": "Oumuamua-7b-RP", - "downloads": 337, + "description": "RetrievaBERT Model The RetrievaBERT is the pre-trained Transformer Encoder using Megatron-LM.", + "url": "https://huggingface.co./retrieva-jp/bert-1.3b", + "project_name": "bert-1.3b", + "downloads": 336, "source": "Hugging Face", - "score": -0.09994942597471695, - "first_commit": "2024-06-23 12:30:16", - "latest_commit": "2024-06-23 17:06:53", + "score": -0.10156452852259434, + "first_commit": "2024-06-25 06:18:24", + "latest_commit": "2024-07-09 05:36:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.3 }, { - "description": "ku-nlp/roberta-base-japanese-char-wwm Model description This is a Japanese RoBERTa base model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./ku-nlp/roberta-base-japanese-char-wwm", - "project_name": "roberta-base-japanese-char-wwm", - "downloads": 329, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-base-1_6b", + "project_name": "japanese-stablelm-2-base-1_6b", + "downloads": 334, "source": "Hugging Face", - "score": -0.1000625469582494, - "first_commit": "2022-09-20 05:07:34", - "latest_commit": "2023-03-20 08:05:45", + "score": -0.10159374443744032, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.64 }, { - "description": "luke-japanese luke-japanese is the Japanese version of LUKE (Language Understanding with Knowledge-based Embeddings), a pre-trained knowledge-enhanced contextualized representation of words and entities.", - "url": "https://huggingface.co./studio-ousia/luke-japanese-base-lite", - "project_name": "luke-japanese-base-lite", - "downloads": 325, + "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", + "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese-hiragana", + "project_name": "wav2vec2-large-xlsr-japanese-hiragana", + "downloads": 332, + "source": "Hugging Face", + "score": -0.1016229603522863, + "first_commit": "2021-06-18 07:15:24", + "latest_commit": "2023-02-08 00:36:47", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.316 + }, + { + "description": "XLNet-japanese Model description This model require Mecab and senetencepiece with XLNetTokenizer.", + "url": "https://huggingface.co./hajime9652/xlnet-japanese", + "project_name": "xlnet-japanese", + "downloads": 327, + "source": "Hugging Face", + "score": -0.10169600013940122, + "first_commit": "2021-04-01 03:12:11", + "latest_commit": "2023-01-05 04:28:36", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "rinna/japanese-wav2vec2-base Overview This is a Japanese wav2vec 2.0 Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-wav2vec2-base", + "project_name": "japanese-wav2vec2-base", + "downloads": 322, + "source": "Hugging Face", + "score": -0.10176903992651615, + "first_commit": "2024-03-06 01:07:56", + "latest_commit": "2024-07-22 08:11:46", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.095 + }, + { + "description": "HODACHI様の 
Llama-3-EZO-8b-Common-it をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama-3-EZO-8b-Common-it-GGUF", + "project_name": "Llama-3-EZO-8b-Common-it-GGUF", + "downloads": 321, "source": "Hugging Face", - "score": -0.10011910745001563, - "first_commit": "2022-10-25 09:27:16", - "latest_commit": "2022-11-09 15:22:22", + "score": -0.10178364788393914, + "first_commit": "2024-07-15 11:58:12", + "latest_commit": "2024-07-15 20:08:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-base - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-base StableLM", "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-base-gguf", "project_name": "stabilityai-japanese-stablelm-3b-4e1t-base-gguf", - "downloads": 325, + "downloads": 321, "source": "Hugging Face", - "score": -0.10011910745001563, + "score": -0.10178364788393914, "first_commit": "2023-11-16 10:23:21", "latest_commit": "2023-11-16 11:18:48", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "回答と回答が出てくるパラグラフを与えると質問文を生成するモデル SEE: https://github.com/sonoisa/deep-question-generation 本モデルの作成ステップ概要 SQuAD 1.1を日本語に機械翻訳し、不正なデータをクレンジング(有効なデータは約半分)。", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-question-generation", - "project_name": "t5-base-japanese-question-generation", - "downloads": 323, - "source": "Hugging Face", - "score": -0.10014738769589875, - "first_commit": "2021-04-03 14:08:55", - "latest_commit": "2022-03-11 02:50:33", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "llm-book/t5-base-long-livedoor-news-corpus 「大規模言語モデル入門」の第7章で紹介している要約生成のモデルです。 ", - "url": "https://huggingface.co./llm-book/t5-base-long-livedoor-news-corpus", - "project_name": "t5-base-long-livedoor-news-corpus", - "downloads": 322, - "source": "Hugging Face", - "score": -0.1001615278188403, - "first_commit": "2023-06-27 13:32:54", - "latest_commit": "2023-07-25 13:10:36", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.8 }, { "description": "Qwen1.5-110B-Chat-gguf Qwenさんが公開しているQwen1.5-110B-Chatのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Qwen1.5-110B-Chat-gguf", "project_name": "Qwen1.5-110B-Chat-gguf", - "downloads": 318, + "downloads": 320, "source": "Hugging Face", - "score": -0.10021808831060652, + "score": -0.10179825584136212, "first_commit": "2024-04-27 19:35:48", "latest_commit": "2024-04-28 08:09:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 111.0 }, { - "description": "概要 このモデルはllama3.1-8B-instructをもとに日本語性能を高めることを目的にMergekit&ファインチューニングを用いて作成されました。 ", - "url": "https://huggingface.co./DataPilot/Llama3.1-ArrowSE-v0.4", - "project_name": "Llama3.1-ArrowSE-v0.4", - "downloads": 317, + "description": "databricks/dolly-v2-12b の学習データに使用されたdatabricks-dolly-15k.jsonl を日本語に翻訳したデータセットになります。", + "url": "https://github.com/kunishou/databricks-dolly-15k-ja", + "project_name": "databricks-dolly-15k-ja", + "stargazers_count": 81, + "source": "GitHub", + "score": -0.10180642365093301, + "first_commit": "2023-04-14 23:43:27", + "latest_commit": "2023-07-26 00:08:32", + "languages": [], + "model_or_dataset": "dataset" + }, + { + "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code 
Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b-instruct", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct", + "downloads": 312, "source": "Hugging Face", - "score": -0.10023222843354808, - "first_commit": "2024-07-24 07:37:16", - "latest_commit": "2024-07-24 12:00:46", + "score": -0.101915119500746, + "first_commit": "2023-11-07 12:04:07", + "latest_commit": "2023-11-17 05:01:00", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "Japanese Natural Language Inference Model", - "url": "https://huggingface.co./cyberagent/xlm-roberta-large-jnli-jsick", - "project_name": "xlm-roberta-large-jnli-jsick", - "downloads": 314, + "description": "llm-book/t5-base-long-livedoor-news-corpus 「大規模言語モデル入門」の第7章で紹介している要約生成のモデルです。 ", + "url": "https://huggingface.co./llm-book/t5-base-long-livedoor-news-corpus", + "project_name": "t5-base-long-livedoor-news-corpus", + "downloads": 312, "source": "Hugging Face", - "score": -0.10027464880237275, - "first_commit": "2022-12-23 10:51:12", - "latest_commit": "2022-12-23 10:51:12", + "score": -0.101915119500746, + "first_commit": "2023-06-27 13:32:54", + "latest_commit": "2023-07-25 13:10:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "OcuteusのGGUF版です。 ", "url": "https://huggingface.co./Local-Novel-LLM-project/Ocuteus-v1-gguf", "project_name": "Ocuteus-v1-gguf", - "downloads": 305, + "downloads": 311, "source": "Hugging Face", - "score": -0.10040190990884675, + "score": -0.10192972745816899, "first_commit": "2024-05-07 09:57:49", "latest_commit": "2024-05-10 06:18:35", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Introduction Who am I: Qishen Ha", - "url": "https://huggingface.co./haqishen/Llama-3-8B-Japanese-Instruct", - "project_name": "Llama-3-8B-Japanese-Instruct", - "downloads": 305, - "source": "Hugging Face", - "score": -0.10040190990884675, - "first_commit": "2024-04-23 04:41:19", - "latest_commit": "2024-05-02 03:36:10", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k nvidia/Nemotron-4-340B-Instructを用いて作成した、約1000件・各10ターンの日本語ロールプレイの対話を収録した合成対話データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k", - "project_name": "Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k", - "downloads": 296, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-instruct-1_6b", + "project_name": "japanese-stablelm-2-instruct-1_6b", + "downloads": 309, "source": "Hugging Face", - "score": -0.10052917101532075, - "first_commit": "2024-07-03 13:21:22", - "latest_commit": "2024-07-03 13:53:20", + "score": -0.10195894337301496, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 1.64 }, { - "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b-instruct", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct", - "downloads": 294, + "description": 
"回答と回答が出てくるパラグラフを与えると質問文を生成するモデル SEE: https://github.com/sonoisa/deep-question-generation 本モデルの作成ステップ概要 SQuAD 1.1を日本語に機械翻訳し、不正なデータをクレンジング(有効なデータは約半分)。", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-question-generation", + "project_name": "t5-base-japanese-question-generation", + "downloads": 304, "source": "Hugging Face", - "score": -0.10055745126120386, - "first_commit": "2023-11-07 12:04:07", - "latest_commit": "2023-11-17 05:01:00", + "score": -0.10203198316012989, + "first_commit": "2021-04-03 14:08:55", + "latest_commit": "2022-03-11 02:50:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "rinna/japanese-wav2vec2-base Overview This is a Japanese wav2vec 2.0 Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-wav2vec2-base", - "project_name": "japanese-wav2vec2-base", - "downloads": 292, + "description": "Githubリポジトリstockmarkteam/ner-wikipedia-datasetで公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/ner-wikipedia-dataset", + "project_name": "ner-wikipedia-dataset", + "downloads": 296, "source": "Hugging Face", - "score": -0.10058573150708697, - "first_commit": "2024-03-06 01:07:56", - "latest_commit": "2024-07-22 08:11:46", + "score": -0.10214884681951378, + "first_commit": "2023-04-15 10:43:21", + "latest_commit": "2023-12-12 11:25:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "alabnii/jmedroberta-base-sentencepiece Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece", - "project_name": "jmedroberta-base-sentencepiece", - "downloads": 279, + "description": "Llama-3-EZO-VLM-1 Based on SakanaAI/Llama-3-EvoVLM-JP-v2, it has been enhanced for Japanese usage through additional pre-training and instruction tuning.", + "url": "https://huggingface.co./HODACHI/Llama-3-EZO-VLM-1", + "project_name": "Llama-3-EZO-VLM-1", + "downloads": 289, "source": "Hugging Face", - "score": -0.1007695531053272, - "first_commit": "2022-12-22 17:20:33", - "latest_commit": "2023-03-21 23:57:37", + "score": -0.10225110252147468, + "first_commit": "2024-08-03 17:15:09", + "latest_commit": "2024-08-04 23:20:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.48 }, { "description": "stockmark-100b-gguf stockmarkさんが公開しているstockmark-100bのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/stockmark-100b-gguf", "project_name": "stockmark-100b-gguf", - "downloads": 279, + "downloads": 287, "source": "Hugging Face", - "score": -0.1007695531053272, + "score": -0.10228031843632064, "first_commit": "2024-05-17 12:45:56", "latest_commit": "2024-05-18 09:14:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 96.2 }, { - "description": "Evaluation on MIRACL japanese These models don't train on the MIRACL training data.", - "url": "https://huggingface.co./aken12/splade-japanese-v3", - "project_name": "splade-japanese-v3", - "downloads": 277, + "description": "Introduction Who am I: Qishen Ha", + "url": "https://huggingface.co./haqishen/Llama-3-8B-Japanese-Instruct", + "project_name": "Llama-3-8B-Japanese-Instruct", + "downloads": 283, "source": "Hugging Face", - "score": -0.10079783335121031, - "first_commit": 
"2024-03-29 12:35:47", - "latest_commit": "2024-05-22 02:59:37", + "score": -0.10233875026601259, + "first_commit": "2024-04-23 04:41:19", + "latest_commit": "2024-05-02 03:36:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "mathstral-7B-v0.1-gguf mistralaiさんが公開しているmathstral-7B-v0.1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/mathstral-7B-v0.1-gguf", - "project_name": "mathstral-7B-v0.1-gguf", - "downloads": 276, + "description": "JaQuAD is developed to provide a SQuAD-like QA dataset in Japanese.", + "url": "https://huggingface.co./datasets/SkelterLabsInc/JaQuAD", + "project_name": "JaQuAD", + "downloads": 282, "source": "Hugging Face", - "score": -0.10081197347415187, - "first_commit": "2024-07-17 17:49:56", - "latest_commit": "2024-07-17 18:54:27", + "score": -0.10235335822343557, + "first_commit": "2022-01-26 01:34:38", + "latest_commit": "2022-10-25 09:06:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-instruct - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-instruct StableLM", - "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", - "project_name": "stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", - "downloads": 275, + "description": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、約10000件の日本語instruction tuning用データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", + "project_name": "Synthetic-JP-Conversations-Magpie-Nemotron-4-10k", + "downloads": 280, "source": "Hugging Face", - "score": -0.10082611359709343, - "first_commit": "2023-11-16 10:25:20", - "latest_commit": "2023-11-16 12:53:33", + "score": -0.10238257413828154, + "first_commit": "2024-07-05 13:53:45", + "latest_commit": "2024-07-05 13:57:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Japanese to Korean translator Japanese to Korean translator model based on EncoderDecoderModel(bert-japanese+kogpt2)", - "url": "https://huggingface.co./sappho192/aihub-ja-ko-translator", - "project_name": "aihub-ja-ko-translator", - "downloads": 275, + "description": "SpeechT5 (TTS task) for Japanese SpeechT5 model fine-tuned for Japanese speech synthesis (text-to-speech)", + "url": "https://huggingface.co./esnya/japanese_speecht5_tts", + "project_name": "japanese_speecht5_tts", + "downloads": 277, "source": "Hugging Face", - "score": -0.10082611359709343, - "first_commit": "2024-02-05 00:51:47", - "latest_commit": "2024-06-28 06:38:39", + "score": -0.10242639801055049, + "first_commit": "2023-08-08 18:37:40", + "latest_commit": "2023-08-09 09:25:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.14400000000000002 + }, + { + "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", + "url": "https://huggingface.co./if001/tiny_mixtral_ja", + "project_name": "tiny_mixtral_ja", + "downloads": 276, + "source": "Hugging Face", + "score": -0.10244100596797348, + 
"first_commit": "2024-01-22 15:02:21", + "latest_commit": "2024-01-23 00:42:05", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.276 + }, + { + "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-title-generation", + "project_name": "t5-base-japanese-title-generation", + "downloads": 273, + "source": "Hugging Face", + "score": -0.10248482984024244, + "first_commit": "2021-04-04 06:57:18", + "latest_commit": "2022-02-21 13:38:09", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Chat & support: TheBloke's Discord server Want to contribute?", "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GGUF", "project_name": "japanese-stablelm-base-beta-70B-GGUF", - "downloads": 274, + "downloads": 273, "source": "Hugging Face", - "score": -0.10084025372003498, + "score": -0.10248482984024244, "first_commit": "2023-11-06 11:33:47", "latest_commit": "2023-11-06 12:14:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.0 }, { - "description": "JaQuAD is developed to provide a SQuAD-like QA dataset in Japanese.", - "url": "https://huggingface.co./datasets/SkelterLabsInc/JaQuAD", - "project_name": "JaQuAD", - "downloads": 274, + "description": "I'm constantly enhancing these model descriptions to provide you with the most relevant and comprehensive information japanese-stablelm-3b-4e1t-instruct - GGUF Model creator: stabilityai Original model: japanese-stablelm-3b-4e1t-instruct StableLM", + "url": "https://huggingface.co./maddes8cht/stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", + "project_name": "stabilityai-japanese-stablelm-3b-4e1t-instruct-gguf", + "downloads": 270, "source": "Hugging Face", - "score": -0.10084025372003498, - "first_commit": "2022-01-26 01:34:38", - "latest_commit": "2022-10-25 09:06:40", + "score": -0.1025286537125114, + "first_commit": "2023-11-16 10:25:20", + "latest_commit": "2023-11-16 12:53:33", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 2.8 + }, + { + "description": "ichikara-instruction (Non Commercial) LLMのための日本語インストラクションデータ 公開ページ 公開ページより、 本データに関して、言語処理学会第30回年次大会において発表を行います。", + "url": "https://huggingface.co./datasets/p1atdev/ichikara-instruction", + "project_name": "ichikara-instruction", + "downloads": 268, + "source": "Hugging Face", + "score": -0.10255786962735737, + "first_commit": "2024-03-12 07:09:56", + "latest_commit": "2024-03-12 08:36:40", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "ELYZA-japanese-CodeLlama-7b-gguf ELYZAさんが公開しているELYZA-japanese-CodeLlama-7b-instructのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-gguf", "project_name": "ELYZA-japanese-CodeLlama-7b-gguf", - "downloads": 273, + "downloads": 263, "source": "Hugging Face", - "score": -0.10085439384297654, + "score": -0.1026309094144723, "first_commit": "2023-11-15 09:53:42", "latest_commit": "2023-11-16 14:28:03", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama-3-EZO-VLM-1 Based on SakanaAI/Llama-3-EvoVLM-JP-v2, it has been enhanced for Japanese usage through additional pre-training and instruction tuning.", - "url": "https://huggingface.co./HODACHI/Llama-3-EZO-VLM-1", - "project_name": "Llama-3-EZO-VLM-1", - "downloads": 273, - "source": "Hugging Face", - "score": -0.10085439384297654, - 
"first_commit": "2024-08-03 17:15:09", - "latest_commit": "2024-08-04 23:20:43", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { "description": "Stanza model for Japanese (ja)", "url": "https://huggingface.co./stanfordnlp/stanza-ja", "project_name": "stanza-ja", - "downloads": 267, + "downloads": 263, "source": "Hugging Face", - "score": -0.10093923458062587, + "score": -0.1026309094144723, "first_commit": "2021-09-07 12:05:41", "latest_commit": "2024-07-31 05:09:43", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1.", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "downloads": 259, - "source": "Hugging Face", - "score": -0.10105235556415831, - "first_commit": "2024-01-28 16:13:26", - "latest_commit": "2024-01-28 16:24:30", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This is a model for named entity recognition of Japanese medical documents.", - "url": "https://huggingface.co./sociocom/MedNERN-CR-JA", - "project_name": "MedNERN-CR-JA", - "downloads": 257, - "source": "Hugging Face", - "score": -0.10108063581004142, - "first_commit": "2023-04-13 08:25:56", - "latest_commit": "2024-02-26 13:53:06", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "SpeechT5 (TTS task) for Japanese SpeechT5 model fine-tuned for Japanese speech synthesis (text-to-speech)", - "url": "https://huggingface.co./esnya/japanese_speecht5_tts", - "project_name": "japanese_speecht5_tts", - "downloads": 256, - "source": "Hugging Face", - "score": -0.10109477593298298, - "first_commit": "2023-08-08 18:37:40", - "latest_commit": "2023-08-09 09:25:38", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stable-diffusion-xl", - "project_name": "japanese-stable-diffusion-xl", - "downloads": 255, + "description": "Japanese Natural Language Inference Model", + "url": "https://huggingface.co./cyberagent/xlm-roberta-large-jnli-jsick", + "project_name": "xlm-roberta-large-jnli-jsick", + "downloads": 261, "source": "Hugging Face", - "score": -0.10110891605592454, - "first_commit": null, - "latest_commit": null, + "score": -0.10266012532931827, + "first_commit": "2022-12-23 10:51:12", + "latest_commit": "2022-12-23 10:51:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "[Llama-3-EZO model card]", - "url": "https://huggingface.co./HODACHI/Llama-3-EZO-8b-Common-it", - "project_name": "Llama-3-EZO-8b-Common-it", - "downloads": 253, + "description": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-A-v1-7Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-A-v1-7B-gguf", + "project_name": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf", + 
"downloads": 258, "source": "Hugging Face", - "score": -0.10113719630180765, - "first_commit": "2024-07-13 06:42:31", - "latest_commit": "2024-08-04 06:16:37", + "score": -0.10270394920158722, + "first_commit": "2024-03-21 13:25:41", + "latest_commit": "2024-03-21 14:48:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stablelm-2-instruct-1_6b", - "project_name": "japanese-stablelm-2-instruct-1_6b", - "downloads": 252, + "description": "This is a model for named entity recognition of Japanese medical documents.", + "url": "https://huggingface.co./sociocom/MedNERN-CR-JA", + "project_name": "MedNERN-CR-JA", + "downloads": 254, "source": "Hugging Face", - "score": -0.1011513364247492, - "first_commit": null, - "latest_commit": null, + "score": -0.10276238103127917, + "first_commit": "2023-04-13 08:25:56", + "latest_commit": "2024-02-26 13:53:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.11 }, { - "description": "MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1.", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", - "downloads": 246, + "description": "Evaluation on MIRACL japanese These models don't train on the MIRACL training data.", + "url": "https://huggingface.co./aken12/splade-japanese-v3", + "project_name": "splade-japanese-v3", + "downloads": 252, "source": "Hugging Face", - "score": -0.10123617716239854, - "first_commit": "2024-01-26 06:13:55", - "latest_commit": "2024-01-26 06:36:22", + "score": -0.10279159694612514, + "first_commit": "2024-03-29 12:35:47", + "latest_commit": "2024-05-22 02:59:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "What’s this?", - "url": "https://huggingface.co./globis-university/deberta-v3-japanese-base", - "project_name": "deberta-v3-japanese-base", - "downloads": 246, + "description": "[Llama-3-EZO model card]", + "url": "https://huggingface.co./HODACHI/Llama-3-EZO-8b-Common-it", + "project_name": "Llama-3-EZO-8b-Common-it", + "downloads": 250, "source": "Hugging Face", - "score": -0.10123617716239854, - "first_commit": "2023-09-21 16:19:31", - "latest_commit": "2024-07-05 05:49:13", + "score": -0.10282081286097111, + "first_commit": "2024-07-13 06:42:31", + "latest_commit": "2024-08-04 06:16:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "DataPilot様の ArrowPro-7B-KUJIRA をGGUF形式に変換したものです。 ", "url": "https://huggingface.co./MCZK/ArrowPro-7B-KUJIRA-GGUF", "project_name": "ArrowPro-7B-KUJIRA-GGUF", - "downloads": 242, + "downloads": 244, "source": "Hugging Face", - "score": -0.10129273765416476, + "score": -0.10290846060550903, "first_commit": "2024-05-09 13:34:05", "latest_commit": "2024-05-09 23:32:52", "languages": 
[], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Githubリポジトリstockmarkteam/ner-wikipedia-datasetで公開されているデータセットを利用しています。", - "url": "https://huggingface.co./datasets/llm-book/ner-wikipedia-dataset", - "project_name": "ner-wikipedia-dataset", + "description": "MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1.", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", "downloads": 242, "source": "Hugging Face", - "score": -0.10129273765416476, - "first_commit": "2023-04-15 10:43:21", - "latest_commit": "2023-12-12 11:25:51", + "score": -0.102937676520355, + "first_commit": "2024-01-28 16:13:26", + "latest_commit": "2024-01-28 16:24:30", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stable-diffusion-xl", + "project_name": "japanese-stable-diffusion-xl", + "downloads": 239, + "source": "Hugging Face", + "score": -0.10298150039262395, + "first_commit": null, + "latest_commit": null, + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "t5-base-japanese-web (with Byte-fallback, 32K) Description megagonlabs/t5-base-japanese-web is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web", "project_name": "t5-base-japanese-web", - "downloads": 240, + "downloads": 237, "source": "Hugging Face", - "score": -0.10132101790004787, + "score": -0.10301071630746993, "first_commit": "2021-08-24 04:41:45", "latest_commit": "2021-09-06 19:32:21", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "alpaca-guanaco-japanese-gpt-1b 1.3Bパラメータの日本語GPTモデルを使用した対話AIです。", - "url": "https://huggingface.co./inu-ai/alpaca-guanaco-japanese-gpt-1b", - "project_name": "alpaca-guanaco-japanese-gpt-1b", - "downloads": 239, - "source": "Hugging Face", - "score": -0.10133515802298942, - "first_commit": "2023-04-12 00:18:29", - "latest_commit": "2023-04-13 10:25:48", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", - "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B", - "project_name": "Fugaku-LLM-13B", - "downloads": 237, + "description": "What’s this?", + "url": "https://huggingface.co./globis-university/deberta-v3-japanese-base", + "project_name": "deberta-v3-japanese-base", + "downloads": 235, "source": "Hugging Face", - "score": -0.10136343826887254, - "first_commit": null, - "latest_commit": null, + "score": -0.10303993222231589, + "first_commit": "2023-09-21 
16:19:31", + "latest_commit": "2024-07-05 05:49:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-A-v1-7Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-A-v1-7B-gguf", - "project_name": "SakanaAI-EvoLLM-JP-A-v1-7B-gguf", + "description": "shisa-7b-v1-gguf augmxntさんが公開しているshisa-7b-v1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/shisa-7b-v1-gguf", + "project_name": "shisa-7b-v1-gguf", "downloads": 235, "source": "Hugging Face", - "score": -0.10139171851475565, - "first_commit": "2024-03-21 13:25:41", - "latest_commit": "2024-03-21 14:48:28", + "score": -0.10303993222231589, + "first_commit": "2023-12-09 14:02:20", + "latest_commit": "2023-12-10 12:24:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.96 }, { - "description": "DataPilot様の Llama3-ArrowSE-8B-v0.3 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Llama3-ArrowSE-8B-v0.3-GGUF", - "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF", - "downloads": 233, + "description": "Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k nvidia/Nemotron-4-340B-Instructを用いて作成した、約1000件・各10ターンの日本語ロールプレイの対話を収録した合成対話データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k", + "project_name": "Synthetic-JP-10-Turns-Roleplay-Dialogues-Nemotron-4-1k", + "downloads": 232, "source": "Hugging Face", - "score": -0.10141999876063877, - "first_commit": "2024-07-07 07:53:32", - "latest_commit": "2024-07-07 13:40:26", + "score": -0.10308375609458485, + "first_commit": "2024-07-03 13:21:22", + "latest_commit": "2024-07-03 13:53:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "QuantFactory/llama-3-youko-8b-GGUF", - "url": "https://huggingface.co./QuantFactory/llama-3-youko-8b-GGUF", - "project_name": "llama-3-youko-8b-GGUF", - "downloads": 231, + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressiveの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-GGUF", + "project_name": "Ninja-v1-RP-expressive-GGUF", + "downloads": 229, "source": "Hugging Face", - "score": -0.10144827900652188, - "first_commit": "2024-06-24 05:04:12", - "latest_commit": "2024-06-24 06:35:40", + "score": -0.1031275799668538, + "first_commit": "2024-05-21 12:16:42", + "latest_commit": "2024-05-24 15:11:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for VecTeus-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 VecTeus has the following changes compared to Mistral-7B-v0.1.", - "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-v1", - "project_name": "Vecteus-v1", - "downloads": 231, + "description": "Umievo-itr012-Gleipnir-7B-GGUF", + "url": "https://huggingface.co./QuantFactory/Umievo-itr012-Gleipnir-7B-GGUF", + "project_name": "Umievo-itr012-Gleipnir-7B-GGUF", + "downloads": 228, "source": "Hugging Face", - "score": -0.10144827900652188, - "first_commit": "2024-05-01 02:08:01", - "latest_commit": "2024-05-04 04:07:22", + "score": -0.10314218792427679, + "first_commit": "2024-06-09 03:48:10", + "latest_commit": "2024-06-09 
13:12:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "shisa-7b-v1-gguf augmxntさんが公開しているshisa-7b-v1のggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/shisa-7b-v1-gguf", - "project_name": "shisa-7b-v1-gguf", - "downloads": 230, + "description": "MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF Model creator: MaziyarPanahi Original model: MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 Description MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF contains GGUF format model files for MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1.", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1-GGUF", + "downloads": 228, "source": "Hugging Face", - "score": -0.10146241912946344, - "first_commit": "2023-12-09 14:02:20", - "latest_commit": "2023-12-10 12:24:25", + "score": -0.10314218792427679, + "first_commit": "2024-01-26 06:13:55", + "latest_commit": "2024-01-26 06:36:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Ninja-v1-RP-expressiveの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-GGUF", - "project_name": "Ninja-v1-RP-expressive-GGUF", - "downloads": 228, + "description": "Kendamarron/jimba-wiki-instruction-calm3 grapevine-AI/CALM3-22B-Chat-GGUFのQ4_K_Mを使った合成instructionデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/jimba-wiki-instruction-calm3", + "project_name": "jimba-wiki-instruction-calm3", + "downloads": 227, "source": "Hugging Face", - "score": -0.10149069937534655, - "first_commit": "2024-05-21 12:16:42", - "latest_commit": "2024-05-24 15:11:25", + "score": -0.10315679588169978, + "first_commit": "2024-07-09 22:18:35", + "latest_commit": "2024-07-20 12:57:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "このデータセットについて このデータは、日本の官公庁のWebサイトに掲載されている「よくある質問」を手作業で抽出し、インストラクション用のデータセットとしたものです。 ", - "url": "https://huggingface.co./datasets/matsuxr/JaGovFaqs-22k", - "project_name": "JaGovFaqs-22k", - "downloads": 228, + "description": "ElanMT ElanMT-BT-en-ja is a English to Japanese translation model developed by ELAN MITSUA Project / Abstract Engine.", + "url": "https://huggingface.co./Mitsua/elan-mt-bt-en-ja", + "project_name": "elan-mt-bt-en-ja", + "downloads": 226, "source": "Hugging Face", - "score": -0.10149069937534655, - "first_commit": "2023-12-31 13:58:41", - "latest_commit": "2024-02-29 02:51:20", + "score": -0.10317140383912277, + "first_commit": "2024-05-20 01:51:18", + "latest_commit": "2024-05-20 01:53:38", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.0606 }, { - "description": "Synthetic-JP-Coding-Dataset-Magpie-Nemotron-4-10k Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、約10000件の日本語のコーディング用対話データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-Coding-Dataset-Magpie-Nemotron-4-10k", - "project_name": "Synthetic-JP-Coding-Dataset-Magpie-Nemotron-4-10k", - "downloads": 228, + "description": "Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k 
Magpieの手法をnvidia/Nemotron-4-340B-Instructに対して適用し作成した、20000件の日⇔英翻訳データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k", + "project_name": "Synthetic-JP-EN-Translation-Dataset-Magpie-Nemotron-4-20k", + "downloads": 224, "source": "Hugging Face", - "score": -0.10149069937534655, - "first_commit": "2024-07-06 06:53:27", - "latest_commit": "2024-07-06 06:57:57", + "score": -0.10320061975396874, + "first_commit": "2024-07-07 11:08:34", + "latest_commit": "2024-07-07 11:13:47", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "SakanaAI-EvoLLM-JP-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-v1-7Bのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-v1-7B-gguf", - "project_name": "SakanaAI-EvoLLM-JP-v1-7B-gguf", + "description": "オリジナルのサイトと同じものを使用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/livedoor-news-corpus", + "project_name": "livedoor-news-corpus", "downloads": 220, "source": "Hugging Face", - "score": -0.101603820358879, - "first_commit": "2024-03-21 13:04:25", - "latest_commit": "2024-03-21 14:41:04", + "score": -0.10325905158366068, + "first_commit": "2023-06-21 07:16:52", + "latest_commit": "2023-12-12 11:19:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Deepreneur-blue-lizard-gguf Deepreneurさんが公開しているblue-lizardのggufフォーマット変換版です。 ", - "url": "https://huggingface.co./mmnga/Deepreneur-blue-lizard-gguf", - "project_name": "Deepreneur-blue-lizard-gguf", - "downloads": 216, + "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for VecTeus-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 VecTeus has the following changes compared to Mistral-7B-v0.1.", + "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-v1", + "project_name": "Vecteus-v1", + "downloads": 219, "source": "Hugging Face", - "score": -0.10166038085064522, - "first_commit": "2024-02-13 15:18:15", - "latest_commit": "2024-02-13 16:26:26", + "score": -0.10327365954108367, + "first_commit": "2024-05-01 02:08:01", + "latest_commit": "2024-05-04 04:07:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Model Card for Tanrei/GPTSAN-japanese General-purpose Swich transformer based Japanese language model GPTSAN has some unique features.", - "url": "https://huggingface.co./Tanrei/GPTSAN-japanese", - "project_name": "GPTSAN-japanese", - "downloads": 215, + "description": "DataPilot様の Llama3-ArrowSE-8B-v0.3 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Llama3-ArrowSE-8B-v0.3-GGUF", + "project_name": "Llama3-ArrowSE-8B-v0.3-GGUF", + "downloads": 218, "source": "Hugging Face", - "score": -0.10167452097358677, - "first_commit": "2023-01-06 05:41:12", - "latest_commit": "2023-04-21 19:04:49", + "score": -0.10328826749850666, + "first_commit": "2024-07-07 07:53:32", + "latest_commit": "2024-07-07 13:40:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "License:CreativeML Open RAIL-M Additional Copyright: sazyou_roukaku (TwitterID @sazyou_roukaku) as of June 25, 2023 このモデルは『CreativeML Open RAIL-M』でLicenseそのものに変更はありません。 ", "url": "https://huggingface.co./sazyou-roukaku/LittleStepMix", "project_name": 
"LittleStepMix", - "downloads": 214, + "downloads": 216, "source": "Hugging Face", - "score": -0.10168866109652833, + "score": -0.10331748341335262, "first_commit": "2023-06-25 06:57:42", "latest_commit": "2023-07-04 10:47:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "オリジナルのサイトと同じものを使用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/livedoor-news-corpus", - "project_name": "livedoor-news-corpus", - "downloads": 214, + "description": "mathstral-7B-v0.1-gguf mistralaiさんが公開しているmathstral-7B-v0.1のggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/mathstral-7B-v0.1-gguf", + "project_name": "mathstral-7B-v0.1-gguf", + "downloads": 215, "source": "Hugging Face", - "score": -0.10168866109652833, - "first_commit": "2023-06-21 07:16:52", - "latest_commit": "2023-12-12 11:19:43", + "score": -0.1033320913707756, + "first_commit": "2024-07-17 17:49:56", + "latest_commit": "2024-07-17 18:54:27", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.25 }, { - "description": "Vecteus-V2-7B このモデルは、ベクトルマージなどを用い作成された高性能ベースモデルです。 ", - "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-V2-7B", - "project_name": "Vecteus-V2-7B", + "description": "SakanaAI-EvoLLM-JP-v1-7B-gguf SakanaAIさんが公開しているEvoLLM-JP-v1-7Bのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/SakanaAI-EvoLLM-JP-v1-7B-gguf", + "project_name": "SakanaAI-EvoLLM-JP-v1-7B-gguf", + "downloads": 213, + "source": "Hugging Face", + "score": -0.10336130728562158, + "first_commit": "2024-03-21 13:04:25", + "latest_commit": "2024-03-21 14:41:04", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "Deepreneur-blue-lizard-gguf Deepreneurさんが公開しているblue-lizardのggufフォーマット変換版です。 ", + "url": "https://huggingface.co./mmnga/Deepreneur-blue-lizard-gguf", + "project_name": "Deepreneur-blue-lizard-gguf", "downloads": 212, "source": "Hugging Face", - "score": -0.10171694134241144, - "first_commit": "2024-06-16 03:51:43", - "latest_commit": "2024-07-06 13:39:41", + "score": -0.10337591524304457, + "first_commit": "2024-02-13 15:18:15", + "latest_commit": "2024-02-13 16:26:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { "description": "Local-Novel-LLM-project様の Assistance をGGUF形式に変換したものです。 ", "url": "https://huggingface.co./MCZK/Assistance-7B-GGUF", "project_name": "Assistance-7B-GGUF", - "downloads": 211, + "downloads": 212, "source": "Hugging Face", - "score": -0.101731081465353, + "score": -0.10337591524304457, "first_commit": "2024-05-03 12:16:29", "latest_commit": "2024-05-04 07:48:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Int4", - "project_name": "Orion-14B-Chat-Int4", + "description": "Model Card for Tanrei/GPTSAN-japanese General-purpose Swich transformer based Japanese language model GPTSAN has some unique features.", + "url": "https://huggingface.co./Tanrei/GPTSAN-japanese", + "project_name": "GPTSAN-japanese", "downloads": 211, "source": "Hugging Face", - "score": -0.101731081465353, - "first_commit": "2024-01-18 09:54:07", - "latest_commit": "2024-03-26 10:04:46", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "databricks/dolly-v2-12b の学習データに使用されたdatabricks-dolly-15k.jsonl を日本語に翻訳したデータセットになります。", 
- "url": "https://github.com/kunishou/databricks-dolly-15k-ja", - "project_name": "databricks-dolly-15k-ja", - "stargazers_count": 81, - "source": "GitHub", - "score": -0.1017677415468135, - "first_commit": "2023-04-14 23:43:27", - "latest_commit": "2023-07-26 00:08:32", + "score": -0.10339052320046754, + "first_commit": "2023-01-06 05:41:12", + "latest_commit": "2023-04-21 19:04:49", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 2.78 }, { - "description": "更新履歴 2023年5月7日 「oasst1-89k-ja」データセットを追加して対話システムに対応しました。", - "url": "https://huggingface.co./inu-ai/dolly-japanese-gpt-1b", - "project_name": "dolly-japanese-gpt-1b", - "downloads": 208, + "description": "What’s this?", + "url": "https://huggingface.co./globis-university/deberta-v3-japanese-xsmall", + "project_name": "deberta-v3-japanese-xsmall", + "downloads": 210, "source": "Hugging Face", - "score": -0.10177350183417766, - "first_commit": "2023-04-13 22:46:07", - "latest_commit": "2023-08-01 07:55:27", + "score": -0.10340513115789053, + "first_commit": "2023-09-21 16:12:53", + "latest_commit": "2024-07-05 05:48:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-sqlcoder-2x8B-GGUF", - "project_name": "Llama-3-ELYZA-sqlcoder-2x8B-GGUF", - "downloads": 206, + "description": "Rakuda - Questions for Japanese models Repository:", + "url": "https://huggingface.co./datasets/yuzuai/rakuda-questions", + "project_name": "rakuda-questions", + "downloads": 208, "source": "Hugging Face", - "score": -0.10180178208006077, - "first_commit": "2024-06-28 01:51:50", - "latest_commit": "2024-06-28 05:56:23", + "score": -0.10343434707273651, + "first_commit": "2023-06-23 01:08:54", + "latest_commit": "2023-06-23 08:01:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士通株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", - "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct-gguf", - "project_name": "Fugaku-LLM-13B-instruct-gguf", + "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B", + "project_name": "Fugaku-LLM-13B", "downloads": 205, "source": "Hugging Face", - "score": -0.10181592220300233, + "score": -0.10347817094500546, "first_commit": null, "latest_commit": null, "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SambaLingo-Japanese-Base SambaLingo-Japanese-Base is a pretrained Bi-lingual Japanese and English model that adapts Llama-2-7b to Japanese by training on 42 billion tokens from the Japanese split of the Cultura-X dataset.", - "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Base", - "project_name": "SambaLingo-Japanese-Base", + "description": "QuantFactory/llama-3-youko-8b-GGUF", + "url": "https://huggingface.co./QuantFactory/llama-3-youko-8b-GGUF", + "project_name": "llama-3-youko-8b-GGUF", "downloads": 204, "source": "Hugging Face", - "score": -0.10183006232594388, - "first_commit": "2024-02-15 22:49:08", - "latest_commit": "2024-04-16 22:33:28", + "score": -0.10349277890242845, + "first_commit": "2024-06-24 05:04:12", + 
"latest_commit": "2024-06-24 06:35:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Japanese-WizardLM2-ChatV-7B-GGUF GGUF conversion of \"Japanese-WizardLM2-ChatV-7B\" This model, Japanese-WizardLM2-ChatV-7B, is based on \"chatntq-ja-7b-v1.0 \", and was created by subtracting \"Mistral-7B-v0.1\" from \"WizardLM-2-7b\" ChatVector was added by a factor of 1.0.", - "url": "https://huggingface.co./umiyuki/Japanese-WizardLM2-ChatV-7B-GGUF", - "project_name": "Japanese-WizardLM2-ChatV-7B-GGUF", - "downloads": 201, + "description": "Vecteus-V2-7B このモデルは、ベクトルマージなどを用い作成された高性能ベースモデルです。 ", + "url": "https://huggingface.co./Local-Novel-LLM-project/Vecteus-V2-7B", + "project_name": "Vecteus-V2-7B", + "downloads": 203, "source": "Hugging Face", - "score": -0.10187248269476855, - "first_commit": "2024-04-16 14:45:30", - "latest_commit": "2024-04-17 01:41:16", + "score": -0.10350738685985143, + "first_commit": "2024-06-16 03:51:43", + "latest_commit": "2024-07-06 13:39:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "Fugaku-LLM利用規約 この利用規約(以下「本規約」といいます)は、富士��株式会社、国立研究開発法人理化学研究所、国立大学法人東京工業大学、国立大学法人東北大学、株式会社サイバーエージェント、国立大学法人東海国立大学機構、及び株式会社Kotoba Technologies Japan (以下「開発者」といいます)による、スーパーコンピュータ「富岳」政策対応枠における大規模言語モデル分散並列学習手法の開発の成果物として公開する大規模言語モデル(以下「Fugaku-LLM」といいます)の利用に関する条件を定めるものです。", + "url": "https://huggingface.co./Fugaku-LLM/Fugaku-LLM-13B-instruct-gguf", + "project_name": "Fugaku-LLM-13B-instruct-gguf", + "downloads": 200, + "source": "Hugging Face", + "score": -0.1035512107321204, + "first_commit": null, + "latest_commit": null, + "languages": [], + "model_or_dataset": "model", + "model_size": 13.4 }, { "description": "Kotoba-Speech-v0.1 Kotoba-Speech v0.1 is a 1.2B Transformer-based speech generative model.", "url": "https://huggingface.co./kotoba-tech/kotoba-speech-v0.1", "project_name": "kotoba-speech-v0.1", - "downloads": 198, + "downloads": 199, "source": "Hugging Face", - "score": -0.10191490306359322, + "score": -0.10356581868954337, "first_commit": "2024-03-14 01:21:58", "latest_commit": "2024-04-17 07:54:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Dataset Summary This is the Business Scene Dialogue (BSD) dataset, a Japanese-English parallel corpus containing written conversations in various business scenarios.", - "url": "https://huggingface.co./datasets/ryo0634/bsd_ja_en", - "project_name": "bsd_ja_en", - "downloads": 195, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Int4", + "project_name": "Orion-14B-Chat-Int4", + "downloads": 196, "source": "Hugging Face", - "score": -0.10195732343241788, - "first_commit": "2022-01-25 16:35:02", - "latest_commit": "2024-01-11 07:36:44", + "score": -0.10360964256181233, + "first_commit": "2024-01-18 09:54:07", + "latest_commit": "2024-03-26 10:04:46", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 2.69 }, { - "description": "alpaca_jp_python alpaca_jp_pythonは、 Stanford Alpacaの手法 mistralai/Mixtral-8x22B-Instruct-v0.1 で作った合成データ(Synthetic data)です。", - "url": "https://huggingface.co./datasets/HachiML/alpaca_jp_python", - "project_name": "alpaca_jp_python", - "downloads": 193, + "description": "Japanese-WizardLM2-ChatV-7B-GGUF GGUF conversion of \"Japanese-WizardLM2-ChatV-7B\" This model, Japanese-WizardLM2-ChatV-7B, is 
based on \"chatntq-ja-7b-v1.0 \", and was created by subtracting \"Mistral-7B-v0.1\" from \"WizardLM-2-7b\" ChatVector was added by a factor of 1.0.", + "url": "https://huggingface.co./umiyuki/Japanese-WizardLM2-ChatV-7B-GGUF", + "project_name": "Japanese-WizardLM2-ChatV-7B-GGUF", + "downloads": 196, "source": "Hugging Face", - "score": -0.10198560367830099, - "first_commit": "2024-05-16 02:02:09", - "latest_commit": "2024-05-20 01:44:32", + "score": -0.10360964256181233, + "first_commit": "2024-04-16 14:45:30", + "latest_commit": "2024-04-17 01:41:16", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-sqlcoder-2x8B-GGUF", + "project_name": "Llama-3-ELYZA-sqlcoder-2x8B-GGUF", + "downloads": 196, + "source": "Hugging Face", + "score": -0.10360964256181233, + "first_commit": "2024-06-28 01:51:50", + "latest_commit": "2024-06-28 05:56:23", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.7 }, { "description": "Additional pretrained BERT base Japanese finance This is a BERT model pretrained on texts in the Japanese language.", "url": "https://huggingface.co./izumi-lab/bert-base-japanese-fin-additional", "project_name": "bert-base-japanese-fin-additional", - "downloads": 190, + "downloads": 193, "source": "Hugging Face", - "score": -0.10202802404712566, + "score": -0.10365346643408128, "first_commit": "2022-03-11 17:41:11", "latest_commit": "2022-12-09 00:40:25", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Umievo-itr012-Gleipnir-7B-GGUF", - "url": "https://huggingface.co./QuantFactory/Umievo-itr012-Gleipnir-7B-GGUF", - "project_name": "Umievo-itr012-Gleipnir-7B-GGUF", - "downloads": 189, - "source": "Hugging Face", - "score": -0.10204216417006721, - "first_commit": "2024-06-09 03:48:10", - "latest_commit": "2024-06-09 13:12:32", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Rakuda - Questions for Japanese models Repository:", - "url": "https://huggingface.co./datasets/yuzuai/rakuda-questions", - "project_name": "rakuda-questions", - "downloads": 187, - "source": "Hugging Face", - "score": -0.10207044441595033, - "first_commit": "2023-06-23 01:08:54", - "latest_commit": "2023-06-23 08:01:35", - "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "データセット概要 手動で作成したDatabricksに関する質問と回答ペアの日本語データセットです。 ", - "url": "https://huggingface.co./datasets/yulanfmy/databricks-qa-ja", - "project_name": "databricks-qa-ja", - "downloads": 185, + "description": "alpaca_jp_python alpaca_jp_pythonは、 Stanford Alpacaの手法 mistralai/Mixtral-8x22B-Instruct-v0.1 で作った合成データ(Synthetic data)です。", + "url": "https://huggingface.co./datasets/HachiML/alpaca_jp_python", + "project_name": "alpaca_jp_python", + "downloads": 192, "source": "Hugging Face", - "score": -0.10209872466183345, - "first_commit": "2023-05-15 13:27:23", - "latest_commit": "2023-05-15 14:55:06", + "score": -0.10366807439150427, + "first_commit": "2024-05-16 02:02:09", + "latest_commit": "2024-05-20 01:44:32", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Converted from clu-ling/whisper-large-v2-japanese-5k-steps using CTranslate2.", - "url": "https://huggingface.co./zh-plus/faster-whisper-large-v2-japanese-5k-steps", - "project_name": 
"faster-whisper-large-v2-japanese-5k-steps", - "downloads": 181, + "description": "Dataset Summary This is the Business Scene Dialogue (BSD) dataset, a Japanese-English parallel corpus containing written conversations in various business scenarios.", + "url": "https://huggingface.co./datasets/ryo0634/bsd_ja_en", + "project_name": "bsd_ja_en", + "downloads": 190, "source": "Hugging Face", - "score": -0.10215528515359967, - "first_commit": "2023-07-03 08:29:37", - "latest_commit": "2023-07-03 18:42:31", + "score": -0.10369729030635025, + "first_commit": "2022-01-25 16:35:02", + "latest_commit": "2024-01-11 07:36:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "ElanMT ElanMT-BT-en-ja is a English to Japanese translation model developed by ELAN MITSUA Project / Abstract Engine.", - "url": "https://huggingface.co./Mitsua/elan-mt-bt-en-ja", - "project_name": "elan-mt-bt-en-ja", - "downloads": 181, + "description": "This is a model for named entity recognition of Japanese medical documents.", + "url": "https://huggingface.co./sociocom/MedNER-CR-JA", + "project_name": "MedNER-CR-JA", + "downloads": 189, "source": "Hugging Face", - "score": -0.10215528515359967, - "first_commit": "2024-05-20 01:51:18", - "latest_commit": "2024-05-20 01:53:38", + "score": -0.10371189826377324, + "first_commit": "2022-08-23 03:30:43", + "latest_commit": "2024-07-31 07:44:00", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.11 }, { "description": "Tanuki-ZeRo-gguf kanhatakeyamaさんが公開しているTanuki-ZeRoのggufフォーマット変換版です。 ", "url": "https://huggingface.co./mmnga/Tanuki-ZeRo-gguf", "project_name": "Tanuki-ZeRo-gguf", - "downloads": 181, + "downloads": 180, "source": "Hugging Face", - "score": -0.10215528515359967, + "score": -0.1038433698805801, "first_commit": "2024-03-30 10:49:02", "latest_commit": "2024-03-30 17:01:16", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Sakura_dataset 商用利用可能な超小規模高品質日本語データセット。 ", - "url": "https://huggingface.co./datasets/saldra/sakura_japanese_dataset", - "project_name": "sakura_japanese_dataset", - "downloads": 178, - "source": "Hugging Face", - "score": -0.10219770552242434, - "first_commit": "2023-06-07 05:44:23", - "latest_commit": "2023-06-08 11:31:06", - "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", - "url": "https://huggingface.co./stabilityai/japanese-stable-vlm", - "project_name": "japanese-stable-vlm", - "downloads": 177, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-small-long", + "project_name": "t5-small-long", + "downloads": 179, "source": "Hugging Face", - "score": -0.1022118456453659, - "first_commit": null, - "latest_commit": null, + "score": -0.10385797783800309, + "first_commit": "2023-04-26 08:26:49", + "latest_commit": "2023-05-10 10:01:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Swallow-8Bは追加の日本語継続事前学習により日本語が大変流暢なLlama-3派生モデルです。", @@ -7022,443 +7375,454 @@ "project_name": "Meta-Llama-3.1-8B-Instruct-plus-Swallow", "downloads": 176, "source": "Hugging Face", - "score": -0.10222598576830745, + "score": -0.10390180171027204, "first_commit": "2024-07-24 03:10:38", "latest_commit": "2024-07-24 04:03:21", "languages": 
[], - "model_or_dataset": "model" - }, - { - "description": "roberta-base-japanese-jsnli This model is a fine-tuned version of nlp-waseda/roberta-base-japanese on the JSNLI dataset.", - "url": "https://huggingface.co./Formzu/roberta-base-japanese-jsnli", - "project_name": "roberta-base-japanese-jsnli", - "downloads": 171, - "source": "Hugging Face", - "score": -0.10229668638301523, - "first_commit": "2022-10-14 07:50:47", - "latest_commit": "2022-10-19 11:08:59", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "GitHub リポジトリ cl-tohoku/quiz-datasets で公開されているデータセットを利用しています。 ", "url": "https://huggingface.co./datasets/llm-book/aio-retriever", "project_name": "aio-retriever", - "downloads": 170, + "downloads": 174, "source": "Hugging Face", - "score": -0.10231082650595678, + "score": -0.10393101762511801, "first_commit": "2023-07-04 04:53:47", "latest_commit": "2023-10-25 15:31:08", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "DataPilot様の ArrowPro-7B-RobinHood をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/ArrowPro-7B-RobinHood-GGUF", - "project_name": "ArrowPro-7B-RobinHood-GGUF", - "downloads": 169, + "description": "range3/wiki40b-ja This dataset consists of three parquet files from the wiki40b dataset with only Japanese data extracted.", + "url": "https://huggingface.co./datasets/range3/wiki40b-ja", + "project_name": "wiki40b-ja", + "downloads": 173, "source": "Hugging Face", - "score": -0.10232496662889834, - "first_commit": "2024-05-10 12:03:26", - "latest_commit": "2024-05-10 18:14:28", + "score": -0.103945625582541, + "first_commit": "2023-02-04 04:54:17", + "latest_commit": "2023-02-04 05:44:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Ninja-v1-RP-GGUF 概要 Aratako/Ninja-v1-RPの量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-GGUF", - "project_name": "Ninja-v1-RP-GGUF", - "downloads": 169, + "description": "By clicking \"Agree\", you agree to the License Agreement and acknowledge Stability AI's Privacy Policy.", + "url": "https://huggingface.co./stabilityai/japanese-stable-vlm", + "project_name": "japanese-stable-vlm", + "downloads": 172, "source": "Hugging Face", - "score": -0.10232496662889834, - "first_commit": "2024-05-20 17:08:50", - "latest_commit": "2024-05-24 15:11:08", + "score": -0.10396023353996399, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.57 }, { - "description": "WRIME-fine-tuned BERT base Japanese This model is a Japanese BERTBASE fine-tuned on the WRIME dataset.", - "url": "https://huggingface.co./patrickramos/bert-base-japanese-v2-wrime-fine-tune", - "project_name": "bert-base-japanese-v2-wrime-fine-tune", - "downloads": 167, + "description": "Model Card for Japanese BART large Model description", + "url": "https://huggingface.co./ku-nlp/bart-large-japanese", + "project_name": "bart-large-japanese", + "downloads": 169, "source": "Hugging Face", - "score": -0.10235324687478145, - "first_commit": "2022-05-22 09:42:14", - "latest_commit": "2023-03-22 08:11:34", + "score": -0.10400405741223294, + "first_commit": "2023-05-09 07:44:59", + "latest_commit": "2023-05-12 11:05:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model card 
for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-small-long", - "project_name": "t5-small-long", - "downloads": 166, + "description": "DataPilot様の ArrowPro-7B-RobinHood をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/ArrowPro-7B-RobinHood-GGUF", + "project_name": "ArrowPro-7B-RobinHood-GGUF", + "downloads": 168, "source": "Hugging Face", - "score": -0.102367386997723, - "first_commit": "2023-04-26 08:26:49", - "latest_commit": "2023-05-10 10:01:29", + "score": -0.10401866536965593, + "first_commit": "2024-05-10 12:03:26", + "latest_commit": "2024-05-10 18:14:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "What’s this?", - "url": "https://huggingface.co./globis-university/deberta-v3-japanese-xsmall", - "project_name": "deberta-v3-japanese-xsmall", - "downloads": 165, + "description": "Japanese Anime Speech Dataset 日本語はこちら japanese-anime-speech is an audio-text dataset designed for the training of automatic speech recognition models.", + "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech", + "project_name": "japanese-anime-speech", + "downloads": 168, "source": "Hugging Face", - "score": -0.10238152712066456, - "first_commit": "2023-09-21 16:12:53", - "latest_commit": "2024-07-05 05:48:15", + "score": -0.10401866536965593, + "first_commit": "2023-11-07 13:53:40", + "latest_commit": "2024-06-30 10:06:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "umiyuki様の Japanese-Chat-Umievo-itr004-7b をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Japanese-Chat-Umievo-itr004-7b-GGUF", - "project_name": "Japanese-Chat-Umievo-itr004-7b-GGUF", - "downloads": 164, + "description": "alpaca-guanaco-japanese-gpt-1b 1.3Bパラメータの日本語GPTモデルを使用した対話AIです。", + "url": "https://huggingface.co./inu-ai/alpaca-guanaco-japanese-gpt-1b", + "project_name": "alpaca-guanaco-japanese-gpt-1b", + "downloads": 166, "source": "Hugging Face", - "score": -0.10239566724360612, - "first_commit": "2024-05-13 16:28:41", - "latest_commit": "2024-05-13 23:33:49", + "score": -0.1040478812845019, + "first_commit": "2023-04-12 00:18:29", + "latest_commit": "2023-04-13 10:25:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.33 }, { "description": "ryota39様の Tora-7B-v0.1 をGGUF形式に変換したものです。 ", "url": "https://huggingface.co./MCZK/Tora-7B-v0.1-GGUF", "project_name": "Tora-7B-v0.1-GGUF", - "downloads": 163, + "downloads": 166, "source": "Hugging Face", - "score": -0.10240980736654767, + "score": -0.1040478812845019, "first_commit": "2024-05-07 11:24:35", "latest_commit": "2024-06-15 03:16:21", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "モデル概要 AWSのtrn1インスタンスを用いて開発した大喜利言語モデルです。", - "url": "https://huggingface.co./watashiha/watashiha-gpt-6b", - "project_name": "watashiha-gpt-6b", - "downloads": 162, - "source": "Hugging Face", - "score": -0.10242394748948923, - "first_commit": "2023-12-28 05:41:38", - "latest_commit": "2024-03-04 05:21:14", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ChatNTQ JA 7B V1.0 Model Description", - "url": "https://huggingface.co./NTQAI/chatntq-ja-7b-v1.0", - "project_name": "chatntq-ja-7b-v1.0", - "downloads": 160, - "source": "Hugging Face", - "score": -0.10245222773537234, - "first_commit": "2023-12-26 06:22:59", - "latest_commit": "2023-12-26 09:22:34", - "languages": [], - "model_or_dataset": "model" 
+ "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "RoBERTa base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", - "url": "https://huggingface.co./ybelkada/japanese-roberta-question-answering", - "project_name": "japanese-roberta-question-answering", - "downloads": 159, + "description": "更新履歴 2023年5月7日 「oasst1-89k-ja」データセットを追加して対話システムに対応しました。", + "url": "https://huggingface.co./inu-ai/dolly-japanese-gpt-1b", + "project_name": "dolly-japanese-gpt-1b", + "downloads": 164, "source": "Hugging Face", - "score": -0.1024663678583139, - "first_commit": "2022-04-08 08:52:22", - "latest_commit": "2022-04-08 11:38:39", + "score": -0.10407709719934786, + "first_commit": "2023-04-13 22:46:07", + "latest_commit": "2023-08-01 07:55:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.33 }, { - "description": "Japanese Anime Speech Dataset 日本語はこちら japanese-anime-speech is an audio-text dataset designed for the training of automatic speech recognition models.", - "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech", - "project_name": "japanese-anime-speech", - "downloads": 159, + "description": "Ninja-v1-RP-GGUF 概要 Aratako/Ninja-v1-RPの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-GGUF", + "project_name": "Ninja-v1-RP-GGUF", + "downloads": 164, "source": "Hugging Face", - "score": -0.1024663678583139, - "first_commit": "2023-11-07 13:53:40", - "latest_commit": "2024-06-30 10:06:34", + "score": -0.10407709719934786, + "first_commit": "2024-05-20 17:08:50", + "latest_commit": "2024-05-24 15:11:08", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Japanese Anime Speech Dataset V2 日本語はこちら japanese-anime-speech-v2 is an audio-text dataset designed for training automatic speech recognition models.", - "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech-v2", - "project_name": "japanese-anime-speech-v2", - "downloads": 158, + "description": "モデル説明 (model explanation) CoolJapanDiffusion 2.1.1とWaifuDiffusion 1.4 anime epoch2のマージ。", + "url": "https://huggingface.co./ThePioneer/CoolerWaifuDiffusion", + "project_name": "CoolerWaifuDiffusion", + "downloads": 163, "source": "Hugging Face", - "score": -0.10248050798125545, - "first_commit": "2024-06-26 14:18:01", - "latest_commit": "2024-07-24 19:06:51", + "score": -0.10409170515677085, + "first_commit": "2023-01-20 23:52:39", + "latest_commit": "2023-01-22 19:16:59", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { "description": "QuantFactory/shisa-gamma-7b-v1-GGUF", "url": "https://huggingface.co./QuantFactory/shisa-gamma-7b-v1-GGUF", "project_name": "shisa-gamma-7b-v1-GGUF", - "downloads": 156, + "downloads": 163, "source": "Hugging Face", - "score": -0.10250878822713856, + "score": -0.10409170515677085, "first_commit": "2024-06-12 17:16:36", "latest_commit": "2024-06-18 06:17:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "range3/wiki40b-ja This dataset consists of three parquet files from the wiki40b dataset with only Japanese data extracted.", - "url": "https://huggingface.co./datasets/range3/wiki40b-ja", - "project_name": "wiki40b-ja", - "downloads": 156, + "description": "umiyuki様の Japanese-Chat-Umievo-itr004-7b をGGUF形式に変換したものです。 ", + "url": 
"https://huggingface.co./MCZK/Japanese-Chat-Umievo-itr004-7b-GGUF", + "project_name": "Japanese-Chat-Umievo-itr004-7b-GGUF", + "downloads": 163, "source": "Hugging Face", - "score": -0.10250878822713856, - "first_commit": "2023-02-04 04:54:17", - "latest_commit": "2023-02-04 05:44:21", + "score": -0.10409170515677085, + "first_commit": "2024-05-13 16:28:41", + "latest_commit": "2024-05-13 23:33:49", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Oumuamua-7b-RPの量子化済���GGUF版です。", - "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP-GGUF", - "project_name": "Oumuamua-7b-RP-GGUF", - "downloads": 155, + "description": "Sakura_dataset 商用利用可能な超小規模高品質日本語データセット。 ", + "url": "https://huggingface.co./datasets/saldra/sakura_japanese_dataset", + "project_name": "sakura_japanese_dataset", + "downloads": 163, "source": "Hugging Face", - "score": -0.10252292835008012, - "first_commit": "2024-06-23 13:00:02", - "latest_commit": "2024-06-23 14:45:14", + "score": -0.10409170515677085, + "first_commit": "2023-06-07 05:44:23", + "latest_commit": "2023-06-08 11:31:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "このモデルはluke-japanese-base-liteをファインチューニングして、Question-Answeringに用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-QA", - "project_name": "luke-japanese-base-finetuned-QA", - "downloads": 155, + "description": "JA-VG-VQA-500 Dataset Description JA-VG-VQA-500 is a 500-sample subset of Japanese Visual Genome VQA dataset.", + "url": "https://huggingface.co./datasets/SakanaAI/JA-VG-VQA-500", + "project_name": "JA-VG-VQA-500", + "downloads": 163, "source": "Hugging Face", - "score": -0.10252292835008012, - "first_commit": "2023-01-15 23:38:30", - "latest_commit": "2023-07-21 14:11:02", + "score": -0.10409170515677085, + "first_commit": "2024-03-21 09:51:10", + "latest_commit": "2024-05-14 04:11:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/jawiki-sentences", - "project_name": "jawiki-sentences", - "downloads": 155, + "description": "WRIME-fine-tuned BERT base Japanese This model is a Japanese BERTBASE fine-tuned on the WRIME dataset.", + "url": "https://huggingface.co./patrickramos/bert-base-japanese-v2-wrime-fine-tune", + "project_name": "bert-base-japanese-v2-wrime-fine-tune", + "downloads": 162, "source": "Hugging Face", - "score": -0.10252292835008012, - "first_commit": "2023-06-03 03:02:08", - "latest_commit": "2023-10-25 15:22:05", + "score": -0.10410631311419384, + "first_commit": "2022-05-22 09:42:14", + "latest_commit": "2023-03-22 08:11:34", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-v1.1", - "project_name": "t5-base-japanese-v1.1", - "downloads": 154, + "description": "Japanese Anime Speech Dataset V2 日本語はこちら japanese-anime-speech-v2 is an audio-text dataset designed for training automatic speech recognition models.", + "url": "https://huggingface.co./datasets/joujiboi/japanese-anime-speech-v2", + "project_name": "japanese-anime-speech-v2", + "downloads": 159, "source": "Hugging Face", - "score": -0.10253706847302167, - "first_commit": "2022-08-12 15:41:28", - "latest_commit": "2022-08-27 09:21:01", + "score": -0.1041501369864628, + "first_commit": "2024-06-26 14:18:01", + "latest_commit": "2024-07-24 19:06:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "モデル概要 AWSのtrn1インスタンスを用いて開発した大喜利言語モデルです。", + "url": "https://huggingface.co./watashiha/watashiha-gpt-6b", + "project_name": "watashiha-gpt-6b", + "downloads": 156, + "source": "Hugging Face", + "score": -0.10419396085873175, + "first_commit": "2023-12-28 05:41:38", + "latest_commit": "2024-03-04 05:21:14", + "languages": [], + "model_or_dataset": "model", + "model_size": 5.83 }, { "description": "c4ai-command-r-v01-japanese-instruct-GGUF 概要 Aratako/c4ai-command-r-v01-japanese-instructの量子化済みGGUF版です。", "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct-GGUF", "project_name": "c4ai-command-r-v01-japanese-instruct-GGUF", - "downloads": 154, + "downloads": 155, "source": "Hugging Face", - "score": -0.10253706847302167, + "score": -0.10420856881615474, "first_commit": "2024-04-05 17:10:51", "latest_commit": "2024-04-07 03:19:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 35.0 }, { - "description": "Synthetic-JP-EN-Coding-Dataset-567k Magpieによって作成したコードSFTデータセットであるAratako/Synthetic-JP-EN-Coding-Dataset-Magpie-69kを元に、Evol-Instructのような手法を用いて複数のinstructionとresonseを生成し拡張して作成した、日英混合567077件のコードSFT用合成データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Synthetic-JP-EN-Coding-Dataset-567k", - "project_name": "Synthetic-JP-EN-Coding-Dataset-567k", + "description": "ryota39様の Tora-7B-v0.2 をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Tora-7B-v0.2-GGUF", + "project_name": "Tora-7B-v0.2-GGUF", "downloads": 153, "source": "Hugging Face", - "score": -0.10255120859596323, - "first_commit": "2024-07-14 14:04:30", - "latest_commit": "2024-07-14 14:40:50", + "score": -0.10423778473100072, + "first_commit": "2024-05-06 12:50:49", + "latest_commit": "2024-06-15 03:17:32", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 }, { "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", "url": "https://huggingface.co./TFMC/Japanese-Starling-ChatV-7B", "project_name": "Japanese-Starling-ChatV-7B", - "downloads": 152, + "downloads": 151, "source": "Hugging Face", - "score": -0.10256534871890478, + "score": -0.10426700064584668, "first_commit": "2024-04-14 12:18:31", "latest_commit": "2024-04-14 15:26:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Bluemoon_Top50MB_Sorted_Fixed_ja SicariusSicariiStuff/Bluemoon_Top50MB_Sorted_Fixedを、GENIAC-Team-Ozaki/karakuri-lm-8x7b-chat-v0.1-awqを用いて日本語に翻訳したロールプレイ学習用データセットです。 ", - "url": "https://huggingface.co./datasets/Aratako/Bluemoon_Top50MB_Sorted_Fixed_ja", - "project_name": 
"Bluemoon_Top50MB_Sorted_Fixed_ja", - "downloads": 152, + "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/jawiki-sentences", + "project_name": "jawiki-sentences", + "downloads": 150, "source": "Hugging Face", - "score": -0.10256534871890478, - "first_commit": "2024-05-18 15:35:17", - "latest_commit": "2024-05-18 15:38:35", + "score": -0.10428160860326967, + "first_commit": "2023-06-03 03:02:08", + "latest_commit": "2023-10-25 15:22:05", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "line-corporation/japanese-large-lm-3.6b-instruction-sft line-corporationさんが公開しているjapanese-large-lm-3.6b-instruction-sftのgguf変換版です。 ", "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", "project_name": "line-corp-japanese-large-lm-3.6b-instruction-sft-gguf", - "downloads": 151, + "downloads": 148, "source": "Hugging Face", - "score": -0.10257948884184634, + "score": -0.10431082451811564, "first_commit": "2023-09-02 18:01:40", "latest_commit": "2023-09-08 02:52:29", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ku-nlp/roberta-large-japanese-char-wwm Model description This is a Japanese RoBERTa large model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./ku-nlp/roberta-large-japanese-char-wwm", - "project_name": "roberta-large-japanese-char-wwm", - "downloads": 151, - "source": "Hugging Face", - "score": -0.10257948884184634, - "first_commit": "2022-09-18 08:10:44", - "latest_commit": "2023-03-19 01:58:12", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ryota39様の Tora-7B-v0.2 をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Tora-7B-v0.2-GGUF", - "project_name": "Tora-7B-v0.2-GGUF", - "downloads": 150, - "source": "Hugging Face", - "score": -0.1025936289647879, - "first_commit": "2024-05-06 12:50:49", - "latest_commit": "2024-06-15 03:17:32", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.71 }, { - "description": "Anime with caption CC-0 dataset このデータセットはイラストに対する日本語キャプションを 倫理的に学習しやすくするためのデータセットです。 ", - "url": "https://huggingface.co./datasets/alfredplpl/anime-with-caption-cc0", - "project_name": "anime-with-caption-cc0", + "description": "Bluemoon_Top50MB_Sorted_Fixed_ja SicariusSicariiStuff/Bluemoon_Top50MB_Sorted_Fixedを、GENIAC-Team-Ozaki/karakuri-lm-8x7b-chat-v0.1-awqを用いて日本語に翻訳したロールプレイ学習用データセットです。 ", + "url": "https://huggingface.co./datasets/Aratako/Bluemoon_Top50MB_Sorted_Fixed_ja", + "project_name": "Bluemoon_Top50MB_Sorted_Fixed_ja", "downloads": 148, "source": "Hugging Face", - "score": -0.102621909210671, - "first_commit": "2024-06-03 04:37:13", - "latest_commit": "2024-06-03 05:49:20", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Model Card for Japanese BART large Model description", - "url": "https://huggingface.co./ku-nlp/bart-large-japanese", - "project_name": "bart-large-japanese", - "downloads": 147, - "source": "Hugging Face", - "score": -0.10263604933361256, - "first_commit": "2023-05-09 07:44:59", - "latest_commit": "2023-05-12 11:05:03", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese Considering the results of the JNLI evaluation result, we recommend using 
akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick for natural language inference in Japanese.", - "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli", - "project_name": "bert-base-japanese-v3_nli-jsnli", - "downloads": 146, - "source": "Hugging Face", - "score": -0.10265018945655412, - "first_commit": "2024-04-11 05:38:09", - "latest_commit": "2024-04-26 06:27:05", + "score": -0.10431082451811564, + "first_commit": "2024-05-18 15:35:17", + "latest_commit": "2024-05-18 15:38:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Model card for model ID", "url": "https://huggingface.co./retrieva-jp/t5-small-medium", "project_name": "t5-small-medium", - "downloads": 145, + "downloads": 147, "source": "Hugging Face", - "score": -0.10266432957949567, + "score": -0.10432543247553863, "first_commit": "2023-04-26 08:26:19", "latest_commit": "2023-05-10 10:01:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Evol-Alpaca-gen3-500 Evol-Alpaca-gen3-500は、", - "url": "https://huggingface.co./datasets/HachiML/Evol-Alpaca-gen3-500", - "project_name": "Evol-Alpaca-gen3-500", - "downloads": 144, + "description": "Anime with caption CC-0 dataset このデータセットはイラストに対する日本語キャプションを 倫理的に学習しやすくするためのデータセットです。 ", + "url": "https://huggingface.co./datasets/alfredplpl/anime-with-caption-cc0", + "project_name": "anime-with-caption-cc0", + "downloads": 147, "source": "Hugging Face", - "score": -0.10267846970243723, - "first_commit": "2024-05-12 11:13:09", - "latest_commit": "2024-05-20 01:43:43", + "score": -0.10432543247553863, + "first_commit": "2024-06-03 04:37:13", + "latest_commit": "2024-06-03 05:49:20", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Local-Novel-LLM-project様の Vecteus-V2-7B をGGUF形式に変換したものです。 ", "url": "https://huggingface.co./MCZK/Vecteus-V2-7B-GGUF", "project_name": "Vecteus-V2-7B-GGUF", - "downloads": 143, + "downloads": 146, "source": "Hugging Face", - "score": -0.10269260982537878, + "score": -0.1043400404329616, "first_commit": "2024-06-16 05:26:00", "latest_commit": "2024-06-16 11:32:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "Cross-Encoder for Natural Language Inference(NLI) for Japanese Considering the results of the JNLI evaluation result, we recommend using akiFQC/bert-base-japanese-v3_nli-jsnli-jnli-jsick for natural language inference in Japanese.", + "url": "https://huggingface.co./akiFQC/bert-base-japanese-v3_nli-jsnli", + "project_name": "bert-base-japanese-v3_nli-jsnli", + "downloads": 146, + "source": "Hugging Face", + "score": -0.1043400404329616, + "first_commit": "2024-04-11 05:38:09", + "latest_commit": "2024-04-26 06:27:05", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.111 + }, + { + "description": "RoBERTa base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", + "url": "https://huggingface.co./ybelkada/japanese-roberta-question-answering", + "project_name": "japanese-roberta-question-answering", + "downloads": 145, + "source": "Hugging Face", + "score": -0.1043546483903846, + "first_commit": "2022-04-08 08:52:22", + "latest_commit": "2022-04-08 11:38:39", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "長文用のinstructionデータセットです。 ", "url": 
"https://huggingface.co./datasets/aixsatoshi/Longcontext-aozora-instruction", "project_name": "Longcontext-aozora-instruction", - "downloads": 143, + "downloads": 142, "source": "Hugging Face", - "score": -0.10269260982537878, + "score": -0.10439847226265356, "first_commit": "2024-03-29 11:35:09", "latest_commit": "2024-03-30 11:44:00", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "Evol-Alpaca-gen3-500 Evol-Alpaca-gen3-500は、", + "url": "https://huggingface.co./datasets/HachiML/Evol-Alpaca-gen3-500", + "project_name": "Evol-Alpaca-gen3-500", + "downloads": 142, + "source": "Hugging Face", + "score": -0.10439847226265356, + "first_commit": "2024-05-12 11:13:09", + "latest_commit": "2024-05-20 01:43:43", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "QuantFactory/shisa-7b-v1-GGUF This is quantized version of augmxnt/shisa-base-7b-v1 created using llama.cpp Model Description shisa-base-7b-v1 takes Mistral 7B and adds an additional 8B tokens of primarily Japanese pre-training.", "url": "https://huggingface.co./QuantFactory/shisa-7b-v1-GGUF", "project_name": "shisa-7b-v1-GGUF", - "downloads": 140, + "downloads": 141, "source": "Hugging Face", - "score": -0.10273503019420345, + "score": -0.10441308022007655, "first_commit": "2024-06-14 01:44:05", "latest_commit": "2024-06-18 05:53:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.96 }, { - "description": "Japanese-TextGen-Kage-v0.1-2x7B Kage is \"影\" in Japanese or \"Shadow\" in English.", - "url": "https://huggingface.co./dddump/Japanese-TextGen-Kage-v0.1-2x7B-gguf", - "project_name": "Japanese-TextGen-Kage-v0.1-2x7B-gguf", - "downloads": 138, + "description": "このモデルはluke-japanese-base-liteをファインチューニングして、Question-Answeringに用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-QA", + "project_name": "luke-japanese-base-finetuned-QA", + "downloads": 141, "source": "Hugging Face", - "score": -0.10276331044008657, - "first_commit": "2024-05-04 07:03:38", - "latest_commit": "2024-05-19 08:54:19", + "score": -0.10441308022007655, + "first_commit": "2023-01-15 23:38:30", + "latest_commit": "2023-07-21 14:11:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.132 + }, + { + "description": "LLaVA-JP Model Card Model detail Model type: LLaVA-JP is a vision-language model that can converse about input images.", + "url": "https://huggingface.co./toshi456/llava-jp-1.3b-v1.0", + "project_name": "llava-jp-1.3b-v1.0", + "downloads": 137, + "source": "Hugging Face", + "score": -0.10447151204976848, + "first_commit": "2023-12-04 13:13:03", + "latest_commit": "2023-12-18 10:21:11", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.73 }, { "description": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf lightblueさんが公開しているKarasu-Mixtral-8x22B-v0.1のggufフォーマット変換版です。 ", @@ -7466,47 +7830,51 @@ "project_name": "lightblue-Karasu-Mixtral-8x22B-v0.1-gguf", "downloads": 134, "source": "Hugging Face", - "score": -0.1028198709318528, + "score": -0.10451533592203743, "first_commit": "2024-05-07 12:53:56", "latest_commit": "2024-05-07 18:07:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 141.0 }, { - "description": "JA-VG-VQA-500 Dataset Description JA-VG-VQA-500 is a 500-sample subset of Japanese Visual Genome VQA dataset.", - "url": 
"https://huggingface.co./datasets/SakanaAI/JA-VG-VQA-500", - "project_name": "JA-VG-VQA-500", + "description": "Japanese CLIP ViT-H/14 (Wider) Table of Contents Overview Usage Model Details Evaluation Limitations and Biases Citation See Also Contact Information Overview Developed by:", + "url": "https://huggingface.co./hakuhodo-tech/japanese-clip-vit-h-14-bert-wider", + "project_name": "japanese-clip-vit-h-14-bert-wider", "downloads": 134, "source": "Hugging Face", - "score": -0.1028198709318528, - "first_commit": "2024-03-21 09:51:10", - "latest_commit": "2024-05-14 04:11:31", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Model Card for Japanese DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese", - "project_name": "deberta-v2-large-japanese", - "downloads": 133, - "source": "Hugging Face", - "score": -0.10283401105479435, - "first_commit": "2023-01-07 07:45:25", - "latest_commit": "2023-05-12 14:10:35", + "score": -0.10451533592203743, + "first_commit": "2024-03-06 03:30:25", + "latest_commit": "2024-03-06 21:46:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.91 }, { "description": "Dataset Summary SNOW T15:The simplified corpus for the Japanese language.", "url": "https://huggingface.co./datasets/SNOW-NLP/snow_simplified_japanese_corpus", "project_name": "snow_simplified_japanese_corpus", - "downloads": 133, + "downloads": 134, "source": "Hugging Face", - "score": -0.10283401105479435, + "score": -0.10451533592203743, "first_commit": "2022-01-25 16:36:23", "latest_commit": "2024-01-18 11:16:01", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "mbpp-ja", + "url": "https://huggingface.co./datasets/llm-jp/mbpp-ja", + "project_name": "mbpp-ja", + "downloads": 133, + "source": "Hugging Face", + "score": -0.10452994387946042, + "first_commit": "2024-04-19 00:26:56", + "latest_commit": "2024-04-20 06:26:51", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Llama-3-8B-Instruct-JP-nk2t-v0.2 Model Details: Built with Meta Llama 3", @@ -7514,23 +7882,38 @@ "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.2", "downloads": 132, "source": "Hugging Face", - "score": -0.10284815117773591, + "score": -0.10454455183688341, "first_commit": "2024-05-04 04:16:35", "latest_commit": "2024-05-15 12:56:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 + }, + { + "description": "Japanese-TextGen-Kage-v0.1-2x7B Kage is \"影\" in Japanese or \"Shadow\" in English.", + "url": "https://huggingface.co./dddump/Japanese-TextGen-Kage-v0.1-2x7B-gguf", + "project_name": "Japanese-TextGen-Kage-v0.1-2x7B-gguf", + "downloads": 130, + "source": "Hugging Face", + "score": -0.10457376775172939, + "first_commit": "2024-05-04 07:03:38", + "latest_commit": "2024-05-19 08:54:19", + "languages": [], + "model_or_dataset": "model", + "model_size": 12.9 }, { "description": "Local-Novel-LLM-project様の Ninja-V3 をGGUF形式に変換したものです。 ", "url": "https://huggingface.co./MCZK/Ninja-V3-GGUF", "project_name": "Ninja-V3-GGUF", - "downloads": 132, + "downloads": 128, "source": "Hugging Face", - "score": -0.10284815117773591, + "score": -0.10460298366657535, "first_commit": "2024-07-03 
11:52:04", "latest_commit": "2024-07-03 16:59:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { "description": "Oumuamua-7b-instruct-GGUF This is quantized version of nitky/Oumuamua-7b-instruct created using llama.cpp Model Description This is a merge of pre-trained language models created using mergekit. ", @@ -7538,23 +7921,38 @@ "project_name": "Oumuamua-7b-instruct-GGUF", "downloads": 127, "source": "Hugging Face", - "score": -0.10291885179244369, + "score": -0.10461759162399834, "first_commit": "2024-06-19 08:52:12", "latest_commit": "2024-06-19 11:40:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.33 }, { - "description": "mqaデータセットのquery--passageのペアについて重複を削除したデータセットです。 ", - "url": "https://huggingface.co./datasets/hpprc/mqa-ja", - "project_name": "mqa-ja", - "downloads": 127, + "description": "ChatNTQ JA 7B V1.0 Model Description", + "url": "https://huggingface.co./NTQAI/chatntq-ja-7b-v1.0", + "project_name": "chatntq-ja-7b-v1.0", + "downloads": 125, "source": "Hugging Face", - "score": -0.10291885179244369, - "first_commit": "2024-04-07 06:23:02", - "latest_commit": "2024-04-07 15:16:42", + "score": -0.10464680753884431, + "first_commit": "2023-12-26 06:22:59", + "latest_commit": "2023-12-26 09:22:34", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "GGUF conversion of NTQAI/chatntq-ja-7b-v1.0 ChatNTQ-JA-7b-v1.0 is a Japanese chat fine-tuned model built on top of the stabilityai/japanese-stablelm-base-gamma-7b, which is originally based on Mistral 7B v0.1.", + "url": "https://huggingface.co./TFMC/ChatNTQ-JA-7b-v1.0-GGUF", + "project_name": "ChatNTQ-JA-7b-v1.0-GGUF", + "downloads": 124, + "source": "Hugging Face", + "score": -0.1046614154962673, + "first_commit": "2024-04-03 22:42:14", + "latest_commit": "2024-04-04 23:10:54", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 }, { "description": "日本語向け Llama 3 8B はじめに このリポジトリはLlama 3を日本語化しようとしたモデルのリポジトリです。", @@ -7562,23 +7960,25 @@ "project_name": "Llama-3-8B-Instruct-Ja", "downloads": 124, "source": "Hugging Face", - "score": -0.10296127216126835, + "score": -0.1046614154962673, "first_commit": "2024-04-22 05:14:33", "latest_commit": "2024-05-01 19:16:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "This is a Japanese+English sentence-BERT model.", - "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-en-mean-tokens", - "project_name": "sentence-bert-base-ja-en-mean-tokens", + "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", + "url": "https://huggingface.co./cinmodel/electra-small-japanese-discriminator", + "project_name": "electra-small-japanese-discriminator", "downloads": 123, "source": "Hugging Face", - "score": -0.10297541228420991, - "first_commit": "2022-05-08 03:05:08", - "latest_commit": "2022-05-08 03:29:28", + "score": -0.10467602345369029, + "first_commit": "2020-11-13 06:49:25", + "latest_commit": "2020-12-11 22:26:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "nlp-waseda/roberta-large-japanese-with-auto-jumanpp Model description", @@ -7586,167 +7986,142 @@ "project_name": "roberta-large-japanese-with-auto-jumanpp", 
"downloads": 122, "source": "Hugging Face", - "score": -0.10298955240715146, + "score": -0.10469063141111326, "first_commit": "2022-10-15 05:40:40", "latest_commit": "2022-10-21 15:55:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", - "url": "https://huggingface.co./cinmodel/electra-small-japanese-discriminator", - "project_name": "electra-small-japanese-discriminator", + "description": "Local-Novel-LLM-project様の Ninja-V2-7B をGGUF形式に変換したものです。 ", + "url": "https://huggingface.co./MCZK/Ninja-V2-7B-GGUF", + "project_name": "Ninja-V2-7B-GGUF", "downloads": 121, "source": "Hugging Face", - "score": -0.10300369253009302, - "first_commit": "2020-11-13 06:49:25", - "latest_commit": "2020-12-11 22:26:13", + "score": -0.10470523936853625, + "first_commit": "2024-06-15 16:23:41", + "latest_commit": "2024-06-15 21:25:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "GGUF conversion of NTQAI/chatntq-ja-7b-v1.0 ChatNTQ-JA-7b-v1.0 is a Japanese chat fine-tuned model built on top of the stabilityai/japanese-stablelm-base-gamma-7b, which is originally based on Mistral 7B v0.1.", - "url": "https://huggingface.co./TFMC/ChatNTQ-JA-7b-v1.0-GGUF", - "project_name": "ChatNTQ-JA-7b-v1.0-GGUF", - "downloads": 119, + "description": "deberta-base-japanese-aozora-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-head", + "project_name": "deberta-base-japanese-aozora-ud-head", + "downloads": 120, "source": "Hugging Face", - "score": -0.10303197277597613, - "first_commit": "2024-04-03 22:42:14", - "latest_commit": "2024-04-04 23:10:54", + "score": -0.10471984732595924, + "first_commit": "2022-06-15 04:02:27", + "latest_commit": "2023-03-04 20:10:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "abc-multiple-choice Dataset abc-multiple-choice は、競技クイズの大会「abc」で使用された4択問題を元に作成された、多肢選択式の質問応答データセットです。 ", - "url": "https://huggingface.co./datasets/tohoku-nlp/abc-multiple-choice", - "project_name": "abc-multiple-choice", - "downloads": 119, - "source": "Hugging Face", - "score": -0.10303197277597613, - "first_commit": "2024-03-02 03:58:25", - "latest_commit": "2024-03-12 07:32:13", - "languages": [], - "model_or_dataset": "dataset" + "description": "Python library for CJK (Chinese, Japanese, and Korean) language dictionary", + "url": "https://github.com/cihai/cihai", + "project_name": "cihai", + "stargazers_count": 80, + "source": "GitHub", + "score": -0.1047294641486553, + "first_commit": "2013-12-03 09:42:52", + "latest_commit": "2024-08-10 13:13:53", + "languages": [ + "Python" + ], + "model_or_dataset": null }, { - "description": "COMET-GPT2 ja Finetuned GPT-2 on ATOMIC ja using a causal language modeling (CLM) objective.", - "url": "https://huggingface.co./nlp-waseda/comet-gpt2-small-japanese", - "project_name": "comet-gpt2-small-japanese", - "downloads": 118, - "source": "Hugging Face", - "score": -0.10304611289891769, - "first_commit": "2022-11-15 05:14:35", - "latest_commit": "2023-02-13 10:26:12", + "description": "Neologism dictionary based on the language resources on the Web for mecab-unidic", + "url": "https://github.com/neologd/mecab-unidic-neologd", + "project_name": 
"mecab-unidic-neologd", + "stargazers_count": 80, + "source": "GitHub", + "score": -0.1047294641486553, + "first_commit": "2015-03-19 10:52:02", + "latest_commit": "2020-09-14 19:58:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset" }, { "description": "bert-japanese_finetuned-sentiment-analysis This model was trained from scratch on the Japanese Sentiment Polarity Dictionary dataset.", "url": "https://huggingface.co./minutillamolinara/bert-japanese_finetuned-sentiment-analysis", "project_name": "bert-japanese_finetuned-sentiment-analysis", - "downloads": 116, + "downloads": 119, "source": "Hugging Face", - "score": -0.1030743931448008, + "score": -0.10473445528338222, "first_commit": "2023-03-31 02:28:09", "latest_commit": "2023-03-31 13:13:37", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Finetuned Waseda RoBERTa to evaluate the generated answers on JTruthfulQA.", - "url": "https://huggingface.co./nlp-waseda/roberta_jtruthfulqa", - "project_name": "roberta_jtruthfulqa", - "downloads": 116, - "source": "Hugging Face", - "score": -0.1030743931448008, - "first_commit": "2023-12-06 01:33:02", - "latest_commit": "2023-12-06 04:31:12", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "J-ResearchCorpus Update: 2024/3/16言語処理学会第30回年次大会(NLP2024)を含む、論文 1,343 本のデータを追加 2024/2/25言語処理学会誌「自然言語処理」のうち CC-BY-4.0 で公開されている論文 360 本のデータを追加 概要 CC-BY-* ライセンスで公開されている日本語論文や学会誌等から抜粋した高品質なテキストのデータセットです。", - "url": "https://huggingface.co./datasets/kunishou/J-ResearchCorpus", - "project_name": "J-ResearchCorpus", - "downloads": 116, - "source": "Hugging Face", - "score": -0.1030743931448008, - "first_commit": "2024-02-12 14:03:42", - "latest_commit": "2024-03-16 07:55:08", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "databricks-dolly-15k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", - "url": "https://huggingface.co./datasets/llm-jp/databricks-dolly-15k-ja", - "project_name": "databricks-dolly-15k-ja", - "downloads": 116, - "source": "Hugging Face", - "score": -0.1030743931448008, - "first_commit": "2024-01-27 07:11:25", - "latest_commit": "2024-01-30 18:09:37", - "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-LongChat", - "project_name": "Orion-14B-LongChat", - "downloads": 115, + "description": "abc-multiple-choice Dataset abc-multiple-choice は、競技クイズの大会「abc」で使用された4択問題を元に作成された、多肢選択式の質問応答データセットです。 ", + "url": "https://huggingface.co./datasets/tohoku-nlp/abc-multiple-choice", + "project_name": "abc-multiple-choice", + "downloads": 119, "source": "Hugging Face", - "score": -0.10308853326774235, - "first_commit": "2024-01-19 07:15:36", - "latest_commit": "2024-03-26 10:10:34", + "score": -0.10473445528338222, + "first_commit": "2024-03-02 03:58:25", + "latest_commit": "2024-03-12 07:32:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Aerner LM-v2 事前学習から全部日本語で学習させたモデルのバージョン2です。 ", - "url": "https://huggingface.co./aerner/lm-v2", - "project_name": "lm-v2", - "downloads": 115, + "description": "Finetuned Waseda RoBERTa to evaluate the generated answers on JTruthfulQA.", + "url": "https://huggingface.co./nlp-waseda/roberta_jtruthfulqa", + "project_name": "roberta_jtruthfulqa", + "downloads": 118, 
"source": "Hugging Face", - "score": -0.10308853326774235, - "first_commit": "2023-06-09 15:19:12", - "latest_commit": "2023-06-09 16:08:47", + "score": -0.10474906324080521, + "first_commit": "2023-12-06 01:33:02", + "latest_commit": "2023-12-06 04:31:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.337 }, { - "description": "reazonspeech-espnet-next ReazonSpeech is a project to maintain freely-available Japanese audio datasets and ML models.", - "url": "https://huggingface.co./reazon-research/reazonspeech-espnet-next", - "project_name": "reazonspeech-espnet-next", - "downloads": 115, + "description": "J-ResearchCorpus Update: 2024/3/16言語処理学会第30回年次大会(NLP2024)を含む、論文 1,343 本のデータを追加 2024/2/25言語処理学会誌「自然言語処理」のうち CC-BY-4.0 で公開されている論文 360 本のデータを追加 概要 CC-BY-* ライセンスで公開されている日本語論文や学会誌等から抜粋した高品質なテキストのデータセットです。", + "url": "https://huggingface.co./datasets/kunishou/J-ResearchCorpus", + "project_name": "J-ResearchCorpus", + "downloads": 116, "source": "Hugging Face", - "score": -0.10308853326774235, - "first_commit": "2023-03-29 07:20:03", - "latest_commit": "2023-03-29 17:28:01", + "score": -0.10477827915565117, + "first_commit": "2024-02-12 14:03:42", + "latest_commit": "2024-03-16 07:55:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "deberta-base-japanese-aozora-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-aozora-ud-head", - "project_name": "deberta-base-japanese-aozora-ud-head", + "description": "databricks-dolly-15k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", + "url": "https://huggingface.co./datasets/llm-jp/databricks-dolly-15k-ja", + "project_name": "databricks-dolly-15k-ja", "downloads": 114, "source": "Hugging Face", - "score": -0.10310267339068391, - "first_commit": "2022-06-15 04:02:27", - "latest_commit": "2023-03-04 20:10:16", + "score": -0.10480749507049715, + "first_commit": "2024-01-27 07:11:25", + "latest_commit": "2024-01-30 18:09:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Local-Novel-LLM-project様の Ninja-V2-7B をGGUF形式に変換したものです。 ", - "url": "https://huggingface.co./MCZK/Ninja-V2-7B-GGUF", - "project_name": "Ninja-V2-7B-GGUF", - "downloads": 113, + "description": "Dataset 5M (5121625) clean Japanese full sentence with the context.", + "url": "https://huggingface.co./datasets/AhmedSSabir/Japanese-wiki-dump-sentence-dataset", + "project_name": "Japanese-wiki-dump-sentence-dataset", + "downloads": 114, "source": "Hugging Face", - "score": -0.10311681351362546, - "first_commit": "2024-06-15 16:23:41", - "latest_commit": "2024-06-15 21:25:59", + "score": -0.10480749507049715, + "first_commit": "2022-06-08 11:34:04", + "latest_commit": "2023-07-11 12:22:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "What’s this?", @@ -7754,143 +8129,194 @@ "project_name": "deberta-v3-japanese-large", "downloads": 113, "source": "Hugging Face", - "score": -0.10311681351362546, + "score": -0.10482210302792014, "first_commit": "2023-09-21 16:15:15", "latest_commit": "2024-07-05 05:50:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoで利用しているモデルです。", - "url": 
"https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b", - "project_name": "japanese-novel-gpt-j-6b", - "downloads": 112, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-LongChat", + "project_name": "Orion-14B-LongChat", + "downloads": 113, "source": "Hugging Face", - "score": -0.10313095363656702, - "first_commit": "2023-08-11 00:52:32", - "latest_commit": "2023-08-26 04:20:51", + "score": -0.10482210302792014, + "first_commit": "2024-01-19 07:15:36", + "latest_commit": "2024-03-26 10:10:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "yacis-electra-small-cyberbullying", "url": "https://huggingface.co./ptaszynski/yacis-electra-small-japanese-cyberbullying", "project_name": "yacis-electra-small-japanese-cyberbullying", - "downloads": 111, + "downloads": 112, "source": "Hugging Face", - "score": -0.10314509375950857, + "score": -0.10483671098534313, "first_commit": "2022-01-12 03:57:13", "latest_commit": "2022-01-16 13:51:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "BERT base Japanese - JaQuAD Description A Japanese Question Answering model fine-tuned on JaQuAD.", "url": "https://huggingface.co./SkelterLabsInc/bert-base-japanese-jaquad", "project_name": "bert-base-japanese-jaquad", - "downloads": 111, + "downloads": 112, "source": "Hugging Face", - "score": -0.10314509375950857, + "score": -0.10483671098534313, "first_commit": "2022-01-27 08:08:53", "latest_commit": "2022-02-04 02:39:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Dataset 5M (5121625) clean Japanese full sentence with the context.", - "url": "https://huggingface.co./datasets/AhmedSSabir/Japanese-wiki-dump-sentence-dataset", - "project_name": "Japanese-wiki-dump-sentence-dataset", + "description": "MobileBERT 日本語事前学習済みモデル爆誕!! 
", + "url": "https://huggingface.co./ysakuramoto/mobilebert-ja", + "project_name": "mobilebert-ja", "downloads": 111, "source": "Hugging Face", - "score": -0.10314509375950857, - "first_commit": "2022-06-08 11:34:04", - "latest_commit": "2023-07-11 12:22:09", + "score": -0.10485131894276611, + "first_commit": "2022-01-23 11:29:39", + "latest_commit": "2022-01-24 05:25:31", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ichikara-instruction (Non Commercial) LLMのための日本語インストラクションデータ 公開ページ 公開ページより、 本データに関して、言語処理学会第30回年次大会において発表を行います。", - "url": "https://huggingface.co./datasets/p1atdev/ichikara-instruction", - "project_name": "ichikara-instruction", - "downloads": 110, + "description": "COMET-GPT2 ja Finetuned GPT-2 on ATOMIC ja using a causal language modeling (CLM) objective.", + "url": "https://huggingface.co./nlp-waseda/comet-gpt2-small-japanese", + "project_name": "comet-gpt2-small-japanese", + "downloads": 111, "source": "Hugging Face", - "score": -0.10315923388245013, - "first_commit": "2024-03-12 07:09:56", - "latest_commit": "2024-03-12 08:36:40", + "score": -0.10485131894276611, + "first_commit": "2022-11-15 05:14:35", + "latest_commit": "2023-02-13 10:26:12", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "reazonspeech-espnet-next ReazonSpeech is a project to maintain freely-available Japanese audio datasets and ML models.", + "url": "https://huggingface.co./reazon-research/reazonspeech-espnet-next", + "project_name": "reazonspeech-espnet-next", + "downloads": 108, + "source": "Hugging Face", + "score": -0.10489514281503506, + "first_commit": "2023-03-29 07:20:03", + "latest_commit": "2023-03-29 17:28:01", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "AIBunCho/japanese-novel-gpt-j-6b AI BunChoで利用しているモデルです。", + "url": "https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b", + "project_name": "japanese-novel-gpt-j-6b", + "downloads": 108, + "source": "Hugging Face", + "score": -0.10489514281503506, + "first_commit": "2023-08-11 00:52:32", + "latest_commit": "2023-08-26 04:20:51", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "upskyy/gte-korean-base This model is korsts and kornli finetuning model from Alibaba-NLP/gte-multilingual-base.", + "url": "https://huggingface.co./upskyy/gte-base-korean", + "project_name": "gte-base-korean", + "downloads": 107, + "source": "Hugging Face", + "score": -0.10490975077245805, + "first_commit": "2024-08-08 14:34:44", + "latest_commit": "2024-08-08 15:29:27", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.305 }, { "description": "albert-base-japanese-v1-with-japanese 日本語事前学習済みALBERTモデルですこのモデルではTokenizerにBertJapaneseTokenizerクラスを利用していますalbert-base-japanese-v1よりトークナイズ処理が楽になっています How to use ファインチューニング このモデルはPreTrainedモデルです基本的には各種タスク用にファインチューニングして使用されることを想定しています Fill-Mask for PyTorch from transformers import ( AutoModelForMaskedLM, AutoTokenizer ) tokenizer = AutoTokenizer.from_pretrained(\"ken11/albert-base-japanese-v1-with-japanese-tokenizer\")", "url": "https://huggingface.co./ken11/albert-base-japanese-v1-with-japanese-tokenizer", "project_name": "albert-base-japanese-v1-with-japanese-tokenizer", - "downloads": 108, + "downloads": 106, "source": "Hugging Face", - "score": -0.10318751412833324, + "score": -0.10492435872988104, "first_commit": "2022-04-20 16:34:22", 
"latest_commit": "2022-04-21 02:28:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "AutoWikiQA 東工大が公開しているSwallow-MXを用いて、Wikipedia中のテキストを入力として「質問(query)」と「回答(answer)」を生成し、生成された質問と回答についてフィルタリングを行ったデータセットです。", - "url": "https://huggingface.co./datasets/cl-nagoya/auto-wiki-qa", - "project_name": "auto-wiki-qa", - "downloads": 105, + "description": "Model Card for Japanese DeBERTa V2 large Model description This is a Japanese DeBERTa V2 large model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/deberta-v2-large-japanese", + "project_name": "deberta-v2-large-japanese", + "downloads": 103, "source": "Hugging Face", - "score": -0.10322993449715791, - "first_commit": "2024-03-28 01:33:42", - "latest_commit": "2024-04-20 12:17:33", + "score": -0.10496818260214999, + "first_commit": "2023-01-07 07:45:25", + "latest_commit": "2023-05-12 14:10:35", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.373 }, { - "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", - "url": "https://huggingface.co./cinmodel/electra-small-japanese-generator", - "project_name": "electra-small-japanese-generator", + "description": "Ninja-v1-RP-expressive-GGUF 概要 Aratako/Oumuamua-7b-RPの量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/Oumuamua-7b-RP-GGUF", + "project_name": "Oumuamua-7b-RP-GGUF", "downloads": 103, "source": "Hugging Face", - "score": -0.10325821474304102, - "first_commit": "2020-11-13 06:49:52", - "latest_commit": "2020-12-11 22:26:17", + "score": -0.10496818260214999, + "first_commit": "2024-06-23 13:00:02", + "latest_commit": "2024-06-23 14:45:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.33 }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./nold/Orion-14B-Base-GGUF", - "project_name": "Orion-14B-Base-GGUF", - "downloads": 103, + "description": "doc2query/msmarco-japanese-mt5-base-v1 This is a doc2query model based on mT5 (also known as docT5query).", + "url": "https://huggingface.co./doc2query/msmarco-japanese-mt5-base-v1", + "project_name": "msmarco-japanese-mt5-base-v1", + "downloads": 102, "source": "Hugging Face", - "score": -0.10325821474304102, - "first_commit": "2024-03-07 14:56:51", - "latest_commit": "2024-03-07 19:33:53", + "score": -0.10498279055957298, + "first_commit": "2022-04-29 12:05:21", + "latest_commit": "2022-04-29 14:05:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "mbpp-ja", - "url": "https://huggingface.co./datasets/llm-jp/mbpp-ja", - "project_name": "mbpp-ja", - "downloads": 103, + "description": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1 上記のモデルを訓練し、アダルト用語を認識できるようにしたものです。", + "url": "https://huggingface.co./swdq/Visual-novel-whisper", + "project_name": "Visual-novel-whisper", + "downloads": 100, "source": "Hugging Face", - "score": -0.10325821474304102, - "first_commit": "2024-04-19 00:26:56", - "latest_commit": "2024-04-20 06:26:51", + "score": -0.10501200647441895, + "first_commit": "2024-07-24 10:09:29", + "latest_commit": "2024-07-24 10:29:47", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.756 }, { - 
"description": "Japanese CLIP ViT-H/14 (Wider) Table of Contents Overview Usage Model Details Evaluation Limitations and Biases Citation See Also Contact Information Overview Developed by:", - "url": "https://huggingface.co./hakuhodo-tech/japanese-clip-vit-h-14-bert-wider", - "project_name": "japanese-clip-vit-h-14-bert-wider", - "downloads": 102, + "description": "Japanese ELECTRA-small We provide a Japanese ELECTRA-Small model, as described in ELECTRA: Pre-training Text Encoders as Discriminators Rather Than Generators.", + "url": "https://huggingface.co./cinmodel/electra-small-japanese-generator", + "project_name": "electra-small-japanese-generator", + "downloads": 100, "source": "Hugging Face", - "score": -0.10327235486598257, - "first_commit": "2024-03-06 03:30:25", - "latest_commit": "2024-03-06 21:46:11", + "score": -0.10501200647441895, + "first_commit": "2020-11-13 06:49:52", + "latest_commit": "2020-12-11 22:26:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "electra-base-cyberbullying This is an ELECTRA Base model for the Japanese language finetuned for automatic cyberbullying detection.", @@ -7898,383 +8324,441 @@ "project_name": "transformers-ud-japanese-electra-base-discriminator-cyberbullying", "downloads": 99, "source": "Hugging Face", - "score": -0.10331477523480724, + "score": -0.10502661443184194, "first_commit": "2022-09-09 04:08:15", "latest_commit": "2022-11-01 07:18:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "LLaVA-JP Model Card Model detail Model type: LLaVA-JP is a vision-language model that can converse about input images.", - "url": "https://huggingface.co./toshi456/llava-jp-1.3b-v1.0", - "project_name": "llava-jp-1.3b-v1.0", + "description": "Aerner LM-v2 事前学習から全部日本語で学習させたモデルのバージョン2です。 ", + "url": "https://huggingface.co./aerner/lm-v2", + "project_name": "lm-v2", "downloads": 99, "source": "Hugging Face", - "score": -0.10331477523480724, - "first_commit": "2023-12-04 13:13:03", - "latest_commit": "2023-12-18 10:21:11", + "score": -0.10502661443184194, + "first_commit": "2023-06-09 15:19:12", + "latest_commit": "2023-06-09 16:08:47", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Fine-tuned XLSR-53 large model for speech diarization in Japanese phone-call 2 speakers diarization model which was fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using phone-call data CallHome.", - "url": "https://huggingface.co./Ivydata/wav2vec2-large-speech-diarization-jp", - "project_name": "wav2vec2-large-speech-diarization-jp", + "description": "We provide an Amazon product reviews dataset for multilingual text classification.", + "url": "https://huggingface.co./datasets/defunct-datasets/amazon_reviews_multi", + "project_name": "amazon_reviews_multi", + "downloads": 99, + "source": "Hugging Face", + "score": -0.10502661443184194, + "first_commit": "2022-01-25 16:34:54", + "latest_commit": "2023-11-02 14:52:21", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-v1.1", + "project_name": "t5-base-japanese-v1.1", "downloads": 98, "source": "Hugging Face", - "score": -0.1033289153577488, - "first_commit": "2023-05-08 10:10:43", - "latest_commit": "2023-05-10 00:32:23", + "score": -0.10504122238926492, + "first_commit": "2022-08-12 15:41:28", + "latest_commit": "2022-08-27 09:21:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "https://huggingface.co./kotoba-tech/kotoba-whisper-v1.1 上記のモデルを訓練し、アダルト用語を認識できるようにしたものです。", - "url": "https://huggingface.co./swdq/Visual-novel-whisper", - "project_name": "Visual-novel-whisper", - "downloads": 96, + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./nold/Orion-14B-Base-GGUF", + "project_name": "Orion-14B-Base-GGUF", + "downloads": 97, "source": "Hugging Face", - "score": -0.10335719560363191, - "first_commit": "2024-07-24 10:09:29", - "latest_commit": "2024-07-24 10:29:47", + "score": -0.1050558303466879, + "first_commit": "2024-03-07 14:56:51", + "latest_commit": "2024-03-07 19:33:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.5 }, { - "description": "doc2query/msmarco-japanese-mt5-base-v1 This is a doc2query model based on mT5 (also known as docT5query).", - "url": "https://huggingface.co./doc2query/msmarco-japanese-mt5-base-v1", - "project_name": "msmarco-japanese-mt5-base-v1", + "description": "[github].", + "url": "https://huggingface.co./datasets/fujiki/japanese_alpaca_data", + "project_name": "japanese_alpaca_data", "downloads": 96, "source": "Hugging Face", - "score": -0.10335719560363191, - "first_commit": "2022-04-29 12:05:21", - "latest_commit": "2022-04-29 14:05:37", + "score": -0.10507043830411089, + "first_commit": "2023-05-18 07:13:15", + "latest_commit": "2023-05-19 12:54:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "line-corporation/japanese-large-lm-3.6b line-corporationさんが公開しているjapanese-large-lm-3.6bのgguf変換版です。 ", - "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-gguf", - "project_name": "line-corp-japanese-large-lm-3.6b-gguf", + "description": "Fine-tuned XLSR-53 large model for speech diarization in Japanese phone-call 2 speakers diarization model which was fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using phone-call data CallHome.", + "url": "https://huggingface.co./Ivydata/wav2vec2-large-speech-diarization-jp", + "project_name": "wav2vec2-large-speech-diarization-jp", "downloads": 95, "source": "Hugging Face", - "score": -0.10337133572657346, - "first_commit": "2023-09-02 18:18:41", - "latest_commit": "2023-09-08 02:53:05", + "score": -0.10508504626153388, + "first_commit": "2023-05-08 10:10:43", + "latest_commit": "2023-05-10 00:32:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "nlp-waseda/roberta-large-japanese Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", - "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese", - "project_name": "roberta-large-japanese", - "downloads": 93, + "description": "Japanese stopwords for nagisa", + "url": "https://huggingface.co./datasets/taishi-i/nagisa_stopwords", + "project_name": "nagisa_stopwords", + "downloads": 95, "source": "Hugging Face", - "score": 
-0.10339961597245657, - "first_commit": "2022-05-10 08:37:48", - "latest_commit": "2022-10-21 14:48:46", + "score": -0.10508504626153388, + "first_commit": "2023-08-06 17:10:10", + "latest_commit": "2023-08-07 02:58:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "upskyy/gte-korean-base This model is korsts and kornli finetuning model from Alibaba-NLP/gte-multilingual-base.", - "url": "https://huggingface.co./upskyy/gte-base-korean", - "project_name": "gte-base-korean", - "downloads": 91, + "description": "mqaデータセットのquery--passageのペアについて重複を削除したデータセットです。 ", + "url": "https://huggingface.co./datasets/hpprc/mqa-ja", + "project_name": "mqa-ja", + "downloads": 90, "source": "Hugging Face", - "score": -0.1034278962183397, - "first_commit": "2024-08-08 14:34:44", - "latest_commit": "2024-08-08 15:29:27", + "score": -0.1051580860486488, + "first_commit": "2024-04-07 06:23:02", + "latest_commit": "2024-04-07 15:16:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "c4ai-command-r-v01-japanese-instruct GGUF版はこちら/Click here for the GGUF version 概要 CohereForAI/c4ai-command-r-v01を、ichikara-instructionを使って追加で日本語インストラクションチューニングを施したモデルです。 ", - "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct", - "project_name": "c4ai-command-r-v01-japanese-instruct", + "description": "AutoWikiQA 東工大が公開しているSwallow-MXを用いて、Wikipedia中のテキストを入力として「質問(query)」と「回答(answer)」を生成し、生成された質問と回答についてフィルタリングを行ったデータセットです。", + "url": "https://huggingface.co./datasets/cl-nagoya/auto-wiki-qa", + "project_name": "auto-wiki-qa", "downloads": 90, "source": "Hugging Face", - "score": -0.10344203634128125, - "first_commit": "2024-04-04 03:56:52", - "latest_commit": "2024-04-07 15:18:37", + "score": -0.1051580860486488, + "first_commit": "2024-03-28 01:33:42", + "latest_commit": "2024-04-20 12:17:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "AnzuMixSeries VAEの内臓はないぞ!と言わせないぞ!!!! 
", - "url": "https://huggingface.co./natsusakiyomi/AnzuMix", - "project_name": "AnzuMix", - "downloads": 90, + "description": "This is a Japanese+English sentence-BERT model.", + "url": "https://huggingface.co./sonoisa/sentence-bert-base-ja-en-mean-tokens", + "project_name": "sentence-bert-base-ja-en-mean-tokens", + "downloads": 89, "source": "Hugging Face", - "score": -0.10344203634128125, - "first_commit": "2023-07-30 13:10:10", - "latest_commit": "2023-11-15 12:39:10", + "score": -0.10517269400607179, + "first_commit": "2022-05-08 03:05:08", + "latest_commit": "2022-05-08 03:29:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "alabnii/jmedroberta-base-sentencepiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece-vocab50000", - "project_name": "jmedroberta-base-sentencepiece-vocab50000", + "description": "Model Card for Japanese character-level GPT-2 Medium Model description This is a Japanese character-level GPT-2 Medium (310M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./ku-nlp/gpt2-medium-japanese-char", + "project_name": "gpt2-medium-japanese-char", "downloads": 89, "source": "Hugging Face", - "score": -0.10345617646422281, - "first_commit": "2022-12-22 17:22:14", - "latest_commit": "2023-06-27 03:44:17", + "score": -0.10517269400607179, + "first_commit": "2023-05-18 06:29:28", + "latest_commit": "2023-06-08 05:34:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.335 }, { - "description": "モデル説明 (model explanation) CoolJapanDiffusion 2.1.1とWaifuDiffusion 1.4 anime epoch2のマージ。", - "url": "https://huggingface.co./ThePioneer/CoolerWaifuDiffusion", - "project_name": "CoolerWaifuDiffusion", + "description": "Wikidata parallel descriptions en-ja Parallel corpus for machine translation generated from wikidata dump (2024-05-06).", + "url": "https://huggingface.co./datasets/Mitsua/wikidata-parallel-descriptions-en-ja", + "project_name": "wikidata-parallel-descriptions-en-ja", "downloads": 88, "source": "Hugging Face", - "score": -0.10347031658716437, - "first_commit": "2023-01-20 23:52:39", - "latest_commit": "2023-01-22 19:16:59", + "score": -0.10518730196349478, + "first_commit": "2024-05-13 12:02:43", + "latest_commit": "2024-05-17 00:25:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "JaWiki WikipediaのHTML形式のダンプファイルから抽出したテキストデータセットです。 ", - "url": "https://huggingface.co./datasets/hpprc/jawiki", - "project_name": "jawiki", + "description": "Llama 3 Youko 70B (rinna/llama-3-youko-70b)", + "url": "https://huggingface.co./rinna/llama-3-youko-70b", + "project_name": "llama-3-youko-70b", "downloads": 87, "source": "Hugging Face", - "score": -0.10348445671010592, - "first_commit": "2024-02-02 06:36:01", - "latest_commit": "2024-02-13 15:19:49", + "score": -0.10520190992091775, + "first_commit": "2024-07-21 14:13:34", + "latest_commit": "2024-07-25 05:16:28", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 70.6 }, { - "description": "oasst2-33k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a 
collaborative project launched in Japan.", - "url": "https://huggingface.co./datasets/llm-jp/oasst2-33k-ja", - "project_name": "oasst2-33k-ja", + "description": "sonoisa/t5-base-japaneseをファインチューニングして、タイトル生成に用いれるようにしたモデルです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/t5-CAMERA-title-generation", + "project_name": "t5-CAMERA-title-generation", "downloads": 86, "source": "Hugging Face", - "score": -0.10349859683304748, - "first_commit": "2024-04-28 16:24:00", - "latest_commit": "2024-04-28 16:39:03", + "score": -0.10521651787834074, + "first_commit": "2023-03-21 10:49:27", + "latest_commit": "2023-07-21 14:11:13", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.223 }, { - "description": "[github].", - "url": "https://huggingface.co./datasets/fujiki/japanese_alpaca_data", - "project_name": "japanese_alpaca_data", + "description": "line-corporation/japanese-large-lm-3.6b line-corporationさんが公開しているjapanese-large-lm-3.6bのgguf変換版です。 ", + "url": "https://huggingface.co./mmnga/line-corp-japanese-large-lm-3.6b-gguf", + "project_name": "line-corp-japanese-large-lm-3.6b-gguf", + "downloads": 86, + "source": "Hugging Face", + "score": -0.10521651787834074, + "first_commit": "2023-09-02 18:18:41", + "latest_commit": "2023-09-08 02:53:05", + "languages": [], + "model_or_dataset": "model", + "model_size": 3.71 + }, + { + "description": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", + "project_name": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", "downloads": 85, "source": "Hugging Face", - "score": -0.10351273695598903, - "first_commit": "2023-05-18 07:13:15", - "latest_commit": "2023-05-19 12:54:13", + "score": -0.10523112583576373, + "first_commit": "2023-09-26 06:16:23", + "latest_commit": "2023-09-28 00:02:06", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 1.17 }, { - "description": "Llama 3 Youko 70B (rinna/llama-3-youko-70b)", - "url": "https://huggingface.co./rinna/llama-3-youko-70b", - "project_name": "llama-3-youko-70b", + "description": "alabnii/jmedroberta-base-sentencepiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-sentencepiece-vocab50000", + "project_name": "jmedroberta-base-sentencepiece-vocab50000", "downloads": 84, "source": "Hugging Face", - "score": -0.10352687707893059, - "first_commit": "2024-07-21 14:13:34", - "latest_commit": "2024-07-25 05:16:28", + "score": -0.10524573379318672, + "first_commit": "2022-12-22 17:22:14", + "latest_commit": "2023-06-27 03:44:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.124 }, { - "description": "This is a little bit different version of kunishou/hh-rlhf-49k-ja without ng_translation == 1 examples.", - "url": "https://huggingface.co./datasets/fujiki/japanese_hh-rlhf-49k", - "project_name": "japanese_hh-rlhf-49k", - "downloads": 84, + "description": "nlp-waseda/roberta-large-japanese Model description This is a Japanese RoBERTa large model pretrained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./nlp-waseda/roberta-large-japanese", + "project_name": "roberta-large-japanese", + "downloads": 83, "source": 
"Hugging Face", - "score": -0.10352687707893059, - "first_commit": "2023-05-28 05:55:53", - "latest_commit": "2023-05-28 06:08:04", + "score": -0.1052603417506097, + "first_commit": "2022-05-10 08:37:48", + "latest_commit": "2022-10-21 14:48:46", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-gpt-1b-PII-masking Model Description japanese-gpt-1b-PII-masking は、 日本語事前学習済み1B GPTモデルをベースとして、日本語の文章から個人情報をマスキングするように学習したモデルです。 ", - "url": "https://huggingface.co./cameltech/japanese-gpt-1b-PII-masking", - "project_name": "japanese-gpt-1b-PII-masking", + "description": "oasst2-33k-ja This repository provides an instruction tuning dataset developed by LLM-jp, a collaborative project launched in Japan.", + "url": "https://huggingface.co./datasets/llm-jp/oasst2-33k-ja", + "project_name": "oasst2-33k-ja", "downloads": 83, "source": "Hugging Face", - "score": -0.10354101720187214, - "first_commit": "2024-04-05 07:26:29", - "latest_commit": "2024-05-17 11:42:00", + "score": -0.1052603417506097, + "first_commit": "2024-04-28 16:24:00", + "latest_commit": "2024-04-28 16:39:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "We provide an Amazon product reviews dataset for multilingual text classification.", - "url": "https://huggingface.co./datasets/defunct-datasets/amazon_reviews_multi", - "project_name": "amazon_reviews_multi", + "description": "whisper-large-v3-japanese-4k-steps This model is a fine-tuned version of openai/whisper-large-v3 on the Common Voice 16.1 dataset.", + "url": "https://huggingface.co./drewschaub/whisper-large-v3-japanese-4k-steps", + "project_name": "whisper-large-v3-japanese-4k-steps", "downloads": 82, "source": "Hugging Face", - "score": -0.1035551573248137, - "first_commit": "2022-01-25 16:34:54", - "latest_commit": "2023-11-02 14:52:21", + "score": -0.1052749497080327, + "first_commit": "2024-02-17 01:01:51", + "latest_commit": "2024-02-18 01:31:35", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 1.54 }, { - "description": "zenz-v1 zenz-v1はGPT-2アーキテクチャに基づくかな漢字変換タスクに特化した言語モデルです。", - "url": "https://huggingface.co./Miwa-Keita/zenz-v1", - "project_name": "zenz-v1", - "downloads": 80, + "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", + "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b", + "project_name": "ELYZA-japanese-CodeLlama-7b", + "downloads": 81, "source": "Hugging Face", - "score": -0.10358343757069681, - "first_commit": "2024-05-12 15:48:46", - "latest_commit": "2024-05-13 16:34:02", + "score": -0.10528955766545567, + "first_commit": "2023-11-07 12:48:15", + "latest_commit": "2023-11-15 00:38:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "Japanese-LLaMA-2-13B-GGUF Japanese-LLaMA-2-13B-GGUFはJapanese-LLaMA-2-13BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-2-13b-gguf", - "project_name": "japanese-llama-2-13b-gguf", - "downloads": 79, + "description": "Japanese InstructBLIP Alpha Model Details Japanese InstructBLIP Alpha is a vision-language instruction-following model that enables to generate Japanese descriptions for input images and optionally input texts such as questions.", + "url": "https://huggingface.co./stabilityai/japanese-instructblip-alpha", + 
"project_name": "japanese-instructblip-alpha", + "downloads": 80, "source": "Hugging Face", - "score": -0.10359757769363837, - "first_commit": "2023-12-20 05:37:09", - "latest_commit": "2023-12-26 11:45:15", + "score": -0.10530416562287866, + "first_commit": "2023-08-16 23:49:58", + "latest_commit": "2023-11-17 03:57:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "whisper-large-v3-japanese-4k-steps This model is a fine-tuned version of openai/whisper-large-v3 on the Common Voice 16.1 dataset.", - "url": "https://huggingface.co./drewschaub/whisper-large-v3-japanese-4k-steps", - "project_name": "whisper-large-v3-japanese-4k-steps", + "description": "recruit-jp/japanese-typo-detector-roberta-base モデルの概要 日本語の文章を入力すると各文字ごとに誤字脱字である確率を出力します 各ラベルの意味は以下の通りです id label meaning 0 OK 誤字なし 1 deletion 1文字の抜け 2 insertion_a 余分な1文字の挿入 3 insertion_b 直前の文字列と一致する2文字以上の余分な文字の挿入 4 kanji-conversion_a 同一の読みを持つ漢字の入れ替え(誤変換) 5 kanji-conversion_b 近い読みを持つ漢字の入れ替え(誤変換) 6 substitution 1文字の入れ替え 7 transposition 隣接する2文字間の転置 8 others その他の入力誤り 誤り種類の詳細については学習データセットの元論文をご参照ください 日本語 Wikipedia の編集履歴に基づく 入力誤りデータセットと訂正システムの改良 その他、モデルの詳細については当社ブログ記事をご参照ください 誤字脱字検出モデルをHugging Face Hubに公開しました (Re", + "url": "https://huggingface.co./recruit-jp/japanese-typo-detector-roberta-base", + "project_name": "japanese-typo-detector-roberta-base", "downloads": 79, "source": "Hugging Face", - "score": -0.10359757769363837, - "first_commit": "2024-02-17 01:01:51", - "latest_commit": "2024-02-18 01:31:35", + "score": -0.10531877358030164, + "first_commit": "2023-11-09 06:27:40", + "latest_commit": "2023-12-21 03:07:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.0996 }, { - "description": "Dataset overview This dataset identifies whether a GitHub repository description pertains to Japanese natural language processing (NLP).", - "url": "https://huggingface.co./datasets/taishi-i/awesome-japanese-nlp-classification-dataset", - "project_name": "awesome-japanese-nlp-classification-dataset", - "downloads": 77, + "description": "Heron BLIP Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", + "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", + "downloads": 79, "source": "Hugging Face", - "score": -0.10362585793952148, - "first_commit": "2023-09-09 06:37:36", - "latest_commit": "2023-09-09 20:09:04", + "score": -0.10531877358030164, + "first_commit": "2024-02-27 13:48:02", + "latest_commit": "2024-02-27 13:59:23", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "自動生成Q&A 種々のデータソースから、MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUFを使ってQ&Aを自動生成したものです。 CC-BY系またはApatch-2.0のデータソースを改変して生成しています。 ", - "url": "https://huggingface.co./datasets/hatakeyama-llm-team/AutoGeneratedJapaneseQA", - "project_name": "AutoGeneratedJapaneseQA", - "downloads": 77, + "description": "Miwa-Keita/zenz-v1-checkpoints を optimum 用に ONNX に変換したモデルです。", + "url": "https://huggingface.co./p1atdev/zenz-v1-onnx", + "project_name": "zenz-v1-onnx", + "downloads": 79, "source": "Hugging Face", - "score": -0.10362585793952148, - "first_commit": "2024-04-27 04:32:20", - "latest_commit": "2024-05-19 03:22:08", + "score": -0.10531877358030164, + "first_commit": "2024-06-29 03:03:03", + "latest_commit": "2024-06-29 03:40:34", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + 
"model_size": null }, { - "description": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "project_name": "japanese-large-lm-3.6b-instruction-sft-8bit-1g-actorder_True", - "downloads": 75, + "description": "This is a little bit different version of kunishou/hh-rlhf-49k-ja without ng_translation == 1 examples.", + "url": "https://huggingface.co./datasets/fujiki/japanese_hh-rlhf-49k", + "project_name": "japanese_hh-rlhf-49k", + "downloads": 79, "source": "Hugging Face", - "score": -0.10365413818540459, - "first_commit": "2023-09-26 06:16:23", - "latest_commit": "2023-09-28 00:02:06", + "score": -0.10531877358030164, + "first_commit": "2023-05-28 05:55:53", + "latest_commit": "2023-05-28 06:08:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "ELYZA-japanese-CodeLlama-7b Model Description ELYZA-japanese-CodeLlama-7b は、 Code Llamaをベースとして日本語能力を拡張するために追加事前学習を行ったモデルです。 ", - "url": "https://huggingface.co./elyza/ELYZA-japanese-CodeLlama-7b", - "project_name": "ELYZA-japanese-CodeLlama-7b", - "downloads": 75, + "description": "Wikipediaを用いた日本語の固有表現抽出データセット GitHub: https://github.com/stockmarkteam/ner-wikipedia-dataset/ LICENSE: CC-BY-SA 3.0 Developed by Stockmark Inc.", + "url": "https://huggingface.co./datasets/stockmark/ner-wikipedia-dataset", + "project_name": "ner-wikipedia-dataset", + "downloads": 78, "source": "Hugging Face", - "score": -0.10365413818540459, - "first_commit": "2023-11-07 12:48:15", - "latest_commit": "2023-11-15 00:38:12", + "score": -0.10533338153772463, + "first_commit": "2023-09-02 14:38:55", + "latest_commit": "2023-09-02 14:42:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "recruit-jp/japanese-typo-detector-roberta-base モデルの概要 日本語の文章を入力すると各文字ごとに誤字脱字である確率を出力します 各ラベルの意味は以下の通りです id label meaning 0 OK 誤字なし 1 deletion 1文字の抜け 2 insertion_a 余分な1文字の挿入 3 insertion_b 直前の文字列と一致する2文字以上の余分な文字の挿入 4 kanji-conversion_a 同一の読みを持つ漢字の入れ替え(誤変換) 5 kanji-conversion_b 近い読みを持つ漢字の入れ替え(誤変換) 6 substitution 1文字の入れ替え 7 transposition 隣接する2文字間の転置 8 others その他の入力誤り 誤り種類の詳細については学習データセットの元論文をご参照ください 日本語 Wikipedia の編集履歴に基づく 入力誤りデータセットと訂正システムの改良 その他、モデルの詳細については当社ブログ記事をご参照ください 誤字脱字検出モデルをHugging Face Hubに公開しました (Re", - "url": "https://huggingface.co./recruit-jp/japanese-typo-detector-roberta-base", - "project_name": "japanese-typo-detector-roberta-base", - "downloads": 75, + "description": "Japanese-LLaMA-2-13B-GGUF Japanese-LLaMA-2-13B-GGUFはJapanese-LLaMA-2-13BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-llama-2-13b-gguf", + "project_name": "japanese-llama-2-13b-gguf", + "downloads": 77, "source": "Hugging Face", - "score": -0.10365413818540459, - "first_commit": "2023-11-09 06:27:40", - "latest_commit": "2023-12-21 03:07:31", + "score": -0.10534798949514762, + "first_commit": "2023-12-20 05:37:09", + "latest_commit": "2023-12-26 11:45:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.3 }, { - "description": "Japanese InstructBLIP Alpha Model Details Japanese InstructBLIP Alpha is a vision-language instruction-following model that enables to generate Japanese descriptions for input images and optionally input texts such as questions.", - "url": "https://huggingface.co./stabilityai/japanese-instructblip-alpha", - "project_name": 
"japanese-instructblip-alpha", - "downloads": 75, + "description": "Dataset overview This dataset identifies whether a GitHub repository description pertains to Japanese natural language processing (NLP).", + "url": "https://huggingface.co./datasets/taishi-i/awesome-japanese-nlp-classification-dataset", + "project_name": "awesome-japanese-nlp-classification-dataset", + "downloads": 77, "source": "Hugging Face", - "score": -0.10365413818540459, - "first_commit": "2023-08-16 23:49:58", - "latest_commit": "2023-11-17 03:57:41", + "score": -0.10534798949514762, + "first_commit": "2023-09-09 06:37:36", + "latest_commit": "2023-09-09 20:09:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Genji-JP 6B Please check our blog post for more details, samples, evaluations and more: Blogpost Model Description Genji-JP 6B is a model finetuned on our Japanese storytelling dataset based on EleutherAI's GPT-J 6B model.", - "url": "https://huggingface.co./NovelAI/genji-jp", - "project_name": "genji-jp", - "downloads": 75, + "description": "This is a Japanese sentence-LUKE model.", + "url": "https://huggingface.co./cheonboy/sentence_embedding_japanese", + "project_name": "sentence_embedding_japanese", + "downloads": 76, "source": "Hugging Face", - "score": -0.10365413818540459, - "first_commit": "2021-11-03 15:07:47", - "latest_commit": "2022-08-09 17:36:02", + "score": -0.10536259745257061, + "first_commit": "2023-10-05 05:10:25", + "latest_commit": "2023-10-05 05:13:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Miwa-Keita/zenz-v1-checkpoints を optimum 用に ONNX に変換したモデルです。", - "url": "https://huggingface.co./p1atdev/zenz-v1-onnx", - "project_name": "zenz-v1-onnx", - "downloads": 74, + "description": "The English document is here. 
", + "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft", + "project_name": "Watashiha-Llama-2-13B-Ogiri-sft", + "downloads": 75, "source": "Hugging Face", - "score": -0.10366827830834614, - "first_commit": "2024-06-29 03:03:03", - "latest_commit": "2024-06-29 03:40:34", + "score": -0.10537720540999358, + "first_commit": "2024-01-19 06:59:08", + "latest_commit": "2024-03-04 05:24:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Chat", - "project_name": "SambaLingo-Japanese-Chat", - "downloads": 74, + "description": "japanese-gpt-1b-PII-masking Model Description japanese-gpt-1b-PII-masking は、 日本語事前学習済み1B GPTモデルをベースとして、日本語の文章から個人情報をマスキングするように学習したモデルです。 ", + "url": "https://huggingface.co./cameltech/japanese-gpt-1b-PII-masking", + "project_name": "japanese-gpt-1b-PII-masking", + "downloads": 75, "source": "Hugging Face", - "score": -0.10366827830834614, - "first_commit": "2024-02-15 22:45:08", - "latest_commit": "2024-04-16 22:32:15", + "score": -0.10537720540999358, + "first_commit": "2024-04-05 07:26:29", + "latest_commit": "2024-05-17 11:42:00", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.3 }, { - "description": "roberta_qa_japanese (Japanese caption : 日本語の (抽出型) 質問応答のモデル)", - "url": "https://huggingface.co./tsmatz/roberta_qa_japanese", - "project_name": "roberta_qa_japanese", + "description": "LLM-jp Toxicity Dataset 日本語有害文書データセット「LLM-jp Toxicity Dataset」 See https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-toxicity-dataset", + "url": "https://huggingface.co./datasets/p1atdev/LLM-jp-Toxicity-Dataset", + "project_name": "LLM-jp-Toxicity-Dataset", "downloads": 74, "source": "Hugging Face", - "score": -0.10366827830834614, - "first_commit": "2022-12-11 03:41:07", - "latest_commit": "2024-07-12 00:00:07", + "score": -0.10539181336741657, + "first_commit": "2024-08-07 07:11:08", + "latest_commit": "2024-08-07 07:21:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "nlp-waseda/roberta-large-japanese-seq512-with-auto-jumanpp Model description", @@ -8282,71 +8766,64 @@ "project_name": "roberta-large-japanese-seq512-with-auto-jumanpp", "downloads": 73, "source": "Hugging Face", - "score": -0.1036824184312877, + "score": -0.10540642132483956, "first_commit": "2022-10-15 06:04:06", "latest_commit": "2022-10-21 15:56:38", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Japanese-LLaMA-2-7B-GGUF Japanese-LLaMA-2-7B-GGUFはJapanese-LLaMA-2-7BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-2-7b-gguf", - "project_name": "japanese-llama-2-7b-gguf", - "downloads": 73, - "source": "Hugging Face", - "score": -0.1036824184312877, - "first_commit": "2024-01-22 03:00:02", - "latest_commit": "2024-06-05 02:30:01", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "275.86Mのmixtralを日本語データセットでpretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", - "url": "https://huggingface.co./hibikaze/tiny_mixtral_ja_with_tokenizer", - "project_name": "tiny_mixtral_ja_with_tokenizer", + "description": 
"Genji-JP 6B Please check our blog post for more details, samples, evaluations and more: Blogpost Model Description Genji-JP 6B is a model finetuned on our Japanese storytelling dataset based on EleutherAI's GPT-J 6B model.", + "url": "https://huggingface.co./NovelAI/genji-jp", + "project_name": "genji-jp", "downloads": 73, "source": "Hugging Face", - "score": -0.1036824184312877, - "first_commit": "2024-07-20 05:30:59", - "latest_commit": "2024-07-20 05:33:38", + "score": -0.10540642132483956, + "first_commit": "2021-11-03 15:07:47", + "latest_commit": "2022-08-09 17:36:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-GGUF", - "project_name": "SambaLingo-Japanese-Chat-GGUF", + "description": "roberta_qa_japanese (Japanese caption : 日本語の (抽出型) 質問応答のモデル)", + "url": "https://huggingface.co./tsmatz/roberta_qa_japanese", + "project_name": "roberta_qa_japanese", "downloads": 72, "source": "Hugging Face", - "score": -0.10369655855422925, - "first_commit": "2024-03-07 06:38:01", - "latest_commit": "2024-03-07 06:48:27", + "score": -0.10542102928226255, + "first_commit": "2022-12-11 03:41:07", + "latest_commit": "2024-07-12 00:00:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.11 }, { - "description": "こちらでアップロードできないので、civitaiにて先に公開しています。 ", - "url": "https://huggingface.co./sazyou-roukaku/AfterRealXL", - "project_name": "AfterRealXL", + "description": "c4ai-command-r-v01-japanese-instruct GGUF版はこちら/Click here for the GGUF version 概要 CohereForAI/c4ai-command-r-v01を、ichikara-instructionを使って追加で日本語インストラクションチューニングを施したモデルです。 ", + "url": "https://huggingface.co./Aratako/c4ai-command-r-v01-japanese-instruct", + "project_name": "c4ai-command-r-v01-japanese-instruct", "downloads": 72, "source": "Hugging Face", - "score": -0.10369655855422925, - "first_commit": "2023-09-23 08:43:02", - "latest_commit": "2023-10-01 18:12:09", + "score": -0.10542102928226255, + "first_commit": "2024-04-04 03:56:52", + "latest_commit": "2024-04-07 15:18:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 35.0 }, { - "description": "Japanese stopwords for nagisa", - "url": "https://huggingface.co./datasets/taishi-i/nagisa_stopwords", - "project_name": "nagisa_stopwords", + "description": "このデータセットについて このデータは、日本の官公庁のWebサイトに掲載されている「よくある質問」を手作業で抽出し、インストラクション用のデータセットとしたもの���す。 ", + "url": "https://huggingface.co./datasets/matsuxr/JaGovFaqs-22k", + "project_name": "JaGovFaqs-22k", "downloads": 72, "source": "Hugging Face", - "score": -0.10369655855422925, - "first_commit": "2023-08-06 17:10:10", - "latest_commit": "2023-08-07 02:58:31", + "score": -0.10542102928226255, + "first_commit": "2023-12-31 13:58:41", + "latest_commit": "2024-02-29 02:51:20", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "モデルの説明(English explanation is below.", @@ -8354,131 +8831,142 @@ "project_name": "swallow-3-8B-sqlcoder-2x8B-GGUF", "downloads": 71, "source": "Hugging Face", - "score": -0.10371069867717081, + "score": -0.10543563723968553, "first_commit": "2024-07-03 11:02:45", "latest_commit": "2024-07-04 07:20:41", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Wikidata parallel descriptions 
en-ja Parallel corpus for machine translation generated from wikidata dump (2024-05-06).", - "url": "https://huggingface.co./datasets/Mitsua/wikidata-parallel-descriptions-en-ja", - "project_name": "wikidata-parallel-descriptions-en-ja", - "downloads": 71, - "source": "Hugging Face", - "score": -0.10371069867717081, - "first_commit": "2024-05-13 12:02:43", - "latest_commit": "2024-05-17 00:25:10", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "The English document is here. ", - "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft", - "project_name": "Watashiha-Llama-2-13B-Ogiri-sft", - "downloads": 69, - "source": "Hugging Face", - "score": -0.10373897892305392, - "first_commit": "2024-01-19 06:59:08", - "latest_commit": "2024-03-04 05:24:31", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.7 }, { "description": "electra-base-cyberbullying This is a BERT Base model for the Japanese language finetuned for automatic cyberbullying detection.", "url": "https://huggingface.co./kit-nlp/bert-base-japanese-sentiment-cyberbullying", "project_name": "bert-base-japanese-sentiment-cyberbullying", - "downloads": 69, + "downloads": 70, "source": "Hugging Face", - "score": -0.10373897892305392, + "score": -0.10545024519710852, "first_commit": "2022-09-09 02:16:34", "latest_commit": "2022-11-01 07:18:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "bert-base-japanese-v3-jcommonsenseqa 「大規模言語モデル入門」の第5章で紹介している(多肢選択式質問応答)のモデルです。 ", "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-jcommonsenseqa", "project_name": "bert-base-japanese-v3-jcommonsenseqa", - "downloads": 69, + "downloads": 70, "source": "Hugging Face", - "score": -0.10373897892305392, + "score": -0.10545024519710852, "first_commit": "2023-06-20 07:01:53", "latest_commit": "2023-07-24 06:49:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-Alpaca-2-13B-GGUF Japanese-Alpaca-2-13B-GGUFはJapanese-Alpaca-2-13BのGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-alpaca-2-13b-gguf", - "project_name": "japanese-alpaca-2-13b-gguf", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-GGUF", + "project_name": "SambaLingo-Japanese-Chat-GGUF", "downloads": 69, "source": "Hugging Face", - "score": -0.10373897892305392, - "first_commit": "2023-12-20 10:56:08", - "latest_commit": "2023-12-26 11:46:41", + "score": -0.1054648531545315, + "first_commit": "2024-03-07 06:38:01", + "latest_commit": "2024-03-07 06:48:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.95 }, { - "description": "common voice, google fleurs, JSUTv1.1, JAS_v2 (joujiboi/japanese-anime-speech-v2)", - "url": "https://huggingface.co./datasets/sin2piusc/jgca_v2_50k_2", - "project_name": "jgca_v2_50k_2", + "description": "Japanese-LLaMA-2-7B-GGUF Japanese-LLaMA-2-7B-GGUFはJapanese-LLaMA-2-7BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-llama-2-7b-gguf", + "project_name": "japanese-llama-2-7b-gguf", "downloads": 69, "source": "Hugging Face", - "score": -0.10373897892305392, - "first_commit": "2024-07-08 18:06:27", - "latest_commit": "2024-07-24 18:58:08", + "score": -0.1054648531545315, + 
"first_commit": "2024-01-22 03:00:02", + "latest_commit": "2024-06-05 02:30:01", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 6.97 }, { - "description": "shisa-v1-qwen2-7b-gguf (English explanation is below.", - "url": "https://huggingface.co./keitokei1994/shisa-v1-qwen2-7b-GGUF", - "project_name": "shisa-v1-qwen2-7b-GGUF", + "description": "Heron GIT Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-git-ja-stablelm-base-7b-v1", + "project_name": "heron-chat-git-ja-stablelm-base-7b-v1", "downloads": 68, "source": "Hugging Face", - "score": -0.10375311904599548, - "first_commit": "2024-06-09 08:58:45", - "latest_commit": "2024-07-04 07:44:00", + "score": -0.10547946111195448, + "first_commit": "2024-03-29 09:09:32", + "latest_commit": "2024-05-02 07:55:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.32 }, { - "description": "Heron GIT Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-git-ja-stablelm-base-7b-v1", - "project_name": "heron-chat-git-ja-stablelm-base-7b-v1", + "description": "Hibiki ASR Phonemizer This model is a Phoneme Level Speech Recognition network, originally a fine-tuned version of openai/whisper-large-v3 on a mixture of Different Japanese datasets.", + "url": "https://huggingface.co./Respair/Hibiki_ASR_Phonemizer_v0.2", + "project_name": "Hibiki_ASR_Phonemizer_v0.2", + "downloads": 68, + "source": "Hugging Face", + "score": -0.10547946111195448, + "first_commit": "2024-08-12 01:30:08", + "latest_commit": "2024-08-19 18:13:01", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.54 + }, + { + "description": "Japanese-Alpaca-2-13B-GGUF Japanese-Alpaca-2-13B-GGUFはJapanese-Alpaca-2-13BのGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-alpaca-2-13b-gguf", + "project_name": "japanese-alpaca-2-13b-gguf", "downloads": 67, "source": "Hugging Face", - "score": -0.10376725916893703, - "first_commit": "2024-03-29 09:09:32", - "latest_commit": "2024-05-02 07:55:57", + "score": -0.10549406906937747, + "first_commit": "2023-12-20 10:56:08", + "latest_commit": "2023-12-26 11:46:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.3 }, { - "description": "Heron BLIP Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", - "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1-llava-620k", + "description": "自動生成Q&A 種々のデータソースから、MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUFを使ってQ&Aを自動生成したものです。 CC-BY系またはApatch-2.0のデータソースを改変して生成しています。 ", + "url": "https://huggingface.co./datasets/hatakeyama-llm-team/AutoGeneratedJapaneseQA", + "project_name": "AutoGeneratedJapaneseQA", "downloads": 67, "source": "Hugging Face", - "score": -0.10376725916893703, - "first_commit": "2024-02-27 13:48:02", - "latest_commit": "2024-02-27 13:59:23", + "score": -0.10549406906937747, + "first_commit": "2024-04-27 04:32:20", + "latest_commit": "2024-05-19 03:22:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "DeBERTa V2 base Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", - "url": "https://huggingface.co./izumi-lab/deberta-v2-base-japanese", - "project_name": "deberta-v2-base-japanese", - "downloads": 64, + "description": "JaWiki WikipediaのHTML形式のダンプファイルから抽出したテキストデータセットです。 ", + "url": 
"https://huggingface.co./datasets/hpprc/jawiki", + "project_name": "jawiki", + "downloads": 66, "source": "Hugging Face", - "score": -0.1038096795377617, - "first_commit": "2023-10-21 13:24:11", - "latest_commit": "2024-07-19 03:07:57", + "score": -0.10550867702680046, + "first_commit": "2024-02-02 06:36:01", + "latest_commit": "2024-02-13 15:19:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "つくよみちゃんデータセットを用いて calm-2-7b-chat をファインチューニングしたモデルです。", + "url": "https://huggingface.co./offtoung/tsukuyomi-chan-calm2-7b", + "project_name": "tsukuyomi-chan-calm2-7b", + "downloads": 65, + "source": "Hugging Face", + "score": -0.10552328498422345, + "first_commit": "2023-12-21 08:46:37", + "latest_commit": "2023-12-27 04:07:20", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.01 }, { "description": "ShareGPT-Processed The RyokoAI/ShareGPT52K dataset, converted to Markdown and labeled with the language used.", @@ -8486,23 +8974,25 @@ "project_name": "ShareGPT-Processed", "downloads": 64, "source": "Hugging Face", - "score": -0.1038096795377617, + "score": -0.10553789294164644, "first_commit": "2023-05-16 19:50:04", "latest_commit": "2023-05-21 03:50:14", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "recruit-jp/japanese-image-classification-evaluation-dataset Overview Developed by: Recruit Co.", - "url": "https://huggingface.co./datasets/recruit-jp/japanese-image-classification-evaluation-dataset", - "project_name": "japanese-image-classification-evaluation-dataset", - "downloads": 63, + "description": "range3/cc100-ja This dataset consists of parquet files from the cc100 dataset with only the Japanese language extracted and sharded.", + "url": "https://huggingface.co./datasets/range3/cc100-ja", + "project_name": "cc100-ja", + "downloads": 64, "source": "Hugging Face", - "score": -0.10382381966070325, - "first_commit": "2023-12-19 09:17:24", - "latest_commit": "2024-01-22 10:48:13", + "score": -0.10553789294164644, + "first_commit": "2023-02-04 05:10:34", + "latest_commit": "2023-02-04 05:43:32", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "ja-stackoverflow 日本語版 Stack Overflow の スタック・オーバーフロー のデータダンプ をもとにデータを加工し、質問文と回答文のペアになるように調整した QA データセット。 ", @@ -8510,35 +9000,12 @@ "project_name": "ja-stackoverflow", "downloads": 63, "source": "Hugging Face", - "score": -0.10382381966070325, + "score": -0.10555250089906941, "first_commit": "2023-12-16 02:41:12", "latest_commit": "2023-12-21 05:30:24", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "sonoisa/t5-base-japaneseをファインチューニングして、タイトル生成に用いれるようにしたモデルです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/t5-CAMERA-title-generation", - "project_name": "t5-CAMERA-title-generation", - "downloads": 62, - "source": "Hugging Face", - "score": -0.10383795978364481, - "first_commit": "2023-03-21 10:49:27", - "latest_commit": "2023-07-21 14:11:13", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "whisper-large-v2-mix-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-mix-jp to the CTranslate2 model format.", - "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-mix-jp", - "project_name": "faster-whisper-large-v2-mix-jp", - "downloads": 62, - "source": "Hugging Face", - "score": -0.10383795978364481, - 
"first_commit": "2023-07-07 05:53:52", - "latest_commit": "2023-07-07 17:56:03", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "electra-base-cyberbullying This is a BERT Base model for the Japanese language finetuned for automatic cyberbullying detection.", @@ -8546,47 +9013,25 @@ "project_name": "bert-base-japanese-basic-char-v2-cyberbullying", "downloads": 61, "source": "Hugging Face", - "score": -0.10385209990658636, + "score": -0.10558171681391539, "first_commit": "2022-09-08 09:09:39", - "latest_commit": "2022-11-01 07:20:52", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Umievo-itr012-Gleipnir-7B このモデルは強力な4つの日本語モデルを進化的アルゴリズムで進化的マージしたものです。", - "url": "https://huggingface.co./umiyuki/Umievo-itr012-Gleipnir-7B", - "project_name": "Umievo-itr012-Gleipnir-7B", - "downloads": 61, - "source": "Hugging Face", - "score": -0.10385209990658636, - "first_commit": "2024-05-29 12:32:29", - "latest_commit": "2024-05-29 13:51:31", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "自動生成Q&A データソースから、MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUFを使ってQ&Aを自動生成したものです。 Common Crawlをもとに生成しています。 ", - "url": "https://huggingface.co./datasets/hatakeyama-llm-team/AutoGeneratedJapaneseQA-CC", - "project_name": "AutoGeneratedJapaneseQA-CC", - "downloads": 61, - "source": "Hugging Face", - "score": -0.10385209990658636, - "first_commit": "2024-05-18 03:55:41", - "latest_commit": "2024-05-19 09:25:43", + "latest_commit": "2022-11-01 07:20:52", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "つくよみちゃんデータセットを用いて calm-2-7b-chat をファインチューニングしたモデルです。", - "url": "https://huggingface.co./offtoung/tsukuyomi-chan-calm2-7b", - "project_name": "tsukuyomi-chan-calm2-7b", - "downloads": 60, + "description": "Japanese to Korean translator Japanese to Korean translator model based on EncoderDecoderModel(bert-japanese+kogpt2)", + "url": "https://huggingface.co./sappho192/aihub-ja-ko-translator", + "project_name": "aihub-ja-ko-translator", + "downloads": 61, "source": "Hugging Face", - "score": -0.10386624002952792, - "first_commit": "2023-12-21 08:46:37", - "latest_commit": "2023-12-27 04:07:20", + "score": -0.10558171681391539, + "first_commit": "2024-02-05 00:51:47", + "latest_commit": "2024-06-28 06:38:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.265 }, { "description": "japanese-sexual-moderation-v2は、studio-ousia/luke-japanese-large-liteをファインチューニングしたモデルです。", @@ -8594,47 +9039,64 @@ "project_name": "japanese-sexual-moderation-v2", "downloads": 60, "source": "Hugging Face", - "score": -0.10386624002952792, + "score": -0.10559632477133837, "first_commit": "2024-01-03 04:58:17", "latest_commit": "2024-01-03 07:09:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.41400000000000003 }, { - "description": "range3/cc100-ja This dataset consists of parquet files from the cc100 dataset with only the Japanese language extracted and sharded.", - "url": "https://huggingface.co./datasets/range3/cc100-ja", - "project_name": "cc100-ja", - "downloads": 60, + "description": "DeBERTa V2 base Japanese This is a DeBERTaV2 model pretrained on Japanese texts.", + "url": "https://huggingface.co./izumi-lab/deberta-v2-base-japanese", + "project_name": "deberta-v2-base-japanese", + "downloads": 59, "source": "Hugging Face", - "score": 
-0.10386624002952792, - "first_commit": "2023-02-04 05:10:34", - "latest_commit": "2023-02-04 05:43:32", + "score": -0.10561093272876136, + "first_commit": "2023-10-21 13:24:11", + "latest_commit": "2024-07-19 03:07:57", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "nlp-waseda/bigbird-base-japanese Model description This is a Japanese BigBird base model pretrained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./nlp-waseda/bigbird-base-japanese", - "project_name": "bigbird-base-japanese", + "description": "Kanji Diffusion v1-4 Model Card Kanji Diffusion is a latent text-to-image diffusion model capable of hallucinating Kanji characters given any English prompt.", + "url": "https://huggingface.co./yashvoladoddi37/kanji-diffusion-v1-4", + "project_name": "kanji-diffusion-v1-4", "downloads": 57, "source": "Hugging Face", - "score": -0.10390866039835259, - "first_commit": "2023-06-03 12:51:12", - "latest_commit": "2023-06-20 10:49:17", + "score": -0.10564014864360732, + "first_commit": "2024-08-13 06:06:21", + "latest_commit": "2024-08-16 12:14:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Japanese character-level GPT-2 Large Model description", - "url": "https://huggingface.co./ku-nlp/gpt2-large-japanese-char", - "project_name": "gpt2-large-japanese-char", + "description": "モデル概要 このモデルは、 sonoisa/sentence-luke-japanese-base-lite をSNS上のコメントに人手で攻撃性評価を行ったデータセットでFine-tuningすることで作成しました。 ", + "url": "https://huggingface.co./TomokiFujihara/luke-japanese-base-lite-offensiveness-estimation", + "project_name": "luke-japanese-base-lite-offensiveness-estimation", "downloads": 57, "source": "Hugging Face", - "score": -0.10390866039835259, - "first_commit": "2023-12-27 11:18:45", - "latest_commit": "2023-12-27 12:07:30", + "score": -0.10564014864360732, + "first_commit": "2023-12-08 03:20:14", + "latest_commit": "2024-03-24 12:35:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.133 + }, + { + "description": "whisper-large-v2-mix-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-mix-jp to the CTranslate2 model format.", + "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-mix-jp", + "project_name": "faster-whisper-large-v2-mix-jp", + "downloads": 57, + "source": "Hugging Face", + "score": -0.10564014864360732, + "first_commit": "2023-07-07 05:53:52", + "latest_commit": "2023-07-07 17:56:03", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "日本語 gpt2 蒸留モデル このモデルはrinna/japanese-gpt2-meduimを教師として蒸留したものです。 ", @@ -8642,107 +9104,116 @@ "project_name": "japanese-distilgpt2", "downloads": 57, "source": "Hugging Face", - "score": -0.10390866039835259, + "score": -0.10564014864360732, "first_commit": "2022-04-14 09:32:23", "latest_commit": "2022-04-15 06:00:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "luke-large-defamation-detection-japanese 日本語誹謗中傷検出器", - "url": "https://huggingface.co./kubota/luke-large-defamation-detection-japanese", - "project_name": "luke-large-defamation-detection-japanese", + "description": "自動生成Q&A データソースから、MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUFを使ってQ&Aを自動生成したものです。 Common Crawlをもとに生成しています。 ", + "url": 
"https://huggingface.co./datasets/hatakeyama-llm-team/AutoGeneratedJapaneseQA-CC", + "project_name": "AutoGeneratedJapaneseQA-CC", + "downloads": 57, + "source": "Hugging Face", + "score": -0.10564014864360732, + "first_commit": "2024-05-18 03:55:41", + "latest_commit": "2024-05-19 09:25:43", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "bert-base-japanese-jsnli This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on the JSNLI dataset.", + "url": "https://huggingface.co./Formzu/bert-base-japanese-jsnli", + "project_name": "bert-base-japanese-jsnli", "downloads": 56, "source": "Hugging Face", - "score": -0.10392280052129414, - "first_commit": "2023-01-23 06:25:08", - "latest_commit": "2023-02-07 15:49:33", + "score": -0.10565475660103031, + "first_commit": "2022-10-14 07:50:13", + "latest_commit": "2022-10-18 12:13:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", "project_name": "japanese-large-lm-3.6b-instruction-sft-4bit-128g-actorder_False", - "downloads": 54, + "downloads": 56, "source": "Hugging Face", - "score": -0.10395108076717725, + "score": -0.10565475660103031, "first_commit": "2023-09-26 06:16:04", "latest_commit": "2023-09-27 23:54:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.771 }, { - "description": "BERT for Japanese Twitter", - "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter", - "project_name": "bert-for-japanese-twitter", - "downloads": 54, + "description": "nlp-waseda/bigbird-base-japanese Model description This is a Japanese BigBird base model pretrained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", + "url": "https://huggingface.co./nlp-waseda/bigbird-base-japanese", + "project_name": "bigbird-base-japanese", + "downloads": 55, "source": "Hugging Face", - "score": -0.10395108076717725, - "first_commit": "2024-05-06 15:53:21", - "latest_commit": "2024-08-09 12:24:35", + "score": -0.1056693645584533, + "first_commit": "2023-06-03 12:51:12", + "latest_commit": "2023-06-20 10:49:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This pre-trained model is work in progress!", - "url": "https://huggingface.co./naclbit/gpt-j-japanese-6.8b", - "project_name": "gpt-j-japanese-6.8b", - "downloads": 54, + "description": "Umievo-itr012-Gleipnir-7B このモデルは強力な4つの日本語モデルを進化的アルゴリズムで進化的マージしたものです。", + "url": "https://huggingface.co./umiyuki/Umievo-itr012-Gleipnir-7B", + "project_name": "Umievo-itr012-Gleipnir-7B", + "downloads": 55, "source": "Hugging Face", - "score": -0.10395108076717725, - "first_commit": "2021-10-17 08:02:54", - "latest_commit": "2021-11-10 15:28:57", + "score": -0.1056693645584533, + "first_commit": "2024-05-29 12:32:29", + "latest_commit": "2024-05-29 13:51:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "transformers-ud-japanese-electra-ginza-520 (sudachitra-wordpiece, mC4 Japanese)", - "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-520", - "project_name": "transformers-ud-japanese-electra-base-ginza-520", + "description": "BERT for 
Japanese Twitter", + "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter", + "project_name": "bert-for-japanese-twitter", "downloads": 54, "source": "Hugging Face", - "score": -0.10395108076717725, - "first_commit": "2023-09-21 14:14:04", - "latest_commit": "2023-09-21 17:45:45", + "score": -0.10568397251587629, + "first_commit": "2024-05-06 15:53:21", + "latest_commit": "2024-08-09 12:24:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "Wikipediaを用いた日本語の固有表現抽出データセット GitHub: https://github.com/stockmarkteam/ner-wikipedia-dataset/ LICENSE: CC-BY-SA 3.0 Developed by Stockmark Inc.", - "url": "https://huggingface.co./datasets/stockmark/ner-wikipedia-dataset", - "project_name": "ner-wikipedia-dataset", + "description": "GPT-2 small Japanese model This repository contains a GPT2-small model trained on Japanese Wikipedia dataset.", + "url": "https://huggingface.co./colorfulscoop/gpt2-small-ja", + "project_name": "gpt2-small-ja", "downloads": 54, "source": "Hugging Face", - "score": -0.10395108076717725, - "first_commit": "2023-09-02 14:38:55", - "latest_commit": "2023-09-02 14:42:18", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "MobileBERT 日本語事前学習済みモデル爆誕!! ", - "url": "https://huggingface.co./ysakuramoto/mobilebert-ja", - "project_name": "mobilebert-ja", - "downloads": 53, - "source": "Hugging Face", - "score": -0.10396522089011881, - "first_commit": "2022-01-23 11:29:39", - "latest_commit": "2022-01-24 05:25:31", + "score": -0.10568397251587629, + "first_commit": "2021-03-27 02:27:05", + "latest_commit": "2021-09-27 20:50:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "GPT2 Japanese base model version 2 Prerequisites transformers==4.19.2 Model architecture This model uses GPT2 base setttings except vocabulary size.", - "url": "https://huggingface.co./ClassCat/gpt2-base-japanese-v2", - "project_name": "gpt2-base-japanese-v2", + "description": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", "downloads": 53, "source": "Hugging Face", - "score": -0.10396522089011881, - "first_commit": "2022-06-04 02:30:34", - "latest_commit": "2022-06-25 15:36:22", + "score": -0.10569858047329928, + "first_commit": "2023-09-26 06:15:16", + "latest_commit": "2023-09-29 03:19:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.446 }, { "description": "自動生成Q&A データソースから、MaziyarPanahi/Mixtral-8x22B-Instruct-v0.1-GGUFを使��てQ&Aを自動生成したものです。 チームで作成したデータおよび「Common Crawlをもとに生成しています。 ", @@ -8750,23 +9221,12 @@ "project_name": "AutoGeneratedJapaneseQA-other", "downloads": 53, "source": "Hugging Face", - "score": -0.10396522089011881, + "score": -0.10569858047329928, "first_commit": "2024-05-19 02:45:59", "latest_commit": "2024-05-19 14:17:58", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "GPT-2 small Japanese model This repository contains a GPT2-small model trained on Japanese Wikipedia dataset.", - "url": "https://huggingface.co./colorfulscoop/gpt2-small-ja", - "project_name": "gpt2-small-ja", - "downloads": 52, - "source": "Hugging Face", - "score": -0.10397936101306036, - "first_commit": "2021-03-27 02:27:05", - 
"latest_commit": "2021-09-27 20:50:17", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Heron BLIP Japanese StableLM", @@ -8774,59 +9234,64 @@ "project_name": "heron-chat-blip-ja-stablelm-base-7b-v1", "downloads": 52, "source": "Hugging Face", - "score": -0.10397936101306036, + "score": -0.10571318843072226, "first_commit": "2024-02-20 11:32:57", "latest_commit": "2024-02-27 13:57:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-short", - "project_name": "t5-base-short", + "description": "GPT2 Japanese base model version 2 Prerequisites transformers==4.19.2 Model architecture This model uses GPT2 base setttings except vocabulary size.", + "url": "https://huggingface.co./ClassCat/gpt2-base-japanese-v2", + "project_name": "gpt2-base-japanese-v2", "downloads": 52, "source": "Hugging Face", - "score": -0.10397936101306036, - "first_commit": "2023-04-26 08:20:52", - "latest_commit": "2023-05-10 10:00:23", + "score": -0.10571318843072226, + "first_commit": "2022-06-04 02:30:34", + "latest_commit": "2022-06-25 15:36:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SpiralAI Spiral-RetNet-3b-base We have conducted pre-training from scratch on the RetNet (https://arxiv.org/abs/2307.08621)", - "url": "https://huggingface.co./Spiral-AI/Spiral-RetNet-3b-base", - "project_name": "Spiral-RetNet-3b-base", - "downloads": 51, + "description": "This pre-trained model is work in progress!", + "url": "https://huggingface.co./naclbit/gpt-j-japanese-6.8b", + "project_name": "gpt-j-japanese-6.8b", + "downloads": 52, "source": "Hugging Face", - "score": -0.10399350113600192, - "first_commit": "2024-04-30 09:33:26", - "latest_commit": "2024-05-01 04:54:26", + "score": -0.10571318843072226, + "first_commit": "2021-10-17 08:02:54", + "latest_commit": "2021-11-10 15:28:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "karasu-lora-jp-qa-chat karasu fine tuned model by lora method with the original Q&A dataset.", - "url": "https://huggingface.co./aipib/karasu-lora-jp-qa-chat", - "project_name": "karasu-lora-jp-qa-chat", - "downloads": 50, + "description": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", + "project_name": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", + "downloads": 52, "source": "Hugging Face", - "score": -0.10400764125894348, - "first_commit": "2024-04-24 02:26:58", - "latest_commit": "2024-06-03 01:02:33", + "score": -0.10571318843072226, + "first_commit": "2023-09-26 06:15:31", + "latest_commit": "2023-09-29 03:09:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.625 }, { - "description": "Llama-3-8B-Instruct-JP-nk2t-v0.3 Model Details: Built with Meta Llama 3 llama-3-8bの日本語継続学習モデルにChatVectorを適用し、さらにQLoraでファインチューニングしたモデルです。 ", - "url": "https://huggingface.co./nk2t/Llama-3-8B-Instruct-japanese-nk2t-v0.3", - "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.3", - "downloads": 50, + "description": "275.86Mのmixtralを日本語データセット��pretrainingしたものです sample from transformers import AutoTokenizer, AutoModelForCausalLM model = 
AutoModelForCausalLM.from_pretrained(\"if001/tiny_mixtral_ja\")", + "url": "https://huggingface.co./hibikaze/tiny_mixtral_ja_with_tokenizer", + "project_name": "tiny_mixtral_ja_with_tokenizer", + "downloads": 51, "source": "Hugging Face", - "score": -0.10400764125894348, - "first_commit": "2024-05-15 12:24:06", - "latest_commit": "2024-05-22 11:02:28", + "score": -0.10572779638814524, + "first_commit": "2024-07-20 05:30:59", + "latest_commit": "2024-07-20 05:33:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.276 }, { "description": "名言推論モデル", @@ -8834,11 +9299,12 @@ "project_name": "meigen_generate_Japanese", "downloads": 50, "source": "Hugging Face", - "score": -0.10400764125894348, + "score": -0.10574240434556822, "first_commit": "2021-10-13 15:30:14", "latest_commit": "2021-10-26 01:19:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "studio-ousia/luke-japanese-baseに対して次の変更を加えたモデルです。 ", @@ -8846,47 +9312,77 @@ "project_name": "luke-japanese-wordpiece-base", "downloads": 49, "source": "Hugging Face", - "score": -0.10402178138188503, + "score": -0.10575701230299121, "first_commit": "2023-08-10 06:04:58", "latest_commit": "2023-11-28 13:35:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GPTQ", - "project_name": "japanese-stablelm-instruct-beta-70B-GPTQ", + "description": "SpiralAI Spiral-RetNet-3b-base We have conducted pre-training from scratch on the RetNet (https://arxiv.org/abs/2307.08621)", + "url": "https://huggingface.co./Spiral-AI/Spiral-RetNet-3b-base", + "project_name": "Spiral-RetNet-3b-base", "downloads": 49, "source": "Hugging Face", - "score": -0.10402178138188503, - "first_commit": "2023-11-02 15:45:24", - "latest_commit": "2023-11-02 20:04:07", + "score": -0.10575701230299121, + "first_commit": "2024-04-30 09:33:26", + "latest_commit": "2024-05-01 04:54:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.86 }, { "description": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", "url": "https://huggingface.co./line-corporation/japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", "project_name": "japanese-large-lm-3.6b-instruction-sft-4bit-32g-actorder_False", - "downloads": 47, + "downloads": 49, "source": "Hugging Face", - "score": -0.10405006162776814, + "score": -0.10575701230299121, "first_commit": "2023-09-26 06:15:51", "latest_commit": "2023-09-27 23:56:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.861 }, { - "description": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", - "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-128g-actorder_False", + "description": "transformers-ud-japanese-electra-ginza-520 (sudachitra-wordpiece, mC4 Japanese)", + "url": "https://huggingface.co./megagonlabs/transformers-ud-japanese-electra-base-ginza-520", + "project_name": "transformers-ud-japanese-electra-base-ginza-520", + "downloads": 48, + "source": "Hugging Face", + "score": -0.1057716202604142, + "first_commit": "2023-09-21 14:14:04", + "latest_commit": "2023-09-21 
17:45:45", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "karasu-lora-jp-qa-chat karasu fine tuned model by lora method with the original Q&A dataset.", + "url": "https://huggingface.co./aipib/karasu-lora-jp-qa-chat", + "project_name": "karasu-lora-jp-qa-chat", + "downloads": 48, + "source": "Hugging Face", + "score": -0.1057716202604142, + "first_commit": "2024-04-24 02:26:58", + "latest_commit": "2024-06-03 01:02:33", + "languages": [], + "model_or_dataset": "model", + "model_size": 1.1 + }, + { + "description": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", + "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", + "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", "downloads": 47, "source": "Hugging Face", - "score": -0.10405006162776814, - "first_commit": "2023-09-26 06:15:16", - "latest_commit": "2023-09-29 03:19:23", + "score": -0.10578622821783719, + "first_commit": "2023-09-26 06:14:25", + "latest_commit": "2023-09-27 01:23:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.487 }, { "description": "One more step before getting this model.", @@ -8894,107 +9390,103 @@ "project_name": "japanese-stable-diffusion", "downloads": 47, "source": "Hugging Face", - "score": -0.10405006162776814, + "score": -0.10578622821783719, "first_commit": null, "latest_commit": null, "languages": [], - "model_or_dataset": "model" - }, - { - "description": "日本語T5 Prefix Language Model", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-adapt", - "project_name": "t5-base-japanese-adapt", - "downloads": 46, - "source": "Hugging Face", - "score": -0.10406420175070971, - "first_commit": "2022-08-27 08:51:11", - "latest_commit": "2022-11-05 09:34:10", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "project_name": "japanese-large-lm-1.7b-instruction-sft-8bit-1g-actorder_True", - "downloads": 45, + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-GPTQ", + "project_name": "japanese-stablelm-instruct-beta-70B-GPTQ", + "downloads": 47, "source": "Hugging Face", - "score": -0.10407834187365127, - "first_commit": "2023-09-26 06:15:31", - "latest_commit": "2023-09-29 03:09:03", + "score": -0.10578622821783719, + "first_commit": "2023-11-02 15:45:24", + "latest_commit": "2023-11-02 20:04:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.1 }, { - "description": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "url": "https://huggingface.co./line-corporation/japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "project_name": "japanese-large-lm-1.7b-instruction-sft-4bit-32g-actorder_False", - "downloads": 45, + "description": "モデル ベースモデル:microsoft/Phi-3-mini-4k-instruct 学習データセット:llm-jp/hh-rlhf-12k-ja 学習方式:フルパラメータチューニング サンプル import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", trust_remote_code=True, ) model = AutoModelForCausalLM.from_pretrained( 
\"ryota39/Phi-3-mini-4k-instruct-dpo\", device_map=\"auto\", torch_dtype='auto', trust_remote_code=True, ) text = \"<|user|>\\n与えられた質問に対して英語で思考し、日本語で答えてください。", + "url": "https://huggingface.co./ryota39/Phi-3-mini-4k-instruct-dpo", + "project_name": "Phi-3-mini-4k-instruct-dpo", + "downloads": 47, "source": "Hugging Face", - "score": -0.10407834187365127, - "first_commit": "2023-09-26 06:14:25", - "latest_commit": "2023-09-27 01:23:34", + "score": -0.10578622821783719, + "first_commit": "2024-04-24 16:21:32", + "latest_commit": "2024-05-01 07:41:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.82 }, { - "description": "Kanji Diffusion v1-4 Model Card Kanji Diffusion is a latent text-to-image diffusion model capable of hallucinating Kanji characters given any English prompt.", - "url": "https://huggingface.co./yashvoladoddi37/kanji-diffusion-v1-4", - "project_name": "kanji-diffusion-v1-4", + "description": "luke-large-defamation-detection-japanese 日本語誹謗中傷検出器", + "url": "https://huggingface.co./kubota/luke-large-defamation-detection-japanese", + "project_name": "luke-large-defamation-detection-japanese", "downloads": 45, "source": "Hugging Face", - "score": -0.10407834187365127, - "first_commit": "2024-08-13 06:06:21", - "latest_commit": "2024-08-16 12:14:22", + "score": -0.10581544413268315, + "first_commit": "2023-01-23 06:25:08", + "latest_commit": "2023-02-07 15:49:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Ninja-v1-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. ", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP", - "project_name": "Ninja-v1-RP", + "description": "rinna/nekomata-14b-gguf Overview The model is the GGUF version of rinna/nekomata-14b.", + "url": "https://huggingface.co./rinna/nekomata-14b-gguf", + "project_name": "nekomata-14b-gguf", "downloads": 44, "source": "Hugging Face", - "score": -0.10409248199659282, - "first_commit": "2024-05-20 13:04:23", - "latest_commit": "2024-05-24 15:10:41", + "score": -0.10583005209010614, + "first_commit": "2023-12-19 08:11:51", + "latest_commit": "2024-07-20 08:29:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.2 }, { - "description": "Danbooru2023:", - "url": "https://huggingface.co./datasets/nyanko7/danbooru2023", - "project_name": "danbooru2023", + "description": "Original Model Optical character recognition for Japanese text, with the main focus being Japanese manga.", + "url": "https://huggingface.co./TareHimself/manga-ocr-base", + "project_name": "manga-ocr-base", "downloads": 44, "source": "Hugging Face", - "score": -0.10409248199659282, - "first_commit": "2024-01-07 19:51:58", - "latest_commit": "2024-05-22 18:43:24", + "score": -0.10583005209010614, + "first_commit": "2023-09-14 04:15:52", + "latest_commit": "2024-06-03 05:10:11", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "Ninja-v1-RP-expressive GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive", - "project_name": "Ninja-v1-RP-expressive", + "description": "日本語T5 Prefix Language Model", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-adapt", + "project_name": "t5-base-japanese-adapt", "downloads": 43, "source": "Hugging Face", - "score": -0.10410662211953438, - "first_commit": "2024-05-21 12:11:38", - "latest_commit": "2024-05-24 15:11:43", + "score": -0.10584466004752913, + "first_commit": "2022-08-27 08:51:11", + "latest_commit": "2022-11-05 09:34:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Original Model Optical character recognition for Japanese text, with the main focus being Japanese manga.", - "url": "https://huggingface.co./TareHimself/manga-ocr-base", - "project_name": "manga-ocr-base", + "description": "Model Card for Japanese character-level GPT-2 Large Model description", + "url": "https://huggingface.co./ku-nlp/gpt2-large-japanese-char", + "project_name": "gpt2-large-japanese-char", "downloads": 43, "source": "Hugging Face", - "score": -0.10410662211953438, - "first_commit": "2023-09-14 04:15:52", - "latest_commit": "2024-06-03 05:10:11", + "score": -0.10584466004752913, + "first_commit": "2023-12-27 11:18:45", + "latest_commit": "2023-12-27 12:07:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "ELECTRA base Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", @@ -9002,35 +9494,51 @@ "project_name": "electra-base-japanese-discriminator", "downloads": 42, "source": "Hugging Face", - "score": -0.10412076224247593, + "score": -0.10585926800495211, "first_commit": "2021-11-15 17:39:41", "latest_commit": "2022-12-09 00:43:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "rinna/nekomata-14b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-14b-instruction.", - "url": "https://huggingface.co./rinna/nekomata-14b-instruction-gguf", - "project_name": "nekomata-14b-instruction-gguf", + "description": "Kokuwa lamettaの改良でマージさせるモデル探しをしていたらKiwiMixという面白そうなモデルを見つけました。 ", + "url": "https://huggingface.co./Lasorco/Kokuwa", + "project_name": "Kokuwa", "downloads": 42, "source": "Hugging Face", - "score": -0.10412076224247593, - "first_commit": "2023-12-19 08:12:06", - "latest_commit": "2024-07-20 08:34:05", + "score": -0.10585926800495211, + "first_commit": "2023-10-24 14:10:27", + "latest_commit": "2023-10-26 04:22:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Japanese character-level GPT-2 Medium Model description This is a Japanese character-level GPT-2 Medium (310M parameters) language model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/gpt2-medium-japanese-char", - "project_name": "gpt2-medium-japanese-char", + "description": "Danbooru2023:", + "url": "https://huggingface.co./datasets/nyanko7/danbooru2023", + "project_name": "danbooru2023", "downloads": 42, "source": "Hugging Face", - "score": -0.10412076224247593, - "first_commit": "2023-05-18 06:29:28", - "latest_commit": "2023-06-08 05:34:26", + "score": -0.10585926800495211, + "first_commit": "2024-01-07 19:51:58", + "latest_commit": "2024-05-22 18:43:24", 
"languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "JSNLI Version 1.1 のデータセットのうち、フィルタリング後の訓練セット (train_w_filtering)", + "url": "https://huggingface.co./datasets/llm-book/jsnli", + "project_name": "jsnli", + "downloads": 42, + "source": "Hugging Face", + "score": -0.10585926800495211, + "first_commit": "2023-06-19 12:31:46", + "latest_commit": "2023-10-25 15:22:46", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "cosmopedia-100k のindex 20k ~ 100k を日本語に自動翻訳したデータになります(テキストが長すぎて翻訳エラーになったレコードは除外しています)。", @@ -9038,215 +9546,181 @@ "project_name": "cosmopedia-100k-ja-preview", "downloads": 42, "source": "Hugging Face", - "score": -0.10412076224247593, + "score": -0.10585926800495211, "first_commit": "2024-02-28 07:58:55", "latest_commit": "2024-03-05 23:30:38", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Japanese-LLaMA-3-8B-Instruct-v2-GGUF Japanese-LLaMA-3-8B-Instruct-v2-GGUFはJapanese-LLaMA-3-8B-Instruct-v2のGGUF形式です。 ", - "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2-gguf", - "project_name": "japanese-llama-3-8b-instruct-v2-gguf", - "downloads": 41, - "source": "Hugging Face", - "score": -0.10413490236541749, - "first_commit": "2024-06-10 11:21:01", - "latest_commit": "2024-06-21 06:35:03", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Llama-3-Umievo-itr014-Shizuko-8b このモデルは日本語に対応しているLlama-3ベースの4つのモデルを進化的アルゴリズムで進化的マージしたものです。", - "url": "https://huggingface.co./umiyuki/Llama-3-Umievo-itr014-Shizuko-8b", - "project_name": "Llama-3-Umievo-itr014-Shizuko-8b", - "downloads": 41, - "source": "Hugging Face", - "score": -0.10413490236541749, - "first_commit": "2024-06-08 05:25:05", - "latest_commit": "2024-06-08 07:47:59", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "実験モデルです。", - "url": "https://huggingface.co./Akimite/Qwen2-7b-Instruct-Boku-v2", - "project_name": "Qwen2-7b-Instruct-Boku-v2", - "downloads": 41, - "source": "Hugging Face", - "score": -0.10413490236541749, - "first_commit": "2024-06-13 03:23:05", - "latest_commit": "2024-06-15 14:58:10", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "bert-base-japanese-jsnli This model is a fine-tuned version of cl-tohoku/bert-base-japanese-v2 on the JSNLI dataset.", - "url": "https://huggingface.co./Formzu/bert-base-japanese-jsnli", - "project_name": "bert-base-japanese-jsnli", + "description": "roberta-base-japanese-jsnli This model is a fine-tuned version of nlp-waseda/roberta-base-japanese on the JSNLI dataset.", + "url": "https://huggingface.co./Formzu/roberta-base-japanese-jsnli", + "project_name": "roberta-base-japanese-jsnli", "downloads": 41, "source": "Hugging Face", - "score": -0.10413490236541749, - "first_commit": "2022-10-14 07:50:13", - "latest_commit": "2022-10-18 12:13:20", + "score": -0.1058738759623751, + "first_commit": "2022-10-14 07:50:47", + "latest_commit": "2022-10-19 11:08:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Kokuwa lamettaの改良でマージさせるモデル探しをしていたらKiwiMixという面白そうなモデルを見つけました。 ", - "url": "https://huggingface.co./Lasorco/Kokuwa", - "project_name": "Kokuwa", + "description": "Ninja-v1-RP-expressive GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive", + "project_name": "Ninja-v1-RP-expressive", "downloads": 41, "source": "Hugging Face", - "score": -0.10413490236541749, - "first_commit": "2023-10-24 14:10:27", - "latest_commit": "2023-10-26 04:22:46", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Heron BLIP Japanese StableLM", - "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v0", - "project_name": "heron-chat-blip-ja-stablelm-base-7b-v0", - "downloads": 40, - "source": "Hugging Face", - "score": -0.10414904248835904, - "first_commit": "2023-09-06 09:31:44", - "latest_commit": "2023-09-07 16:59:14", + "score": -0.1058738759623751, + "first_commit": "2024-05-21 12:11:38", + "latest_commit": "2024-05-24 15:11:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "rinna/nekomata-14b-gguf Overview The model is the GGUF version of rinna/nekomata-14b.", - "url": "https://huggingface.co./rinna/nekomata-14b-gguf", - "project_name": "nekomata-14b-gguf", + "description": "Overview of bert-japanese-12M The bert-japanese-12M model is a transformer-based model with BERT architecture, which is designed to be used on Japanese text.", + "url": "https://huggingface.co./nptdat/bert-japanese-12M", + "project_name": "bert-japanese-12M", "downloads": 40, "source": "Hugging Face", - "score": -0.10414904248835904, - "first_commit": "2023-12-19 08:11:51", - "latest_commit": "2024-07-20 08:29:58", + "score": -0.10588848391979809, + "first_commit": "2024-08-16 16:46:49", + "latest_commit": "2024-08-19 02:56:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデル ベースモデル:microsoft/Phi-3-mini-4k-instruct 学習データセット:llm-jp/hh-rlhf-12k-ja 学習方式:フルパラメータチューニング サンプル import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", trust_remote_code=True, ) model = AutoModelForCausalLM.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", device_map=\"auto\", torch_dtype='auto', trust_remote_code=True, ) text = \"<|user|>\\n与えられた質問に対して英語で思考し、日本語で答えてください。", - "url": "https://huggingface.co./ryota39/Phi-3-mini-4k-instruct-dpo", - "project_name": "Phi-3-mini-4k-instruct-dpo", + "description": "Ninja-v1-RP GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP", + "project_name": "Ninja-v1-RP", "downloads": 40, "source": "Hugging Face", - "score": -0.10414904248835904, - "first_commit": "2024-04-24 16:21:32", - "latest_commit": "2024-05-01 07:41:46", + "score": -0.10588848391979809, + "first_commit": "2024-05-20 13:04:23", + "latest_commit": "2024-05-24 15:10:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "GPT-Neo 1.3B pre-trained model for Japanese Model Description GPT2/GPT3 like model trained on Japanese.corpus.", - "url": "https://huggingface.co./yellowback/gpt-neo-japanese-1.3B", - "project_name": "gpt-neo-japanese-1.3B", - "downloads": 40, + "description": "実験モデルです。", + "url": "https://huggingface.co./Akimite/Qwen2-7b-Instruct-Boku-v2", + "project_name": "Qwen2-7b-Instruct-Boku-v2", + "downloads": 39, "source": "Hugging Face", - "score": -0.10414904248835904, - "first_commit": "2021-12-09 08:09:40", - "latest_commit": "2021-12-09 17:59:05", + "score": -0.10590309187722106, + "first_commit": "2024-06-13 03:23:05", + "latest_commit": "2024-06-15 14:58:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.62 }, { - "description": "rinna/japanese-data2vec-audio-base Overview This is a Japanese data2vec Audio Base model trained by rinna Co.", - "url": "https://huggingface.co./rinna/japanese-data2vec-audio-base", - "project_name": "japanese-data2vec-audio-base", - "downloads": 39, + "description": "タイトルから記事本文を生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-article-generation", + "project_name": "t5-base-japanese-article-generation", + "downloads": 38, "source": "Hugging Face", - "score": -0.1041631826113006, - "first_commit": "2024-03-05 10:32:32", - "latest_commit": "2024-07-22 08:12:56", + "score": -0.10591769983464405, + "first_commit": "2021-04-03 13:55:25", + "latest_commit": "2024-04-17 11:39:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.223 }, { - "description": "JSNLI Version 1.1 のデータセットのうち、フィルタリング後の訓練セット (train_w_filtering)", - "url": "https://huggingface.co./datasets/llm-book/jsnli", - "project_name": "jsnli", - "downloads": 39, + "description": "Japanese-LLaMA-3-8B-Instruct-v2-GGUF Japanese-LLaMA-3-8B-Instruct-v2-GGUFはJapanese-LLaMA-3-8B-Instruct-v2のGGUF形式です。 ", + "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2-gguf", + "project_name": "japanese-llama-3-8b-instruct-v2-gguf", + "downloads": 38, "source": "Hugging Face", - "score": -0.1041631826113006, - "first_commit": "2023-06-19 12:31:46", - "latest_commit": "2023-10-25 15:22:46", + "score": -0.10591769983464405, + "first_commit": "2024-06-10 11:21:01", + "latest_commit": "2024-06-21 06:35:03", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-Umievo-Shizuko-sqlcoder-2x8B", - "project_name": "Llama-3-Umievo-Shizuko-sqlcoder-2x8B", + "description": "Mixtral-8x7B-v0.1-japanese Mixtral-8x7B-v0.1-japaneseはMixtral-8x7B-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-v0.1-japanese", + "project_name": "Mixtral-8x7B-v0.1-japanese", "downloads": 37, "source": "Hugging Face", - "score": -0.10419146285718371, - "first_commit": "2024-06-09 12:17:00", - 
"latest_commit": "2024-06-11 07:39:45", + "score": -0.10593230779206704, + "first_commit": "2024-04-16 03:06:14", + "latest_commit": "2024-04-20 09:14:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 46.9 }, { - "description": "タイトルから記事本文を生成するモデル SEE: https://qiita.com/sonoisa/items/a9af64ff641f0bbfed44", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-article-generation", - "project_name": "t5-base-japanese-article-generation", - "downloads": 37, + "description": "Japanese Stable Diffusion Pokemon Model Card Stable-Diffusion-Pokemon-ja is a Japanese-specific latent text-to-image diffusion model capable of generating Pokemon images given any text input.", + "url": "https://huggingface.co./svjack/Stable-Diffusion-Pokemon-ja", + "project_name": "Stable-Diffusion-Pokemon-ja", + "downloads": 36, "source": "Hugging Face", - "score": -0.10419146285718371, - "first_commit": "2021-04-03 13:55:25", - "latest_commit": "2024-04-17 11:39:12", + "score": -0.10594691574949003, + "first_commit": "2022-10-30 08:19:13", + "latest_commit": "2023-05-16 09:23:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Aya_ja このデータセットはCohereForAI/aya_datasetの日本語インストラクションデータのみを抽出したデータセットです。 ", - "url": "https://huggingface.co./datasets/ryota39/Aya_ja", - "project_name": "Aya_ja", - "downloads": 37, + "description": "Heron BLIP Japanese StableLM", + "url": "https://huggingface.co./turing-motors/heron-chat-blip-ja-stablelm-base-7b-v0", + "project_name": "heron-chat-blip-ja-stablelm-base-7b-v0", + "downloads": 36, "source": "Hugging Face", - "score": -0.10419146285718371, - "first_commit": "2024-02-14 08:03:42", - "latest_commit": "2024-02-14 08:25:06", + "score": -0.10594691574949003, + "first_commit": "2023-09-06 09:31:44", + "latest_commit": "2023-09-07 16:59:14", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base-Int4", - "project_name": "Orion-14B-Base-Int4", + "description": "Llama-3-8B-Instruct-JP-nk2t-v0.3 Model Details: Built with Meta Llama 3 llama-3-8bの日本語継続学習モデルにChatVectorを適用し、さらにQLoraでファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./nk2t/Llama-3-8B-Instruct-japanese-nk2t-v0.3", + "project_name": "Llama-3-8B-Instruct-japanese-nk2t-v0.3", "downloads": 36, "source": "Hugging Face", - "score": -0.10420560298012527, - "first_commit": "2024-01-18 09:50:31", - "latest_commit": "2024-03-26 09:55:37", + "score": -0.10594691574949003, + "first_commit": "2024-05-15 12:24:06", + "latest_commit": "2024-05-22 11:02:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Model Card for Model ID このモデルはrinna/japanese-gpt-1bをベースモデルとして、 コンテキストからの抽出型QAと、解答を新たなコンテキストでリファインするための学習を行ったモデルです。 ", - "url": "https://huggingface.co./oshizo/qa-refine-japanese-gpt-1b", - "project_name": "qa-refine-japanese-gpt-1b", - "downloads": 35, + "description": "rinna/japanese-data2vec-audio-base Overview This is a Japanese data2vec Audio Base model trained by rinna Co.", + "url": "https://huggingface.co./rinna/japanese-data2vec-audio-base", + "project_name": "japanese-data2vec-audio-base", + "downloads": 36, "source": "Hugging Face", - "score": -0.10421974310306682, - "first_commit": "2023-01-18 15:43:39", - "latest_commit": "2023-01-19 10:14:36", + "score": -0.10594691574949003, + 
"first_commit": "2024-03-05 10:32:32", + "latest_commit": "2024-07-22 08:12:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.0932 }, { - "description": "Oumuamua-7b-instruct This is a merge of pre-trained language models created using mergekit. ", - "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct", - "project_name": "Oumuamua-7b-instruct", + "description": "jpn-heb source group: Japanese target group:", + "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-he", + "project_name": "opus-mt-ja-he", "downloads": 35, "source": "Hugging Face", - "score": -0.10421974310306682, - "first_commit": "2024-06-01 10:40:37", - "latest_commit": "2024-06-01 15:55:51", + "score": -0.10596152370691302, + "first_commit": "2020-08-19 00:28:58", + "latest_commit": "2023-08-16 11:59:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Model card for model ID", @@ -9254,107 +9728,116 @@ "project_name": "t5-xl", "downloads": 35, "source": "Hugging Face", - "score": -0.10421974310306682, + "score": -0.10596152370691302, "first_commit": "2023-04-26 07:19:08", "latest_commit": "2023-05-10 10:01:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/30876467ad5a8a81821f", - "url": "https://huggingface.co./sonoisa/t5-qiita-title-generation", - "project_name": "t5-qiita-title-generation", + "description": "Ninja-v1-RP-expressive-v2 GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. ", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-v2", + "project_name": "Ninja-v1-RP-expressive-v2", "downloads": 35, "source": "Hugging Face", - "score": -0.10421974310306682, - "first_commit": "2021-10-17 14:46:56", - "latest_commit": "2022-02-21 13:39:01", + "score": -0.10596152370691302, + "first_commit": "2024-05-25 16:56:18", + "latest_commit": "2024-05-26 15:20:52", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "rinna/nekomata-7b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-7b-instruction.", - "url": "https://huggingface.co./rinna/nekomata-7b-instruction-gguf", - "project_name": "nekomata-7b-instruction-gguf", + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-Umievo-Shizuko-sqlcoder-2x8B", + "project_name": "Llama-3-Umievo-Shizuko-sqlcoder-2x8B", "downloads": 35, "source": "Hugging Face", - "score": -0.10421974310306682, - "first_commit": "2023-12-19 08:11:08", - "latest_commit": "2024-07-20 08:38:34", + "score": -0.10596152370691302, + "first_commit": "2024-06-09 12:17:00", + "latest_commit": "2024-06-11 07:39:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.7 }, { - "description": "jpn-heb source group: Japanese target group:", - "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-he", - "project_name": "opus-mt-ja-he", - "downloads": 34, + "description": "Model Card for Model ID このモデルはrinna/japanese-gpt-1bをベースモデルとして、 コンテキストからの抽出型QAと、解答を新たなコンテキストでリファインするための学習を行ったモデルです。 ", + "url": "https://huggingface.co./oshizo/qa-refine-japanese-gpt-1b", + "project_name": "qa-refine-japanese-gpt-1b", + "downloads": 35, "source": "Hugging Face", - "score": -0.10423388322600838, - 
"first_commit": "2020-08-19 00:28:58", - "latest_commit": "2023-08-16 11:59:12", + "score": -0.10596152370691302, + "first_commit": "2023-01-18 15:43:39", + "latest_commit": "2023-01-19 10:14:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", - "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000", - "project_name": "jmedroberta-base-manbyo-wordpiece-vocab50000", + "description": "This is a Japanese sentence-T5 model.", + "url": "https://huggingface.co./sonoisa/sentence-t5-base-ja-mean-tokens", + "project_name": "sentence-t5-base-ja-mean-tokens", "downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, - "first_commit": "2022-12-22 17:19:15", - "latest_commit": "2023-03-08 01:47:12", + "score": -0.105976131664336, + "first_commit": "2021-12-27 11:57:10", + "latest_commit": "2022-07-31 07:54:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Ninja-v1-RP-expressive-breadcrumbs GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. ", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-breadcrumbs", - "project_name": "Ninja-v1-RP-expressive-breadcrumbs", + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Base-Int4", + "project_name": "Orion-14B-Base-Int4", "downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, - "first_commit": "2024-05-26 06:36:42", - "latest_commit": "2024-06-01 11:54:18", + "score": -0.105976131664336, + "first_commit": "2024-01-18 09:50:31", + "latest_commit": "2024-03-26 09:55:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.69 }, { - "description": "bert-base-japanese-v3-bpr-passage-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRのパッセージエンコーダです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-passage-aio", - "project_name": "bert-base-japanese-v3-bpr-passage-aio", + "description": "このモデルはluke-japanese-baseをファインチューニングして、MARC-ja(positive or negativeの二値分類)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-marcja", + "project_name": "luke-japanese-base-marcja", "downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, - "first_commit": "2023-06-06 08:22:28", - "latest_commit": "2023-07-24 07:14:59", + "score": -0.105976131664336, + "first_commit": "2023-03-02 03:57:33", + "latest_commit": "2023-07-21 14:10:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.279 }, { - "description": "rinna/nekomata-7b-gguf Overview The model is the GGUF version of rinna/nekomata-7b.", - "url": "https://huggingface.co./rinna/nekomata-7b-gguf", - "project_name": "nekomata-7b-gguf", + "description": "alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000 Model description This is a Japanese RoBERTa base model pre-trained on academic articles in medical sciences collected by Japan Science and Technology Agency (JST).", + "url": "https://huggingface.co./alabnii/jmedroberta-base-manbyo-wordpiece-vocab50000", + "project_name": "jmedroberta-base-manbyo-wordpiece-vocab50000", 
"downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, - "first_commit": "2023-12-19 08:10:42", - "latest_commit": "2024-07-20 08:36:15", + "score": -0.105976131664336, + "first_commit": "2022-12-22 17:19:15", + "latest_commit": "2023-03-08 01:47:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "English - Japanese pairs taken from https://tatoeba.org/en/downloads and then deduplicated.", - "url": "https://huggingface.co./datasets/Verah/tatoeba_dedupe_en-jp_2024-March-01", - "project_name": "tatoeba_dedupe_en-jp_2024-March-01", + "description": "GPT-Neo 1.3B pre-trained model for Japanese Model Description GPT2/GPT3 like model trained on Japanese.corpus.", + "url": "https://huggingface.co./yellowback/gpt-neo-japanese-1.3B", + "project_name": "gpt-neo-japanese-1.3B", "downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, - "first_commit": "2024-03-05 13:46:13", - "latest_commit": "2024-03-06 08:34:02", + "score": -0.105976131664336, + "first_commit": "2021-12-09 08:09:40", + "latest_commit": "2021-12-09 17:59:05", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { "description": "defamation_japanese_twitter Twitter日本語誹謗中傷検出データセット Dataset Summary SNSにおける誹謗中傷検出のためのデータセットです. ", @@ -9362,275 +9845,207 @@ "project_name": "defamation-japanese-twitter", "downloads": 34, "source": "Hugging Face", - "score": -0.10423388322600838, + "score": -0.105976131664336, "first_commit": "2023-01-20 06:50:46", "latest_commit": "2023-02-06 18:26:10", "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "This is for (private) DEMO only.", - "url": "https://huggingface.co./Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition", - "project_name": "wav2vec2-xlsr-japanese-speech-emotion-recognition", - "downloads": 33, - "source": "Hugging Face", - "score": -0.10424802334894993, - "first_commit": "2021-09-22 04:10:36", - "latest_commit": "2023-10-19 01:31:17", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "deberta-large-japanese-wikipedia Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia", - "project_name": "deberta-large-japanese-wikipedia", - "downloads": 33, + "description": "English - Japanese pairs taken from https://tatoeba.org/en/downloads and then deduplicated.", + "url": "https://huggingface.co./datasets/Verah/tatoeba_dedupe_en-jp_2024-March-01", + "project_name": "tatoeba_dedupe_en-jp_2024-March-01", + "downloads": 34, "source": "Hugging Face", - "score": -0.10424802334894993, - "first_commit": "2022-07-05 22:01:16", - "latest_commit": "2023-02-27 10:15:35", + "score": -0.105976131664336, + "first_commit": "2024-03-05 13:46:13", + "latest_commit": "2024-03-06 08:34:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "nlp-waseda/gpt2-small-japanese This model is Japanese GPT-2 pretrained on Japanese Wikipedia and CC-100.", - "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese", - "project_name": "gpt2-small-japanese", + "description": "Oumuamua-7b-instruct This is a merge of pre-trained language models created using mergekit. 
", + "url": "https://huggingface.co./nitky/Oumuamua-7b-instruct", + "project_name": "Oumuamua-7b-instruct", "downloads": 33, "source": "Hugging Face", - "score": -0.10424802334894993, - "first_commit": "2022-03-30 03:34:11", - "latest_commit": "2022-03-30 04:28:17", + "score": -0.10599073962175898, + "first_commit": "2024-06-01 10:40:37", + "latest_commit": "2024-06-01 15:55:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.33 }, { - "description": "[Under Construction]", - "url": "https://huggingface.co./datasets/bclavie/mmarco-japanese-hard-negatives", - "project_name": "mmarco-japanese-hard-negatives", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Chat", + "project_name": "SambaLingo-Japanese-Chat", "downloads": 33, "source": "Hugging Face", - "score": -0.10424802334894993, - "first_commit": "2023-12-24 13:04:27", - "latest_commit": "2023-12-24 18:52:04", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "nlp-waseda/gpt2-xl-japanese This is Japanese GPT2 with approximately 1.5B parameters pretrained on Japanese Wikipedia and CC-100", - "url": "https://huggingface.co./nlp-waseda/gpt2-xl-japanese", - "project_name": "gpt2-xl-japanese", - "downloads": 32, - "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2022-11-30 04:33:31", - "latest_commit": "2023-06-21 04:29:10", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for Ninja-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 Ninja has the following changes compared to Mistral-7B-v0.1.", - "url": "https://huggingface.co./Local-Novel-LLM-project/Ninja-v1", - "project_name": "Ninja-v1", - "downloads": 32, - "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2024-04-24 10:28:30", - "latest_commit": "2024-05-04 04:07:09", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "About This model is Lightblue's QLoRA finetune of OpenOrca's Open-Orca/OpenOrcaxOpenChat-Preview2-13B model on Japanese fine-tuning datasets.", - "url": "https://huggingface.co./lightblue/openorca_stx", - "project_name": "openorca_stx", - "downloads": 32, - "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2023-09-12 09:29:10", - "latest_commit": "2023-10-02 10:25:36", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "このモデルはluke-japanese-baseをファインチューニングして、MARC-ja(positive or negativeの二値分類)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-marcja", - "project_name": "luke-japanese-base-marcja", - "downloads": 32, - "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2023-03-02 03:57:33", - "latest_commit": "2023-07-21 14:10:48", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Japanese Stable Diffusion Pokemon Model Card Stable-Diffusion-Pokemon-ja is a Japanese-specific latent text-to-image diffusion model capable of generating Pokemon images given any text input.", - "url": "https://huggingface.co./svjack/Stable-Diffusion-Pokemon-ja", - "project_name": "Stable-Diffusion-Pokemon-ja", - "downloads": 32, - "source": "Hugging Face", - "score": 
-0.10426216347189149, - "first_commit": "2022-10-30 08:19:13", - "latest_commit": "2023-05-16 09:23:49", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", - "url": "https://huggingface.co./TeamFnord/manga-ocr", - "project_name": "manga-ocr", - "downloads": 32, - "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2022-01-15 17:39:06", - "latest_commit": "2022-02-10 07:50:15", + "score": -0.10599073962175898, + "first_commit": "2024-02-15 22:45:08", + "latest_commit": "2024-04-16 22:32:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.95 }, { - "description": "This is a Japanese sentence-T5 model.", - "url": "https://huggingface.co./sonoisa/sentence-t5-base-ja-mean-tokens", - "project_name": "sentence-t5-base-ja-mean-tokens", + "description": "[EZO model card]", + "url": "https://huggingface.co./HODACHI/EZO-InternVL2-26B", + "project_name": "EZO-InternVL2-26B", "downloads": 32, "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2021-12-27 11:57:10", - "latest_commit": "2022-07-31 07:54:13", + "score": -0.10600534757918197, + "first_commit": "2024-08-19 08:03:55", + "latest_commit": "2024-08-19 10:54:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 25.5 }, { - "description": "t5-base-japanese-web-8k (with Byte-fallback, 8K) Description megagonlabs/t5-base-japanese-web-8k is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", - "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web-8k", - "project_name": "t5-base-japanese-web-8k", + "description": "bert-base-japanese-v3-bpr-passage-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRのパッセージエンコーダです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-passage-aio", + "project_name": "bert-base-japanese-v3-bpr-passage-aio", "downloads": 32, "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": "2021-09-06 10:13:42", - "latest_commit": "2023-07-04 07:05:38", + "score": -0.10600534757918197, + "first_commit": "2023-06-06 08:22:28", + "latest_commit": "2023-07-24 07:14:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "You agree to the terms of the LICENSE when using this dataset. ", - "url": "https://huggingface.co./datasets/litagin/ehehe-corpus", - "project_name": "ehehe-corpus", + "description": "Ninja-v1-RP-expressive-breadcrumbs GGUF版はこちら/Click here for the GGUF version 概要 This is a merge of pre-trained language models created using mergekit. 
", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-expressive-breadcrumbs", + "project_name": "Ninja-v1-RP-expressive-breadcrumbs", "downloads": 32, "source": "Hugging Face", - "score": -0.10426216347189149, - "first_commit": null, - "latest_commit": null, + "score": -0.10600534757918197, + "first_commit": "2024-05-26 06:36:42", + "latest_commit": "2024-06-01 11:54:18", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "bert-base-japanese-v3-bpr-question-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRの質問エンコーダです。 ", - "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-question-aio", - "project_name": "bert-base-japanese-v3-bpr-question-aio", - "downloads": 31, + "description": "記事本文からタイトルを生成するモデル SEE: https://qiita.com/sonoisa/items/30876467ad5a8a81821f", + "url": "https://huggingface.co./sonoisa/t5-qiita-title-generation", + "project_name": "t5-qiita-title-generation", + "downloads": 32, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2023-06-06 08:21:13", - "latest_commit": "2023-07-24 07:12:05", + "score": -0.10600534757918197, + "first_commit": "2021-10-17 14:46:56", + "latest_commit": "2022-02-21 13:39:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Japanese DeBERTa V2 base Model description This is a Japanese DeBERTa V2 base model pre-trained on Japanese Wikipedia, the Japanese portion of CC-100, and the Japanese portion of OSCAR.", - "url": "https://huggingface.co./ku-nlp/deberta-v2-base-japanese-with-auto-jumanpp", - "project_name": "deberta-v2-base-japanese-with-auto-jumanpp", + "description": "yacis-electra-small", + "url": "https://huggingface.co./ptaszynski/yacis-electra-small-japanese", + "project_name": "yacis-electra-small-japanese", "downloads": 31, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2023-09-07 06:04:29", - "latest_commit": "2023-11-20 06:00:08", + "score": -0.10601995553660495, + "first_commit": "2022-01-12 01:48:13", + "latest_commit": "2022-01-13 01:43:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Heron GIT Japanese ELYZA Llama 2 Fast 7B Model Details Heron GIT Japanese ELYZA Llama 2 Fast 7B is a vision-language model that can converse about input images.", - "url": "https://huggingface.co./turing-motors/heron-chat-git-ELYZA-fast-7b-v0", - "project_name": "heron-chat-git-ELYZA-fast-7b-v0", + "description": "nlp-waseda/gpt2-xl-japanese This is Japanese GPT2 with approximately 1.5B parameters pretrained on Japanese Wikipedia and CC-100", + "url": "https://huggingface.co./nlp-waseda/gpt2-xl-japanese", + "project_name": "gpt2-xl-japanese", "downloads": 31, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2023-09-06 09:04:40", - "latest_commit": "2023-09-11 16:56:39", + "score": -0.10601995553660495, + "first_commit": "2022-11-30 04:33:31", + "latest_commit": "2023-06-21 04:29:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.61 }, { - "description": "t5-base-xlsum-ja", - "url": "https://huggingface.co./p1atdev/t5-base-xlsum-ja", - "project_name": "t5-base-xlsum-ja", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-AWQ", + "project_name": 
"japanese-stablelm-base-beta-70B-AWQ", "downloads": 31, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2023-10-06 03:18:28", - "latest_commit": "2023-11-20 09:25:16", + "score": -0.10601995553660495, + "first_commit": "2023-11-06 11:33:47", + "latest_commit": "2023-11-09 18:16:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.68 }, { - "description": "Chat-Vector-LLaVA-v1.5-7b-JA Model Card Model detail Model type: Chat-Vector-LLaVA-v1.5-7b-JA is a vision-language model that can converse about input images in Japanese.", - "url": "https://huggingface.co./toshi456/chat-vector-llava-v1.5-7b-ja", - "project_name": "chat-vector-llava-v1.5-7b-ja", + "description": "rinna/nekomata-14b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-14b-instruction.", + "url": "https://huggingface.co./rinna/nekomata-14b-instruction-gguf", + "project_name": "nekomata-14b-instruction-gguf", "downloads": 31, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2024-05-06 04:07:19", - "latest_commit": "2024-05-06 11:33:32", + "score": -0.10601995553660495, + "first_commit": "2023-12-19 08:12:06", + "latest_commit": "2024-07-20 08:34:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.2 }, { - "description": "Dataset details: Each entry in this dataset is a sentence-aligned Japanese web novel chapter and English fan translation.", - "url": "https://huggingface.co./datasets/NilanE/ParallelFiction-Ja_En-100k", - "project_name": "ParallelFiction-Ja_En-100k", + "description": "Japanese-LLaMA-3-8B Japanese-LLaMA-3-8Bは基盤モデル、フルモデルです。 ", + "url": "https://huggingface.co./owner203/japanese-llama-3-8b", + "project_name": "japanese-llama-3-8b", "downloads": 31, "source": "Hugging Face", - "score": -0.10427630359483304, - "first_commit": "2024-03-25 23:41:17", - "latest_commit": "2024-06-02 18:03:38", + "score": -0.10601995553660495, + "first_commit": "2024-06-05 02:19:05", + "latest_commit": "2024-06-21 06:35:41", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "ELECTRA base Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-base-japanese-generator", - "project_name": "electra-base-japanese-generator", - "downloads": 30, + "description": "nlp-waseda/gpt2-small-japanese This model is Japanese GPT-2 pretrained on Japanese Wikipedia and CC-100.", + "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese", + "project_name": "gpt2-small-japanese", + "downloads": 31, "source": "Hugging Face", - "score": -0.1042904437177746, - "first_commit": "2021-11-15 17:23:50", - "latest_commit": "2023-10-21 13:21:16", + "score": -0.10601995553660495, + "first_commit": "2022-03-30 03:34:11", + "latest_commit": "2022-03-30 04:28:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "yacis-electra-small", - "url": "https://huggingface.co./ptaszynski/yacis-electra-small-japanese", - "project_name": "yacis-electra-small-japanese", - "downloads": 30, + "description": "[Under Construction]", + "url": "https://huggingface.co./datasets/bclavie/mmarco-japanese-hard-negatives", + "project_name": "mmarco-japanese-hard-negatives", + "downloads": 31, "source": "Hugging Face", - "score": -0.1042904437177746, - "first_commit": "2022-01-12 
01:48:13", - "latest_commit": "2022-01-13 01:43:17", + "score": -0.10601995553660495, + "first_commit": "2023-12-24 13:04:27", + "latest_commit": "2023-12-24 18:52:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "llm-jp-13b-instruct-lora-jaster-v1.0", - "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-v1.0", - "project_name": "llm-jp-13b-instruct-lora-jaster-v1.0", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-AWQ", + "project_name": "japanese-stablelm-instruct-beta-70B-AWQ", "downloads": 30, "source": "Hugging Face", - "score": -0.1042904437177746, - "first_commit": "2023-10-18 18:53:58", - "latest_commit": "2023-10-20 08:41:20", + "score": -0.10603456349402794, + "first_commit": "2023-11-02 15:45:23", + "latest_commit": "2023-11-09 18:16:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.68 }, { "description": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf ELYZA-japanese-Llama-2-13b-fast-instructの GGUF 変換モデルです。", @@ -9638,35 +10053,51 @@ "project_name": "ELYZA-japanese-Llama-2-13b-fast-instruct-gguf", "downloads": 30, "source": "Hugging Face", - "score": -0.1042904437177746, + "score": -0.10603456349402794, "first_commit": "2024-01-23 14:14:39", "latest_commit": "2024-01-25 06:30:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 13.1 }, { - "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", - "url": "https://huggingface.co./sonoisa/t5-base-japanese-mC4-Wikipedia", - "project_name": "t5-base-japanese-mC4-Wikipedia", + "description": "Japanese-LLaMA-3-8B-Instruct-v2 Japanese-LLaMA-3-8B-Instruct-v2は指示実行モデル、フルモデルです。 ", + "url": "https://huggingface.co./owner203/japanese-llama-3-8b-instruct-v2", + "project_name": "japanese-llama-3-8b-instruct-v2", "downloads": 30, "source": "Hugging Face", - "score": -0.1042904437177746, - "first_commit": "2021-06-30 12:53:09", - "latest_commit": "2021-09-23 18:29:58", + "score": -0.10603456349402794, + "first_commit": "2024-06-10 10:10:19", + "latest_commit": "2024-06-21 06:35:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59363 Validation Metrics Loss: 0.12651239335536957 Accuracy: 0.9532079853817648 Precision: 0.9729688278823665 Recall: 0.9744633462616643 AUC: 0.9717333684823413 F1: 0.9737155136027014 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", - "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59363", - "project_name": "autonlp-japanese-sentiment-59363", + "description": "About This model is Lightblue's QLoRA finetune of OpenOrca's Open-Orca/OpenOrcaxOpenChat-Preview2-13B model on Japanese fine-tuning datasets.", + "url": "https://huggingface.co./lightblue/openorca_stx", + "project_name": "openorca_stx", "downloads": 30, "source": "Hugging Face", - "score": -0.1042904437177746, - "first_commit": "2021-04-21 11:28:24", - "latest_commit": "2021-05-18 22:56:15", + "score": -0.10603456349402794, 
+ "first_commit": "2023-09-12 09:29:10", + "latest_commit": "2023-10-02 10:25:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "deberta-large-japanese-wikipedia Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia", + "project_name": "deberta-large-japanese-wikipedia", + "downloads": 30, + "source": "Hugging Face", + "score": -0.10603456349402794, + "first_commit": "2022-07-05 22:01:16", + "latest_commit": "2023-02-27 10:15:35", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "roberta-long-japanese (jumanpp + sentencepiece, mC4 Japanese)", @@ -9674,35 +10105,77 @@ "project_name": "roberta-long-japanese", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, + "score": -0.10604917145145093, "first_commit": "2022-09-04 14:31:06", "latest_commit": "2022-10-04 23:36:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Donut (base-sized model, fine-tuned on visual novel like synthetic dataset ) ビジュアルノベル風画像の合成データセットでnaver-clova-ix/donut-baseを訓練したモデルです。 ", - "url": "https://huggingface.co./oshizo/donut-base-japanese-visual-novel", - "project_name": "donut-base-japanese-visual-novel", + "description": "日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", + "url": "https://huggingface.co./sonoisa/t5-base-japanese-mC4-Wikipedia", + "project_name": "t5-base-japanese-mC4-Wikipedia", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, - "first_commit": "2023-05-03 04:53:49", - "latest_commit": "2023-05-03 09:25:19", + "score": -0.10604917145145093, + "first_commit": "2021-06-30 12:53:09", + "latest_commit": "2021-09-23 18:29:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GPTQ", - "project_name": "japanese-stablelm-instruct-gamma-7B-GPTQ", + "description": "JAINU-Model (T5 fine-tuned model) JAINU is a Japanese - Ainu language machine translation model. 
", + "url": "https://huggingface.co./astremo/JAINU", + "project_name": "JAINU", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, - "first_commit": "2023-10-28 19:03:17", - "latest_commit": "2023-10-28 20:24:40", + "score": -0.10604917145145093, + "first_commit": "2022-04-30 13:57:31", + "latest_commit": "2022-05-22 05:51:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-base-short", + "project_name": "t5-base-short", + "downloads": 29, + "source": "Hugging Face", + "score": -0.10604917145145093, + "first_commit": "2023-04-26 08:20:52", + "latest_commit": "2023-05-10 10:00:23", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Chat-Vector-LLaVA-v1.5-7b-JA Model Card Model detail Model type: Chat-Vector-LLaVA-v1.5-7b-JA is a vision-language model that can converse about input images in Japanese.", + "url": "https://huggingface.co./toshi456/chat-vector-llava-v1.5-7b-ja", + "project_name": "chat-vector-llava-v1.5-7b-ja", + "downloads": 29, + "source": "Hugging Face", + "score": -0.10604917145145093, + "first_commit": "2024-05-06 04:07:19", + "latest_commit": "2024-05-06 11:33:32", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.06 + }, + { + "description": "Heron GIT Japanese ELYZA Llama 2 Fast 7B Model Details Heron GIT Japanese ELYZA Llama 2 Fast 7B is a vision-language model that can converse about input images.", + "url": "https://huggingface.co./turing-motors/heron-chat-git-ELYZA-fast-7b-v0", + "project_name": "heron-chat-git-ELYZA-fast-7b-v0", + "downloads": 29, + "source": "Hugging Face", + "score": -0.10604917145145093, + "first_commit": "2023-09-06 09:04:40", + "latest_commit": "2023-09-11 16:56:39", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "このモデルはcl-tohoku/bert-large-japanese-v2をファインチューニングして、固有表現抽出(NER)に用いれるようにしたものです。 ", @@ -9710,11 +10183,12 @@ "project_name": "bert-large-japanese-v2-finetuned-ner", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, + "score": -0.10604917145145093, "first_commit": "2023-05-26 09:38:08", "latest_commit": "2023-07-21 14:10:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.336 }, { "description": "日本語医療固有表現抽出モデル 概要 ソーシャル・コンピューティング研究室さまより公開されているMedTxt-CRを用いて、alabniiさまより公開されているRoBERTaをfine-tuningした固有表現抽出モデルです。 ", @@ -9722,71 +10196,77 @@ "project_name": "medtxt_ner_roberta", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, + "score": -0.10604917145145093, "first_commit": "2023-02-13 10:48:22", "latest_commit": "2023-02-15 13:43:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-base-japanese-wikipedia Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia", - "project_name": "deberta-base-japanese-wikipedia", + "description": "recruit-jp/japanese-image-classification-evaluation-dataset Overview Developed by: Recruit Co.", + "url": "https://huggingface.co./datasets/recruit-jp/japanese-image-classification-evaluation-dataset", + "project_name": "japanese-image-classification-evaluation-dataset", "downloads": 29, "source": "Hugging Face", - "score": -0.10430458384071616, - "first_commit": "2022-06-25 03:46:58", - "latest_commit": 
"2023-01-27 17:51:51", + "score": -0.10604917145145093, + "first_commit": "2023-12-19 09:17:24", + "latest_commit": "2024-01-22 10:48:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "BERT base Japanese model This repository contains a BERT base model trained on Japanese Wikipedia dataset.", - "url": "https://huggingface.co./colorfulscoop/bert-base-ja", - "project_name": "bert-base-ja", + "description": "deberta-base-japanese-wikipedia Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-wikipedia", + "project_name": "deberta-base-japanese-wikipedia", "downloads": 28, "source": "Hugging Face", - "score": -0.10431872396365771, - "first_commit": "2021-07-30 10:11:35", - "latest_commit": "2021-09-23 15:46:05", + "score": -0.10606377940887392, + "first_commit": "2022-06-25 03:46:58", + "latest_commit": "2023-01-27 17:51:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-large-japanese-unidic-ud-head Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-ud-head", - "project_name": "deberta-large-japanese-unidic-ud-head", + "description": "bert-base-japanese-v3-bpr-question-aio 「大規模言語モデル入門」の第9章で紹介している文書検索モデルBPRの質問エンコーダです。 ", + "url": "https://huggingface.co./llm-book/bert-base-japanese-v3-bpr-question-aio", + "project_name": "bert-base-japanese-v3-bpr-question-aio", "downloads": 28, "source": "Hugging Face", - "score": -0.10431872396365771, - "first_commit": "2022-06-19 00:10:56", - "latest_commit": "2023-11-05 17:51:08", + "score": -0.10606377940887392, + "first_commit": "2023-06-06 08:21:13", + "latest_commit": "2023-07-24 07:12:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-base-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", - "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", + "description": "llm-jp-13b-instruct-lora-jaster-v1.0", + "url": "https://huggingface.co./llm-jp/llm-jp-13b-instruct-lora-jaster-v1.0", + "project_name": "llm-jp-13b-instruct-lora-jaster-v1.0", "downloads": 28, "source": "Hugging Face", - "score": -0.10431872396365771, - "first_commit": "2024-01-17 04:41:20", - "latest_commit": "2024-01-17 04:46:18", + "score": -0.10606377940887392, + "first_commit": "2023-10-18 18:53:58", + "latest_commit": "2023-10-20 08:41:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルの概略 東方Projectのキャラクターである霧雨魔理沙とおしゃべりできるモデルです。 ", - "url": "https://huggingface.co./tsukemono/japanese-novel-gpt-j-6b-f16-marisa", - "project_name": "japanese-novel-gpt-j-6b-f16-marisa", + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-base-medium", + "project_name": "t5-base-medium", "downloads": 28, "source": "Hugging Face", - "score": -0.10431872396365771, - "first_commit": "2023-09-02 15:15:51", - "latest_commit": "2023-09-06 18:42:50", + "score": -0.10606377940887392, + 
"first_commit": "2023-04-26 08:27:09", + "latest_commit": "2023-05-10 10:00:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "英語+日本語T5事前学習済みモデル This is a T5 (Text-to-Text Transfer Transformer) model pretrained on English and Japanese balanced corpus. ", @@ -9794,83 +10274,77 @@ "project_name": "t5-base-english-japanese", "downloads": 28, "source": "Hugging Face", - "score": -0.10431872396365771, + "score": -0.10606377940887392, "first_commit": "2022-07-28 11:31:28", "latest_commit": "2022-08-27 09:07:53", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "BERT for Sentiment Analysis of Japanese Twitter", - "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter-sentiment", - "project_name": "bert-for-japanese-twitter-sentiment", - "downloads": 27, - "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2024-05-13 10:19:52", - "latest_commit": "2024-08-09 12:03:25", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "シサム語による説明 アイヌ語と日本語の双方向機械翻訳モデルです。 ", - "url": "https://huggingface.co./SoMiyagawa/AinuTrans-2.0", - "project_name": "AinuTrans-2.0", - "downloads": 27, + "description": "zenz-v2 zenz-v2はGPT-2アーキテクチャに基づくかな漢字変換タスクに特化した言語モデルです。", + "url": "https://huggingface.co./Miwa-Keita/zenz-v2-gguf", + "project_name": "zenz-v2-gguf", + "downloads": 28, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": null, - "latest_commit": null, + "score": -0.10606377940887392, + "first_commit": "2024-07-31 15:15:16", + "latest_commit": "2024-08-04 09:35:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.09509999999999999 }, { - "description": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-instruct-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", - "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", - "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", - "downloads": 27, + "description": "rinna/nekomata-7b-gguf Overview The model is the GGUF version of rinna/nekomata-7b.", + "url": "https://huggingface.co./rinna/nekomata-7b-gguf", + "project_name": "nekomata-7b-gguf", + "downloads": 28, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2024-01-16 12:23:01", - "latest_commit": "2024-01-16 12:27:54", + "score": -0.10606377940887392, + "first_commit": "2023-12-19 08:10:42", + "latest_commit": "2024-07-20 08:36:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.72 }, { - "description": "Mixtral-8x7B-v0.1-japanese Mixtral-8x7B-v0.1-japaneseはMixtral-8x7B-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", - "url": "https://huggingface.co./abeja/Mixtral-8x7B-v0.1-japanese", - "project_name": "Mixtral-8x7B-v0.1-japanese", + "description": "BERT for Sentiment Analysis of Japanese Twitter", + "url": "https://huggingface.co./LoneWolfgang/bert-for-japanese-twitter-sentiment", + "project_name": "bert-for-japanese-twitter-sentiment", "downloads": 27, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2024-04-16 03:06:14", - "latest_commit": 
"2024-04-20 09:14:10", + "score": -0.10607838736629689, + "first_commit": "2024-05-13 10:19:52", + "latest_commit": "2024-08-09 12:03:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "Convert from: drewschaub/whisper-large-v3-japanese-4k-steps Whisper large-v3 model for CTranslate2 This repository contains the conversion of drewschaub/whisper-large-v3-japanese-4k-steps to the CTranslate2 model format.", - "url": "https://huggingface.co./JhonVanced/whisper-large-v3-japanese-4k-steps-ct2", - "project_name": "whisper-large-v3-japanese-4k-steps-ct2", + "description": "This model is a merged version of qwen-14b-vntl and Qwen1.5-14B-Chat , aiming for the translation of Japanese context into Chinese.", + "url": "https://huggingface.co./GralchemOz/Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", + "project_name": "Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", "downloads": 27, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2024-02-20 13:41:17", - "latest_commit": "2024-02-22 01:11:59", + "score": -0.10607838736629689, + "first_commit": "2024-03-03 02:29:43", + "latest_commit": "2024-03-03 03:17:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "MPT-7B-base このモデルは、MosaicMLのllm-foundryリポジトリを使用してmosaicml/mpt-7bをファインチューニングしたモデルです。 ", - "url": "https://huggingface.co./Jumtra/mpt-7b-base", - "project_name": "mpt-7b-base", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-GPTQ", + "project_name": "japanese-stablelm-instruct-gamma-7B-GPTQ", "downloads": 27, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2023-05-24 14:30:09", - "latest_commit": "2023-06-26 01:08:31", + "score": -0.10607838736629689, + "first_commit": "2023-10-28 19:03:17", + "latest_commit": "2023-10-28 20:24:40", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.2 }, { "description": "SFCOCO Stable Diffusion Model Card SFCOCO Stable Diffusion is a Japanese-specific latent text-to-image diffusion model capable of generating photo-realistic images given any text input.", @@ -9878,47 +10352,64 @@ "project_name": "sfc2022-stable-diffusion", "downloads": 27, "source": "Hugging Face", - "score": -0.10433286408659927, + "score": -0.10607838736629689, "first_commit": "2022-12-18 04:50:47", "latest_commit": "2022-12-18 07:20:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-upos", - "project_name": "deberta-large-japanese-upos", + "description": "This is for (private) DEMO only.", + "url": "https://huggingface.co./Bagus/wav2vec2-xlsr-japanese-speech-emotion-recognition", + "project_name": "wav2vec2-xlsr-japanese-speech-emotion-recognition", "downloads": 27, "source": "Hugging Face", - "score": -0.10433286408659927, - "first_commit": "2022-05-27 06:50:55", - "latest_commit": "2024-07-26 16:00:59", + "score": -0.10607838736629689, + "first_commit": "2021-09-22 04:10:36", + "latest_commit": "2023-10-19 01:31:17", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.316 }, { - "description": "COMET-T5 ja Finetuned T5 on ATOMIC ja using a text-to-text language 
modeling objective.", - "url": "https://huggingface.co./nlp-waseda/comet-t5-base-japanese", - "project_name": "comet-t5-base-japanese", + "description": "Japanese Stable LM Instruct Gamma 7B Model Description", + "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", + "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", "downloads": 26, "source": "Hugging Face", - "score": -0.10434700420954082, - "first_commit": "2022-11-12 15:07:40", - "latest_commit": "2023-02-08 09:26:55", + "score": -0.10609299532371988, + "first_commit": "2023-10-28 20:16:15", + "latest_commit": "2023-10-28 15:16:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card for Model ID Fine tunned ASR model from distil-whisper/distil-large-v2.", - "url": "https://huggingface.co./spow12/Visual-novel-transcriptor", - "project_name": "Visual-novel-transcriptor", + "description": "Mixtral-8x7B-Instruct-v0.1-japanese Mixtral-8x7B-Instruct-v0.1-japaneseはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese", + "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese", "downloads": 26, "source": "Hugging Face", - "score": -0.10434700420954082, - "first_commit": "2024-04-15 01:43:08", - "latest_commit": "2024-08-12 12:39:52", + "score": -0.10609299532371988, + "first_commit": "2024-04-17 02:56:15", + "latest_commit": "2024-04-20 09:14:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 46.9 + }, + { + "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "downloads": 26, + "source": "Hugging Face", + "score": -0.10609299532371988, + "first_commit": "2024-07-26 21:06:54", + "latest_commit": "2024-07-26 21:37:02", + "languages": [], + "model_or_dataset": "model", + "model_size": 11.0 }, { "description": "HuggingFaceFW/fineweb-edu-classifierを再現するために、日本語データでtohoku-nlp/bert-base-japanese-v3を学習したモデルです。 ", @@ -9926,23 +10417,25 @@ "project_name": "fineweb-edu-classifier-ja-v2", "downloads": 26, "source": "Hugging Face", - "score": -0.10434700420954082, + "score": -0.10609299532371988, "first_commit": "2024-06-14 13:20:22", "latest_commit": "2024-06-14 13:28:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "NLLB-200 1.3B fine-tuned on Ascendance of a Bookworm", - "url": "https://huggingface.co./thefrigidliquidation/nllb-200-distilled-1.3B-bookworm", - "project_name": "nllb-200-distilled-1.3B-bookworm", + "description": "Convert from: drewschaub/whisper-large-v3-japanese-4k-steps Whisper large-v3 model for CTranslate2 This repository contains the conversion of drewschaub/whisper-large-v3-japanese-4k-steps to the CTranslate2 model format.", + "url": "https://huggingface.co./JhonVanced/whisper-large-v3-japanese-4k-steps-ct2", + "project_name": "whisper-large-v3-japanese-4k-steps-ct2", "downloads": 26, "source": "Hugging Face", - "score": -0.10434700420954082, - "first_commit": "2022-07-27 20:39:08", - "latest_commit": "2024-04-14 18:45:22", + "score": -0.10609299532371988, + "first_commit": "2024-02-20 13:41:17", + "latest_commit": "2024-02-22 01:11:59", "languages": [], - "model_or_dataset": "model" + 
"model_or_dataset": "model", + "model_size": null }, { "description": "description public RLHF dataset in Japanese the construction of the reward model was reformatted into a classification task.", @@ -9950,11 +10443,38 @@ "project_name": "open_preference-v0.3", "downloads": 26, "source": "Hugging Face", - "score": -0.10434700420954082, + "score": -0.10609299532371988, "first_commit": "2024-06-25 08:55:53", "latest_commit": "2024-07-04 12:55:42", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "Asian Language Treebank (ALT) Project ALT Parallel Corpusのうち、日英対訳部分のみを抽出したデータセットです。", + "url": "https://huggingface.co./datasets/hpprc/alt-parallel-en-ja", + "project_name": "alt-parallel-en-ja", + "downloads": 26, + "source": "Hugging Face", + "score": -0.10609299532371988, + "first_commit": "2024-03-21 02:24:27", + "latest_commit": "2024-03-21 12:40:15", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "Donut (base-sized model, fine-tuned on visual novel like synthetic dataset ) ビジュアルノベル風画像の合成データセットでnaver-clova-ix/donut-baseを訓練したモデルです。 ", + "url": "https://huggingface.co./oshizo/donut-base-japanese-visual-novel", + "project_name": "donut-base-japanese-visual-novel", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2023-05-03 04:53:49", + "latest_commit": "2023-05-03 09:25:19", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "japanese-soseki-gpt2-1b", @@ -9962,11 +10482,12 @@ "project_name": "japanese-soseki-gpt2-1b", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, + "score": -0.10610760328114287, "first_commit": "2022-03-03 04:53:15", "latest_commit": "2023-03-27 12:09:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "TakoMT", @@ -9974,47 +10495,116 @@ "project_name": "takomt", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, + "score": -0.10610760328114287, "first_commit": "2022-05-08 03:52:40", "latest_commit": "2023-08-15 17:32:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-small Fine-tuned openai/whisper-small on Japanese using Common Voice, JVS and JSUT.", - "url": "https://huggingface.co./Ivydata/whisper-small-japanese", - "project_name": "whisper-small-japanese", + "description": "deberta-large-japanese-unidic-ud-head Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic-ud-head", + "project_name": "deberta-large-japanese-unidic-ud-head", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, - "first_commit": "2023-05-19 10:42:27", - "latest_commit": "2023-05-19 10:50:13", + "score": -0.10610760328114287, + "first_commit": "2022-06-19 00:10:56", + "latest_commit": "2023-11-05 17:51:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-70B-AWQ", - "project_name": "japanese-stablelm-instruct-beta-70B-AWQ", + "description": "Japanese DialoGPT trained with Aozora (ja) 青空文庫のセリフで学習した日本語のDialoGPT Smallです(en) Japanese DialoGPT 
Small trained on Aozora Bunko.", + "url": "https://huggingface.co./akiFQC/japanese-dialogpt-small-aozora", + "project_name": "japanese-dialogpt-small-aozora", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, - "first_commit": "2023-11-02 15:45:23", - "latest_commit": "2023-11-09 18:16:16", + "score": -0.10610760328114287, + "first_commit": "2023-02-08 13:22:24", + "latest_commit": "2023-02-09 00:55:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model is traned with llm-japanese-dataset dataset.", - "url": "https://huggingface.co./ganchengguang/Yoko_13B_Japanese_QLoRA", - "project_name": "Yoko_13B_Japanese_QLoRA", + "description": "Llama-3-Umievo-itr014-Shizuko-8b このモデルは日本語に対応しているLlama-3ベースの4つのモデルを進化的アルゴリズムで進化的マージしたものです。", + "url": "https://huggingface.co./umiyuki/Llama-3-Umievo-itr014-Shizuko-8b", + "project_name": "Llama-3-Umievo-itr014-Shizuko-8b", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, - "first_commit": "2023-08-17 16:26:52", - "latest_commit": "2023-08-17 16:51:41", + "score": -0.10610760328114287, + "first_commit": "2024-06-08 05:25:05", + "latest_commit": "2024-06-08 07:47:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 + }, + { + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-3.0bpw-h6-exl2", + "project_name": "shisa-7b-v1-3.0bpw-h6-exl2", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2023-12-07 17:52:29", + "latest_commit": "2023-12-07 18:54:23", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "MPT-7B-base このモデルは、MosaicMLのllm-foundryリポジトリを使用してmosaicml/mpt-7bをファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./Jumtra/mpt-7b-base", + "project_name": "mpt-7b-base", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2023-05-24 14:30:09", + "latest_commit": "2023-06-26 01:08:31", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "COMET-T5 ja Finetuned T5 on ATOMIC ja using a text-to-text language modeling objective.", + "url": "https://huggingface.co./nlp-waseda/comet-t5-base-japanese", + "project_name": "comet-t5-base-japanese", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2022-11-12 15:07:40", + "latest_commit": "2023-02-08 09:26:55", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "This dataset is a clarified version of the image, context, and question set included in the Japanese-Heron-Bench for the construction of the Japanese evaluation benchmark suite.", + "url": "https://huggingface.co./datasets/Silviase/Japanese-Heron-Bench", + "project_name": "Japanese-Heron-Bench", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2024-07-16 08:12:30", + "latest_commit": "2024-07-28 12:33:15", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "common voice, google fleurs, JSUTv1.1, JAS_v2 (joujiboi/japanese-anime-speech-v2)", + "url": "https://huggingface.co./datasets/sin2piusc/jgca_v2_50k_2", + "project_name": "jgca_v2_50k_2", + "downloads": 25, + "source": "Hugging Face", + "score": -0.10610760328114287, + "first_commit": "2024-07-08 
18:06:27", + "latest_commit": "2024-07-24 18:58:08", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Dataset Details Dataset Type:Japanese LLaVA v1.5", @@ -10022,155 +10612,181 @@ "project_name": "LLaVA-v1.5-Instruct-620K-JA", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, + "score": -0.10610760328114287, "first_commit": "2024-04-10 05:04:58", "latest_commit": "2024-04-12 09:18:42", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "This dataset is a clarified version of the image, context, and question set included in the Japanese-Heron-Bench for the construction of the Japanese evaluation benchmark suite.", - "url": "https://huggingface.co./datasets/Silviase/Japanese-Heron-Bench", - "project_name": "Japanese-Heron-Bench", + "description": "Dataset details: Each entry in this dataset is a sentence-aligned Japanese web novel chapter and English fan translation.", + "url": "https://huggingface.co./datasets/NilanE/ParallelFiction-Ja_En-100k", + "project_name": "ParallelFiction-Ja_En-100k", "downloads": 25, "source": "Hugging Face", - "score": -0.10436114433248238, - "first_commit": "2024-07-16 08:12:30", - "latest_commit": "2024-07-28 12:33:15", + "score": -0.10610760328114287, + "first_commit": "2024-03-25 23:41:17", + "latest_commit": "2024-06-02 18:03:38", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "Summary This is a text classifier for assigning a JLPT level.", + "url": "https://huggingface.co./bennexx/cl-tohoku-bert-base-japanese-v3-jlpt-classifier", + "project_name": "cl-tohoku-bert-base-japanese-v3-jlpt-classifier", + "downloads": 24, + "source": "Hugging Face", + "score": -0.10612221123856586, + "first_commit": "2024-01-19 00:32:15", + "latest_commit": "2024-07-10 13:41:08", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "zenz-v2 zenz-v2はGPT-2アーキテクチャに基づくかな漢字変換タスクに特化した言語モデルです。", - "url": "https://huggingface.co./Miwa-Keita/zenz-v2-gguf", - "project_name": "zenz-v2-gguf", + "description": "BERT base Japanese model This repository contains a BERT base model trained on Japanese Wikipedia dataset.", + "url": "https://huggingface.co./colorfulscoop/bert-base-ja", + "project_name": "bert-base-ja", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2024-07-31 15:15:16", - "latest_commit": "2024-08-04 09:35:48", + "score": -0.10612221123856586, + "first_commit": "2021-07-30 10:11:35", + "latest_commit": "2021-09-23 15:46:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-AWQ", - "project_name": "japanese-stablelm-base-beta-70B-AWQ", + "description": "Google's mt5-base fine-tuned in Japanese to solve error detection and correction task. 
", + "url": "https://huggingface.co./kz/mt5base-finetuned-ECC-japanese-small", + "project_name": "mt5base-finetuned-ECC-japanese-small", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2023-11-06 11:33:47", - "latest_commit": "2023-11-09 18:16:05", + "score": -0.10612221123856586, + "first_commit": "2021-03-21 19:07:13", + "latest_commit": "2022-05-26 13:50:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GPTQ", - "project_name": "japanese-stablelm-base-beta-70B-GPTQ", + "description": "ELECTRA base Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-base-japanese-generator", + "project_name": "electra-base-japanese-generator", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2023-11-06 11:33:47", - "latest_commit": "2023-11-06 16:00:08", + "score": -0.10612221123856586, + "first_commit": "2021-11-15 17:23:50", + "latest_commit": "2023-10-21 13:21:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.035500000000000004 }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-GPTQ", - "project_name": "japanese-stablelm-instruct-beta-7B-GPTQ", + "description": "モデルの概略 東方Projectのキャラクターである霧雨魔理沙とおしゃべりできるモデルです。 ", + "url": "https://huggingface.co./tsukemono/japanese-novel-gpt-j-6b-f16-marisa", + "project_name": "japanese-novel-gpt-j-6b-f16-marisa", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2023-11-03 10:31:29", - "latest_commit": "2023-11-03 12:54:41", + "score": -0.10612221123856586, + "first_commit": "2023-09-02 15:15:51", + "latest_commit": "2023-09-06 18:42:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "MPT-7B-inst このモデルは、MosaicMLのllm-foundryリポジトリを使用してmosaicml/mpt-7b-instructをファインチューニングしたモデルです。 ", - "url": "https://huggingface.co./Jumtra/mpt-7b-inst", - "project_name": "mpt-7b-inst", + "description": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-base-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", + "project_name": "japanese-stablelm-base-gamma-7b-Mistral-7B-Instruct-v0.1", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2023-05-24 14:22:33", - "latest_commit": "2023-06-26 01:09:06", + "score": -0.10612221123856586, + "first_commit": "2024-01-17 04:41:20", + "latest_commit": "2024-01-17 04:46:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "NLLB 1.3B fine-tuned on Japanese to English Light Novel translation This model was fine-tuned on light and web novel for Japanese to English translation.", - "url": 
"https://huggingface.co./thefrigidliquidation/nllb-jaen-1.3B-lightnovels", - "project_name": "nllb-jaen-1.3B-lightnovels", + "description": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1 is a merge of the following models: mistralai/Mistral-7B-Instruct-v0.1 stabilityai/japanese-stablelm-instruct-gamma-7b 🧩 Configuration slices: - sources: - model: mistralai/Mistral-7B-Instruct-v0.1 layer_range:", + "url": "https://huggingface.co./MaziyarPanahi/japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", + "project_name": "japanese-stablelm-instruct-gamma-7b-Mistral-7B-Instruct-v0.1", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2022-10-01 00:43:59", - "latest_commit": "2023-06-04 13:38:43", + "score": -0.10612221123856586, + "first_commit": "2024-01-16 12:23:01", + "latest_commit": "2024-01-16 12:27:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Google's mt5-base fine-tuned in Japanese to solve error detection and correction task. ", - "url": "https://huggingface.co./kz/mt5base-finetuned-ECC-japanese-small", - "project_name": "mt5base-finetuned-ECC-japanese-small", + "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-small Fine-tuned openai/whisper-small on Japanese using Common Voice, JVS and JSUT.", + "url": "https://huggingface.co./Ivydata/whisper-small-japanese", + "project_name": "whisper-small-japanese", "downloads": 24, "source": "Hugging Face", - "score": -0.10437528445542393, - "first_commit": "2021-03-21 19:07:13", - "latest_commit": "2022-05-26 13:50:56", + "score": -0.10612221123856586, + "first_commit": "2023-05-19 10:42:27", + "latest_commit": "2023-05-19 10:50:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "BERT large Japanese (character-level tokenization with whole word masking, jawiki-20200831)", - "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char", - "project_name": "bert-large-japanese-char", - "downloads": 23, + "description": "NLLB-200 1.3B fine-tuned on Ascendance of a Bookworm", + "url": "https://huggingface.co./thefrigidliquidation/nllb-200-distilled-1.3B-bookworm", + "project_name": "nllb-200-distilled-1.3B-bookworm", + "downloads": 24, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2021-03-05 06:36:24", - "latest_commit": "2021-09-23 15:45:39", + "score": -0.10612221123856586, + "first_commit": "2022-07-27 20:39:08", + "latest_commit": "2024-04-14 18:45:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.37 }, { - "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-ner", - "project_name": "deberta-v2-base-japanese-finetuned-ner", - "downloads": 23, + "description": "rinna/nekomata-7b-instruction-gguf Overview The model is the GGUF version of rinna/nekomata-7b-instruction.", + "url": "https://huggingface.co./rinna/nekomata-7b-instruction-gguf", + "project_name": "nekomata-7b-instruction-gguf", + "downloads": 24, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2023-01-20 05:57:37", - "latest_commit": "2023-03-27 08:05:06", + "score": -0.10612221123856586, + "first_commit": "2023-12-19 08:11:08", + 
"latest_commit": "2024-07-20 08:38:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.72 }, { - "description": "Model Card for Model ID Original model elyza/ELYZA-japanese-Llama-2-7b-instruct which is based on Meta's \"Llama 2\" and has undergone additional pre-training in Japanese instruction.", - "url": "https://huggingface.co./dahara1/ELYZA-japanese-Llama-2-7b-instruct-AWQ", - "project_name": "ELYZA-japanese-Llama-2-7b-instruct-AWQ", + "description": "jpn-msa source group: Japanese target group: Malay (macrolanguage) OPUS readme: jpn-msa model: transformer-align source language(s): jpn jpn_Hani jpn_Hira jpn_Kana target language(s): ind", + "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-ms", + "project_name": "opus-mt-ja-ms", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2023-09-08 08:35:31", - "latest_commit": "2023-09-17 04:24:55", + "score": -0.10613681919598884, + "first_commit": "2020-08-19 00:29:11", + "latest_commit": "2023-08-16 11:59:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "JPNsensei-V2 Model Application", - "url": "https://huggingface.co./kanxxyc/JPNsensei-V2", - "project_name": "JPNsensei-V2", + "description": "Deepreneur-blue-lizard Model Description Deepreneur-blue-lizardは、MetaのLlama-2-7bに対して、Wikipediaや書籍等の日本語の学習データを用いて追加事前学習と独自データによるファインチューニングを実施したモデルです。", + "url": "https://huggingface.co./Deepreneur/blue-lizard", + "project_name": "blue-lizard", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2023-10-30 00:18:24", - "latest_commit": "2024-03-11 10:19:14", + "score": -0.10613681919598884, + "first_commit": "2024-02-05 16:29:48", + "latest_commit": "2024-02-12 14:43:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { "description": "Swallow-8Bは追加の日本語継続事前学習により日本語が大変流暢なLlama-3派生モデルです。", @@ -10178,143 +10794,168 @@ "project_name": "Meta-Llama-3.1-8B-Instruct-plus-Swallow-b", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, + "score": -0.10613681919598884, "first_commit": "2024-07-24 03:30:51", "latest_commit": "2024-07-24 04:19:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "Mixtral-8x7B-Instruct-v0.1-japanese Mixtral-8x7B-Instruct-v0.1-japaneseはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施したモデルです。", - "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese", - "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese", + "description": "youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", + "url": "https://huggingface.co./youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", + "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2024-04-17 02:56:15", - "latest_commit": "2024-04-20 09:14:27", + "score": -0.10613681919598884, + "first_commit": "2024-06-02 04:49:31", + "latest_commit": "2024-06-02 04:52:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 70.6 }, { - "description": "このモデルはdeberta-v2-large-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-large-japanese-finetuned-ner", - "project_name": 
"deberta-v2-large-japanese-finetuned-ner", + "description": "This model is traned with llm-japanese-dataset dataset.", + "url": "https://huggingface.co./ganchengguang/Yoko_13B_Japanese_QLoRA", + "project_name": "Yoko_13B_Japanese_QLoRA", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2023-05-10 13:22:23", - "latest_commit": "2023-07-21 14:10:02", + "score": -0.10613681919598884, + "first_commit": "2023-08-17 16:26:52", + "latest_commit": "2023-08-17 16:51:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "jpn-msa source group: Japanese target group: Malay (macrolanguage) OPUS readme: jpn-msa model: transformer-align source language(s): jpn jpn_Hani jpn_Hira jpn_Kana target language(s): ind", - "url": "https://huggingface.co./Helsinki-NLP/opus-mt-ja-ms", - "project_name": "opus-mt-ja-ms", + "description": "NLLB 1.3B fine-tuned on Japanese to English Light Novel translation This model was fine-tuned on light and web novel for Japanese to English translation.", + "url": "https://huggingface.co./thefrigidliquidation/nllb-jaen-1.3B-lightnovels", + "project_name": "nllb-jaen-1.3B-lightnovels", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2020-08-19 00:29:11", - "latest_commit": "2023-08-16 11:59:16", + "score": -0.10613681919598884, + "first_commit": "2022-10-01 00:43:59", + "latest_commit": "2023-06-04 13:38:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.37 }, { - "description": "Asian Language Treebank (ALT) Project ALT Parallel Corpusのうち、日英対訳部分のみを抽出したデータセットです。", - "url": "https://huggingface.co./datasets/hpprc/alt-parallel-en-ja", - "project_name": "alt-parallel-en-ja", + "description": "t5-base-japanese-web-8k (with Byte-fallback, 8K) Description megagonlabs/t5-base-japanese-web-8k is a T5 (Text-to-Text Transfer Transformer) model pre-trained on Japanese web texts.", + "url": "https://huggingface.co./megagonlabs/t5-base-japanese-web-8k", + "project_name": "t5-base-japanese-web-8k", "downloads": 23, "source": "Hugging Face", - "score": -0.10438942457836549, - "first_commit": "2024-03-21 02:24:27", - "latest_commit": "2024-03-21 12:40:15", + "score": -0.10613681919598884, + "first_commit": "2021-09-06 10:13:42", + "latest_commit": "2023-07-04 07:05:38", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Summary This is a text classifier for assigning a JLPT level.", - "url": "https://huggingface.co./bennexx/cl-tohoku-bert-base-japanese-v3-jlpt-classifier", - "project_name": "cl-tohoku-bert-base-japanese-v3-jlpt-classifier", + "description": "Aya_ja このデータセットはCohereForAI/aya_datasetの日本語インストラクションデータのみを抽出したデータセットです。 ", + "url": "https://huggingface.co./datasets/ryota39/Aya_ja", + "project_name": "Aya_ja", + "downloads": 23, + "source": "Hugging Face", + "score": -0.10613681919598884, + "first_commit": "2024-02-14 08:03:42", + "latest_commit": "2024-02-14 08:25:06", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-japanese-finetuned-ner", + "project_name": "deberta-v2-base-japanese-finetuned-ner", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2024-01-19 00:32:15", - 
"latest_commit": "2024-07-10 13:41:08", + "score": -0.10615142715341183, + "first_commit": "2023-01-20 05:57:37", + "latest_commit": "2023-03-27 08:05:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.112 }, { - "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-fin-generator", - "project_name": "electra-small-paper-japanese-fin-generator", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-GPTQ", + "project_name": "japanese-stablelm-instruct-beta-7B-GPTQ", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2021-10-04 13:38:47", - "latest_commit": "2023-10-21 13:21:24", + "score": -0.10615142715341183, + "first_commit": "2023-11-03 10:31:29", + "latest_commit": "2023-11-03 12:54:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.13 }, { - "description": "JAINU-Model (T5 fine-tuned model) JAINU is a Japanese - Ainu language machine translation model. ", - "url": "https://huggingface.co./astremo/JAINU", - "project_name": "JAINU", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-base-beta-70B-GPTQ", + "project_name": "japanese-stablelm-base-beta-70B-GPTQ", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2022-04-30 13:57:31", - "latest_commit": "2022-05-22 05:51:12", + "score": -0.10615142715341183, + "first_commit": "2023-11-06 11:33:47", + "latest_commit": "2023-11-06 16:00:08", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 9.1 }, { - "description": "Japanese DialoGPT trained with Aozora (ja) 青空文庫のセリフで学習した日本語のDialoGPT Smallです(en) Japanese DialoGPT Small trained on Aozora Bunko.", - "url": "https://huggingface.co./akiFQC/japanese-dialogpt-small-aozora", - "project_name": "japanese-dialogpt-small-aozora", + "description": "This model was created by merging intfloat/e5-mistral-7b-instruct and stabilityai/japanese-stablelm-base-gamma-7b.", + "url": "https://huggingface.co./oshizo/japanese-e5-mistral-7b_slerp", + "project_name": "japanese-e5-mistral-7b_slerp", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2023-02-08 13:22:24", - "latest_commit": "2023-02-09 00:55:31", + "score": -0.10615142715341183, + "first_commit": "2024-01-04 12:33:19", + "latest_commit": "2024-01-05 15:48:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "distilhubert-ft-japanese-50k Fine-tuned (more precisely, continue trained)", - "url": "https://huggingface.co./TylorShine/distilhubert-ft-japanese-50k", - "project_name": "distilhubert-ft-japanese-50k", + "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for Ninja-v1.0 The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 Ninja has the following changes compared to Mistral-7B-v0.1.", + "url": "https://huggingface.co./Local-Novel-LLM-project/Ninja-v1", + "project_name": "Ninja-v1", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - 
"first_commit": "2023-04-20 17:51:47", - "latest_commit": "2023-04-21 18:00:04", + "score": -0.10615142715341183, + "first_commit": "2024-04-24 10:28:30", + "latest_commit": "2024-05-04 04:07:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "日本語でtrainingしたllama2をinstruction用のデータセットでsftしたものになります base: https://huggingface.co./if001/llama2_ja_small trainingは以下のscript参照 https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")", - "url": "https://huggingface.co./if001/llama2_ja_small_instruct", - "project_name": "llama2_ja_small_instruct", + "description": "SambaLingo-Japanese-Base SambaLingo-Japanese-Base is a pretrained Bi-lingual Japanese and English model that adapts Llama-2-7b to Japanese by training on 42 billion tokens from the Japanese split of the Cultura-X dataset.", + "url": "https://huggingface.co./sambanovasystems/SambaLingo-Japanese-Base", + "project_name": "SambaLingo-Japanese-Base", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2023-10-21 05:21:13", - "latest_commit": "2023-10-23 19:39:51", + "score": -0.10615142715341183, + "first_commit": "2024-02-15 22:49:08", + "latest_commit": "2024-04-16 22:33:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model was created by merging intfloat/e5-mistral-7b-instruct and stabilityai/japanese-stablelm-base-gamma-7b.", - "url": "https://huggingface.co./oshizo/japanese-e5-mistral-7b_slerp", - "project_name": "japanese-e5-mistral-7b_slerp", + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-8.0bpw-h8-exl2", + "project_name": "shisa-7b-v1-8.0bpw-h8-exl2", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2024-01-04 12:33:19", - "latest_commit": "2024-01-05 15:48:24", + "score": -0.10615142715341183, + "first_commit": "2023-12-07 18:22:23", + "latest_commit": "2023-12-07 18:54:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Aerner LM-v1 事前学習から全部日本語で学習させたモデルです。 ", @@ -10322,23 +10963,51 @@ "project_name": "lm-v1", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, + "score": -0.10615142715341183, "first_commit": "2023-05-25 12:35:32", "latest_commit": "2023-05-25 13:35:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "The JaNLI (Japanese Adversarial NLI) dataset, inspired by the English HANS dataset, is designed to necessitate an understanding of Japanese linguistic phenomena and to illuminate the vulnerabilities of models.", - "url": "https://huggingface.co./datasets/hpprc/janli", - "project_name": "janli", + "description": "Japanese GPT2 Lyric Model Model description", + "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-medium", + "project_name": "gpt2-japanese-lyric-medium", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2023-04-05 12:25:01", - "latest_commit": "2023-04-11 13:40:37", + "score": -0.10615142715341183, + "first_commit": "2022-07-08 13:28:12", + "latest_commit": 
"2023-10-21 14:53:57", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 0.361 + }, + { + "description": "deberta-large-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-upos", + "project_name": "deberta-large-japanese-upos", + "downloads": 22, + "source": "Hugging Face", + "score": -0.10615142715341183, + "first_commit": "2022-05-27 06:50:55", + "latest_commit": "2024-07-26 16:00:59", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "BERT large Japanese (character-level tokenization with whole word masking, jawiki-20200831)", + "url": "https://huggingface.co./tohoku-nlp/bert-large-japanese-char", + "project_name": "bert-large-japanese-char", + "downloads": 22, + "source": "Hugging Face", + "score": -0.10615142715341183, + "first_commit": "2021-03-05 06:36:24", + "latest_commit": "2021-09-23 15:45:39", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Wikipedia日本語版からのQ&Aの自動生成 Mixtral 8x22bのGGUF(5bit)をベースに、Wikipedia日本語版の記事から、 自動生成コード1 自動生成コード2 を使ってQ&Aを作成しました。 計算には東京工業大学のスーパーコンピュータTSUBAME4.0を利用しました。 注意 回答にハルシネーション等が含まれている可能性があるので、フィルタリングをかける必要があるかもしれません。", @@ -10346,23 +11015,38 @@ "project_name": "AutoWikiQA", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, + "score": -0.10615142715341183, "first_commit": "2024-05-08 08:25:16", "latest_commit": "2024-05-09 01:05:15", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", - "url": "https://huggingface.co./datasets/llm-book/jawiki-paragraphs", - "project_name": "jawiki-paragraphs", + "description": "Wikipedia日本語版データセット(izumi-lab/wikipedia-ja-20230720)", + "url": "https://huggingface.co./datasets/shi3z/Japanese_Wikipedia_Conversation", + "project_name": "Japanese_Wikipedia_Conversation", "downloads": 22, "source": "Hugging Face", - "score": -0.10440356470130704, - "first_commit": "2023-06-03 03:04:05", - "latest_commit": "2023-06-03 03:04:43", + "score": -0.10615142715341183, + "first_commit": "2023-11-10 07:36:40", + "latest_commit": "2023-11-10 22:46:29", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null + }, + { + "description": "bert-japanese-ner このモデルは日本語の固有表現抽出タスクを目的として、京都大学 黒橋・褚・村脇研究室が公開しているBERT日本語Pretrainedモデルをベースにストックマーク株式会社が公開しているner-wikipedia-datasetでファインチューニングしたものです。 ", + "url": "https://huggingface.co./ken11/bert-japanese-ner", + "project_name": "bert-japanese-ner", + "downloads": 21, + "source": "Hugging Face", + "score": -0.1061660351108348, + "first_commit": "2021-11-13 16:28:23", + "latest_commit": "2021-11-14 02:34:01", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "electra-base-cyberbullying This is an ELECTRA Small model for the Japanese language finetuned for automatic cyberbullying detection.", @@ -10370,11 +11054,25 @@ "project_name": "electra-small-japanese-discriminator-cyberbullying", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, + "score": -0.1061660351108348, "first_commit": "2022-09-09 02:43:59", "latest_commit": "2022-11-01 07:14:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "このモデルはdeberta-v2-large-japaneseをファインチューニングして固有表現抽出(NER)に用いれるようにしたものです。 ", + 
"url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-large-japanese-finetuned-ner", + "project_name": "deberta-v2-large-japanese-finetuned-ner", + "downloads": 21, + "source": "Hugging Face", + "score": -0.1061660351108348, + "first_commit": "2023-05-10 13:22:23", + "latest_commit": "2023-07-21 14:10:02", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.339 }, { "description": "Heron GIT Japanese StableLM", @@ -10382,479 +11080,389 @@ "project_name": "heron-chat-git-ja-stablelm-base-7b-v0", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, + "score": -0.1061660351108348, "first_commit": "2023-09-06 09:19:59", "latest_commit": "2023-09-11 16:55:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "お知らせ より回答が適切になるように学習させたモデル、https://huggingface.co./hotchpotch/youri-7b-stf-qa-context-jaqket-jsquad-gptq もあります。 ", - "url": "https://huggingface.co./hotchpotch/youri-7b-sft-qa-context-jaqket-awq", - "project_name": "youri-7b-sft-qa-context-jaqket-awq", + "description": "t5-base-xlsum-ja", + "url": "https://huggingface.co./p1atdev/t5-base-xlsum-ja", + "project_name": "t5-base-xlsum-ja", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, - "first_commit": "2023-12-10 08:52:23", - "latest_commit": "2024-02-25 06:40:30", + "score": -0.1061660351108348, + "first_commit": "2023-10-06 03:18:28", + "latest_commit": "2023-11-20 09:25:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.248 }, { - "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", - "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-4bit", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-4bit", + "description": "JPNsensei-V2 Model Application", + "url": "https://huggingface.co./kanxxyc/JPNsensei-V2", + "project_name": "JPNsensei-V2", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, - "first_commit": "2024-07-26 21:06:54", - "latest_commit": "2024-07-26 21:37:02", + "score": -0.1061660351108348, + "first_commit": "2023-10-30 00:18:24", + "latest_commit": "2024-03-11 10:19:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "日本語でtrainingしたllama2 model size: 417.12M trainingは以下のscript参照https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")", - "url": "https://huggingface.co./if001/llama2_ja_small", - "project_name": "llama2_ja_small", + "description": "llm-jp-1.3b-v1.0-aya llm-jp's llm-jp-1.3b-v1.0 model fine-tuned on the Japanese examples from Cohere's aya dataset Model llm-jp-eval AVG kcoopermiller/llm-jp-1.3b-v1.0-aya 0.0698 llm-jp/llm-jp-1.3b-v1.0 0.047 How to use import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"kcoopermiller/llm-jp-1.3b-v1.0-aya\")", + "url": "https://huggingface.co./kcoopermiller/llm-jp-1.3b-v1.0-aya", + "project_name": "llm-jp-1.3b-v1.0-aya", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, - "first_commit": "2023-10-11 09:11:41", - "latest_commit": "2023-10-14 13:50:54", + "score": -0.1061660351108348, + "first_commit": "2024-02-23 05:39:39", + 
"latest_commit": "2024-02-29 23:48:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.32 }, { - "description": "whisper-large-v2-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-jp to the CTranslate2 model format.", - "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-jp", - "project_name": "faster-whisper-large-v2-jp", + "description": "llm-japanese-dataset-vanilla LLM構築用の日本語チャットデータセット izumi-lab/llm-japanese-dataset から,日英翻訳のデータセット等を抜いたものです. ", + "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset-vanilla", + "project_name": "llm-japanese-dataset-vanilla", "downloads": 21, "source": "Hugging Face", - "score": -0.1044177048242486, - "first_commit": "2023-07-07 06:16:06", - "latest_commit": "2023-07-07 18:09:09", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "bert-base-japanese-unidic-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-unidic-luw-upos", - "project_name": "bert-base-japanese-unidic-luw-upos", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2022-02-13 01:00:01", - "latest_commit": "2023-11-05 18:44:10", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Wav2Vec2-XLS-R-300M-Japanese-Hiragana Fine-tuned facebook/wav2vec2-xls-r-300m on Japanese Hiragana characters using the Common Voice and JSUT.", - "url": "https://huggingface.co./slplab/wav2vec2-xls-r-300m-japanese-hiragana", - "project_name": "wav2vec2-xls-r-300m-japanese-hiragana", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2022-09-16 07:34:58", - "latest_commit": "2022-09-16 11:01:54", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This model is traned with guanaco dataset.", - "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v1", - "project_name": "Yoko-7B-Japanese-v1", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-08-10 13:01:38", - "latest_commit": "2023-08-10 13:11:05", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This is a Japanese sentence-LUKE model.", - "url": "https://huggingface.co./cheonboy/sentence_embedding_japanese", - "project_name": "sentence_embedding_japanese", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-10-05 05:10:25", - "latest_commit": "2023-10-05 05:13:09", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-AWQ", - "project_name": "japanese-stablelm-instruct-gamma-7B-AWQ", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-10-28 19:03:17", - "latest_commit": "2023-11-09 18:16:33", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Deepreneur-blue-lizard Model Description Deepreneur-blue-lizardは、MetaのLlama-2-7bに対して、Wikipediaや書籍等の日本語の学習データを用いて追加事前学習と独自データによるファインチューニングを実施したモデルです。", - "url": "https://huggingface.co./Deepreneur/blue-lizard", - "project_name": "blue-lizard", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2024-02-05 16:29:48", - "latest_commit": "2024-02-12 14:43:33", - "languages": [], 
- "model_or_dataset": "model" - }, - { - "description": "In-progess long-context Japanese-English translation model based on tinyllama.", - "url": "https://huggingface.co./NilanE/tinyllama-en_ja-translation-v2", - "project_name": "tinyllama-en_ja-translation-v2", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2024-03-06 16:45:44", - "latest_commit": "2024-03-28 16:36:13", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "This repository contains a model trained (QLoRA-SFT)", - "url": "https://huggingface.co./taoki/phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", - "project_name": "phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2024-05-29 15:11:10", - "latest_commit": "2024-05-31 11:28:45", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "japanese-novel-gpt-j-6b https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b\" に合計216個の評価の高いなろう小説、青空文庫、ウィキペディアなどの文章をQLoRA学習させた小説生成用モデルです。 ", - "url": "https://huggingface.co./akineAItech/Jeneri-SAMA-6B", - "project_name": "Jeneri-SAMA-6B", - "downloads": 20, - "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2024-02-25 10:30:06", - "latest_commit": "2024-03-16 15:00:14", + "score": -0.1061660351108348, + "first_commit": "2023-05-23 14:45:27", + "latest_commit": "2024-02-17 16:17:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "OpenCALM-LARGE Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by CyberAgent, Inc. ", - "url": "https://huggingface.co./Mizuiro-sakura/open-calm-large-finetuned-databricks-dolly", - "project_name": "open-calm-large-finetuned-databricks-dolly", - "downloads": 20, + "description": "データセット概要 手動で作成したDatabricksに関する質問と回答ペアの日本語データセットです。 ", + "url": "https://huggingface.co./datasets/yulanfmy/databricks-qa-ja", + "project_name": "databricks-qa-ja", + "downloads": 21, "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-06-26 13:37:53", - "latest_commit": "2023-07-02 14:30:47", + "score": -0.1061660351108348, + "first_commit": "2023-05-15 13:27:23", + "latest_commit": "2023-05-15 14:55:06", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-large-medium", - "project_name": "t5-large-medium", + "description": "日本語ByT5事前学習済みモデル This is a ByT5 (a tokenizer-free extension of the Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. 
", + "url": "https://huggingface.co./sonoisa/byt5-small-japanese", + "project_name": "byt5-small-japanese", "downloads": 20, "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-04-26 08:31:45", - "latest_commit": "2023-05-10 10:00:45", + "score": -0.1061806430682578, + "first_commit": "2021-06-04 13:14:22", + "latest_commit": "2021-09-23 18:29:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese GPT2 Lyric Model Model description", - "url": "https://huggingface.co./skytnt/gpt2-japanese-lyric-medium", - "project_name": "gpt2-japanese-lyric-medium", + "description": "日本語でtrainingしたllama2 model size: 417.12M trainingは以下のscript参照https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")", + "url": "https://huggingface.co./if001/llama2_ja_small", + "project_name": "llama2_ja_small", "downloads": 20, "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2022-07-08 13:28:12", - "latest_commit": "2023-10-21 14:53:57", + "score": -0.1061806430682578, + "first_commit": "2023-10-11 09:11:41", + "latest_commit": "2023-10-14 13:50:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Filtered and modified version of Japanese/Chinese language pair data from WikiMatrix v1.", - "url": "https://huggingface.co./datasets/larryvrh/WikiMatrix-v1-Ja_Zh-filtered", - "project_name": "WikiMatrix-v1-Ja_Zh-filtered", + "description": "日本語でtrainingしたllama2をinstruction用のデータセットでsftしたものになります base: https://huggingface.co./if001/llama2_ja_small trainingは以下のscript参照 https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_small\")", + "url": "https://huggingface.co./if001/llama2_ja_small_instruct", + "project_name": "llama2_ja_small_instruct", "downloads": 20, "source": "Hugging Face", - "score": -0.10443184494719016, - "first_commit": "2023-04-08 03:07:25", - "latest_commit": "2023-04-08 05:16:37", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "bert-base-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-luw-upos", - "project_name": "bert-base-japanese-luw-upos", - "downloads": 19, - "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2021-10-26 13:26:38", - "latest_commit": "2022-09-18 19:43:18", + "score": -0.1061806430682578, + "first_commit": "2023-10-21 05:21:13", + "latest_commit": "2023-10-23 19:39:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "nlp-waseda/gpt2-small-japanese-wikipedia This model is Japanese GPT-2 pretrained on Japanese Wikipedia.", - "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese-wikipedia", - "project_name": "gpt2-small-japanese-wikipedia", - "downloads": 19, + "description": "お知らせ より回答が適切になるように学習させたモデル、https://huggingface.co./hotchpotch/youri-7b-stf-qa-context-jaqket-jsquad-gptq もあります。 ", + "url": 
"https://huggingface.co./hotchpotch/youri-7b-sft-qa-context-jaqket-awq", + "project_name": "youri-7b-sft-qa-context-jaqket-awq", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2021-12-28 01:22:40", - "latest_commit": "2021-12-28 15:31:38", + "score": -0.1061806430682578, + "first_commit": "2023-12-10 08:52:23", + "latest_commit": "2024-02-25 06:40:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.13 }, { - "description": "yuyuyui-chatbot", - "url": "https://huggingface.co./ushikado/yuyuyui-chatbot", - "project_name": "yuyuyui-chatbot", - "downloads": 19, + "description": "Introduction Who am I: Qishen Ha", + "url": "https://huggingface.co./haqishen/h2o-Llama-3-8B-Japanese-Instruct", + "project_name": "h2o-Llama-3-8B-Japanese-Instruct", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2021-05-04 14:52:12", - "latest_commit": "2021-05-23 13:27:10", + "score": -0.1061806430682578, + "first_commit": "2024-04-24 07:48:45", + "latest_commit": "2024-06-24 08:57:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "bart-base-japanese-news(base-sized model)", - "url": "https://huggingface.co./stockmark/bart-base-japanese-news", - "project_name": "bart-base-japanese-news", - "downloads": 19, + "description": "Model Card for Model ID Fine tunned ASR model from distil-whisper/distil-large-v2.", + "url": "https://huggingface.co./spow12/Visual-novel-transcriptor", + "project_name": "Visual-novel-transcriptor", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-01-20 04:23:07", - "latest_commit": "2023-12-08 03:39:50", + "score": -0.1061806430682578, + "first_commit": "2024-04-15 01:43:08", + "latest_commit": "2024-08-12 12:39:52", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.756 }, { - "description": "This model is traned with guanaco dataset.", - "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v0", - "project_name": "Yoko-7B-Japanese-v0", - "downloads": 19, + "description": "whisper-large-v2-jp model for CTranslate2 This repository contains the conversion of vumichien/whisper-large-v2-jp to the CTranslate2 model format.", + "url": "https://huggingface.co./arc-r/faster-whisper-large-v2-jp", + "project_name": "faster-whisper-large-v2-jp", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-08-09 16:28:38", - "latest_commit": "2023-08-10 13:00:34", + "score": -0.1061806430682578, + "first_commit": "2023-07-07 06:16:06", + "latest_commit": "2023-07-07 18:09:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-3.0bpw-h6-exl2", - "downloads": 19, + "description": "Model card for model ID", + "url": "https://huggingface.co./retrieva-jp/t5-large-medium", + "project_name": "t5-large-medium", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-10-28 20:16:15", - "latest_commit": "2023-10-28 15:16:25", + "score": -0.1061806430682578, + "first_commit": "2023-04-26 08:31:45", + 
"latest_commit": "2023-05-10 10:00:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "(English part follows Japanese one.", - "url": "https://huggingface.co./tohoku-nlp/stable-diffusion-xl-jp-base-1.0", - "project_name": "stable-diffusion-xl-jp-base-1.0", - "downloads": 19, + "description": "bert-base-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-unidic-luw-upos", + "project_name": "bert-base-japanese-unidic-luw-upos", + "downloads": 20, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-11-06 05:02:27", - "latest_commit": "2023-11-06 05:37:01", + "score": -0.1061806430682578, + "first_commit": "2022-02-13 01:00:01", + "latest_commit": "2023-11-05 18:44:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model description Cyberagent様のcyberagent/calm2-7b-chatを追加学習した、作家さん用アシスタントAIのアルファ版です。 ", - "url": "https://huggingface.co./falche/opennovel_oc2_01a_7b", - "project_name": "opennovel_oc2_01a_7b", + "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-fin-generator", + "project_name": "electra-small-paper-japanese-fin-generator", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-12-09 13:36:32", - "latest_commit": "2023-12-09 15:19:57", + "score": -0.10619525102568078, + "first_commit": "2021-10-04 13:38:47", + "latest_commit": "2023-10-21 13:21:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.00491 }, { - "description": "llm-jp-1.3b-v1.0-aya llm-jp's llm-jp-1.3b-v1.0 model fine-tuned on the Japanese examples from Cohere's aya dataset Model llm-jp-eval AVG kcoopermiller/llm-jp-1.3b-v1.0-aya 0.0698 llm-jp/llm-jp-1.3b-v1.0 0.047 How to use import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"kcoopermiller/llm-jp-1.3b-v1.0-aya\")", - "url": "https://huggingface.co./kcoopermiller/llm-jp-1.3b-v1.0-aya", - "project_name": "llm-jp-1.3b-v1.0-aya", + "description": "nlp-waseda/gpt2-small-japanese-wikipedia This model is Japanese GPT-2 pretrained on Japanese Wikipedia.", + "url": "https://huggingface.co./nlp-waseda/gpt2-small-japanese-wikipedia", + "project_name": "gpt2-small-japanese-wikipedia", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2024-02-23 05:39:39", - "latest_commit": "2024-02-29 23:48:58", + "score": -0.10619525102568078, + "first_commit": "2021-12-28 01:22:40", + "latest_commit": "2021-12-28 15:31:38", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "output 筑波 2.0035860538482666 つくば 1.6586617231369019 研究 1.6227693557739258 大学 1.3798155784606934 実験 0.5522942543029785 学生 0.42351895570755005 分析 0.37844282388687134 国立 0.3685397505760193 キャンパス 0.36495038866996765 茨城 0.3056415021419525 科学 0.2876652181148529 関東 0.24301066994667053 地域 0.21340851485729218 実施 0.1976248174905777 先端 0.192025288939476 サイト 0.11629197001457214 調査 0.09159307181835175 プロジェクト 0.08552580326795578 議論 0.07484486699104309 検討 0.007034890353679657", - "url": "https://huggingface.co./aken12/splade-japanese-efficient", - "project_name": 
"splade-japanese-efficient", + "description": "Wav2Vec2-XLS-R-300M-Japanese-Hiragana Fine-tuned facebook/wav2vec2-xls-r-300m on Japanese Hiragana characters using the Common Voice and JSUT.", + "url": "https://huggingface.co./slplab/wav2vec2-xls-r-300m-japanese-hiragana", + "project_name": "wav2vec2-xls-r-300m-japanese-hiragana", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2024-03-11 03:02:28", - "latest_commit": "2024-03-16 16:27:35", + "score": -0.10619525102568078, + "first_commit": "2022-09-16 07:34:58", + "latest_commit": "2022-09-16 11:01:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-3.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-3.0bpw-h6-exl2", + "description": "シサム語による説明 アイヌ語と日本語の双方向機械翻訳モデルです。 ", + "url": "https://huggingface.co./SoMiyagawa/AinuTrans-2.0", + "project_name": "AinuTrans-2.0", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-12-07 17:52:29", - "latest_commit": "2023-12-07 18:54:23", + "score": -0.10619525102568078, + "first_commit": null, + "latest_commit": null, "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-japanese-base-finetuned-commonsenseqa", - "project_name": "deberta-v2-japanese-base-finetuned-commonsenseqa", + "description": "OpenCALM-LARGE Model Description OpenCALM is a suite of decoder-only language models pre-trained on Japanese datasets, developed by CyberAgent, Inc. 
", + "url": "https://huggingface.co./Mizuiro-sakura/open-calm-large-finetuned-databricks-dolly", + "project_name": "open-calm-large-finetuned-databricks-dolly", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-02-01 01:02:44", - "latest_commit": "2023-05-26 15:05:18", + "score": -0.10619525102568078, + "first_commit": "2023-06-26 13:37:53", + "latest_commit": "2023-07-02 14:30:47", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-base-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-upos", - "project_name": "deberta-base-japanese-upos", + "description": "zenz-v1 Checkpoints zenz-v1 is a language model specialized for kana-kanji conversion tasks based on the GPT-2 architecture.", + "url": "https://huggingface.co./Miwa-Keita/zenz-v1-checkpoints", + "project_name": "zenz-v1-checkpoints", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2022-05-24 08:12:05", - "latest_commit": "2024-07-26 15:59:24", + "score": -0.10619525102568078, + "first_commit": "2024-06-28 14:26:33", + "latest_commit": "2024-06-28 14:53:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "日本語VL-T5事前学習済みモデル", - "url": "https://huggingface.co./sonoisa/vl-t5-base-japanese", - "project_name": "vl-t5-base-japanese", + "description": "This repository contains a model trained (QLoRA-SFT)", + "url": "https://huggingface.co./taoki/phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", + "project_name": "phi3-mini-4k-qlora-jmultiwoz-dolly-amenokaku-alpaca_jp_python-GGUF", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2021-10-03 11:54:43", - "latest_commit": "2021-10-04 11:13:35", + "score": -0.10619525102568078, + "first_commit": "2024-05-29 15:11:10", + "latest_commit": "2024-05-31 11:28:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 3.82 }, { - "description": "bert-japanese-ner このモデルは日本語の固有表現抽出タスクを目的として、京都大学 黒橋・褚・村脇研究室が公開しているBERT日本語Pretrainedモデルをベースにストックマーク株式会社が公開しているner-wikipedia-datasetでファインチューニングしたものです。 ", - "url": "https://huggingface.co./ken11/bert-japanese-ner", - "project_name": "bert-japanese-ner", + "description": "nagisa_bert A BERT model for nagisa.", + "url": "https://huggingface.co./taishi-i/nagisa_bert", + "project_name": "nagisa_bert", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2021-11-13 16:28:23", - "latest_commit": "2021-11-14 02:34:01", + "score": -0.10619525102568078, + "first_commit": "2022-09-25 13:12:57", + "latest_commit": "2023-09-15 01:28:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "This pre-training dataset was created for shisa-base-7b-v1.", - "url": "https://huggingface.co./datasets/augmxnt/shisa-pretrain-en-ja-v1", - "project_name": "shisa-pretrain-en-ja-v1", + "description": "bert-base-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-luw-upos", + "project_name": "bert-base-japanese-luw-upos", "downloads": 19, "source": "Hugging Face", - "score": -0.10444598507013171, - "first_commit": "2023-11-19 09:48:04", - "latest_commit": "2023-12-05 20:08:51", + "score": -0.10619525102568078, + "first_commit": 
"2021-10-26 13:26:38", + "latest_commit": "2022-09-18 19:43:18", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "日本語ByT5事前学習済みモデル This is a ByT5 (a tokenizer-free extension of the Text-to-Text Transfer Transformer) model pretrained on Japanese corpus. ", - "url": "https://huggingface.co./sonoisa/byt5-small-japanese", - "project_name": "byt5-small-japanese", - "downloads": 18, + "description": "The JaNLI (Japanese Adversarial NLI) dataset, inspired by the English HANS dataset, is designed to necessitate an understanding of Japanese linguistic phenomena and to illuminate the vulnerabilities of models.", + "url": "https://huggingface.co./datasets/hpprc/janli", + "project_name": "janli", + "downloads": 19, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2021-06-04 13:14:22", - "latest_commit": "2021-09-23 18:29:53", + "score": -0.10619525102568078, + "first_commit": "2023-04-05 12:25:01", + "latest_commit": "2023-04-11 13:40:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Bloom model trained on Japanese corpus.", - "url": "https://huggingface.co./Aruno/Bloom-JP-160m", - "project_name": "Bloom-JP-160m", - "downloads": 18, + "description": "GitHub リポジトリ singletongue/wikipedia-utils で公開されているデータセットを利用しています。 ", + "url": "https://huggingface.co./datasets/llm-book/jawiki-paragraphs", + "project_name": "jawiki-paragraphs", + "downloads": 19, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-04-24 04:47:05", - "latest_commit": "2023-04-24 05:12:10", + "score": -0.10619525102568078, + "first_commit": "2023-06-03 03:04:05", + "latest_commit": "2023-06-03 03:04:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "transformer-lm-japanese-0.1b", - "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-0.1b", - "project_name": "transformer-lm-japanese-0.1b", + "description": "ku-nlp/roberta-large-japanese-char-wwm Model description This is a Japanese RoBERTa large model pre-trained on Japanese Wikipedia and the Japanese portion of CC-100.", + "url": "https://huggingface.co./ku-nlp/roberta-large-japanese-char-wwm", + "project_name": "roberta-large-japanese-char-wwm", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-07-12 02:11:11", - "latest_commit": "2024-06-03 06:17:19", + "score": -0.10620985898310377, + "first_commit": "2022-09-18 08:10:44", + "latest_commit": "2023-03-19 01:58:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.323 }, { - "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", - "url": "https://huggingface.co./masato12/bert-base-japanese-v3-jsts-with-tokenizer", - "project_name": "bert-base-japanese-v3-jsts-with-tokenizer", + "description": "This model is traned with guanaco dataset.", + "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v1", + "project_name": "Yoko-7B-Japanese-v1", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2024-07-21 04:58:46", - "latest_commit": "2024-07-21 18:21:41", + "score": -0.10620985898310377, + "first_commit": "2023-08-10 13:01:38", + "latest_commit": "2023-08-10 13:11:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 
null }, { - "description": "japanese-novel-gpt-j-6b https://huggingface.co./AIBunCho/japanese-novel-gpt-j-6b\" に合計216個の評価の高いなろう小説、青空文庫、ウィキペディアなどの文章をQLoRA学習させた小説生成用モデルです。 ", - "url": "https://huggingface.co./akineAItech/Jeneri-SAMA-6B", - "project_name": "Jeneri-SAMA-6B", - "downloads": 18, "source": "Hugging Face", - "score": -0.10620985898310377, - "first_commit": "2024-02-25 10:30:06", - "latest_commit": "2024-03-16 15:00:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.05 }, { - "description": "In-progress long-context Japanese-English translation model based on tinyllama.", - "url": "https://huggingface.co./NilanE/tinyllama-en_ja-translation-v2", - "project_name": "tinyllama-en_ja-translation-v2", - "downloads": 18, "source": "Hugging Face", - "score": -0.10620985898310377, - "first_commit": "2024-03-06 16:45:44", - "latest_commit": "2024-03-28 16:36:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.1 }, { "description": "Model Card for Model ID 料理を検索するための質問文から、検索検索用キーワードである固有表現を抽出します Model Details Model Description 例えば、「東京の肉料理で、春に食べられる、鶏肉を使った料理を教えてください」という文章を入力すると、 「東京 → 都道府県/地方(AREA)」 「肉料理 → 種類(TYPE)」 「春 → 季節(SZN)", @@ -10862,59 +11470,64 @@ "project_name": "bert-japanese-token-classification-search-local-cuisine", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, + "score": -0.10620985898310377, "first_commit": "2024-04-28 06:45:18", "latest_commit": "2024-05-12 07:20:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-8.0bpw-h8-exl2", - "project_name": "SambaLingo-Japanese-Chat-8.0bpw-h8-exl2", + "description": "Llama3ベースの日本語医療LLM MedLlama3-JP このモデルはLlama3の継続学習により作成された4種類のLLMから成るマージモデルです。 ", + "url": "https://huggingface.co./EQUES/MedLLama3-JP-v2", + "project_name": "MedLLama3-JP-v2", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2024-03-07 06:57:50", - "latest_commit": "2024-03-07 07:00:51", + "score": -0.10620985898310377, + "first_commit": "2024-07-01 13:42:17", + "latest_commit": "2024-07-13 06:12:43", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, - { - "description": "This model is a merged version of qwen-14b-vntl and Qwen1.5-14B-Chat , aiming for the translation of Japanese context into Chinese.", - "url": "https://huggingface.co./GralchemOz/Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", - "project_name": "Qwen1.5-14B-vntl-jp2zh-4.5bpw-h6-exl2", { + "description": "bert-base-japanese-v3-jsts 「大規模言語モデル入門」の第5章で紹介している(意味類似度計算)のモデルです。 ", + "url": "https://huggingface.co./masato12/bert-base-japanese-v3-jsts-with-tokenizer", + 
"project_name": "bert-base-japanese-v3-jsts-with-tokenizer", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2024-03-03 02:29:43", - "latest_commit": "2024-03-03 03:17:09", + "score": -0.10620985898310377, + "first_commit": "2024-07-21 04:58:46", + "latest_commit": "2024-07-21 18:21:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model Card Summary This model was trained using H2O LLM Studio.", - "url": "https://huggingface.co./yukismd/JapaneseQuizChatbot_v1", - "project_name": "JapaneseQuizChatbot_v1", + "description": "(English part follows Japanese one.", + "url": "https://huggingface.co./tohoku-nlp/stable-diffusion-xl-jp-base-1.0", + "project_name": "stable-diffusion-xl-jp-base-1.0", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-06-08 00:25:01", - "latest_commit": "2023-06-08 00:48:50", + "score": -0.10620985898310377, + "first_commit": "2023-11-06 05:02:27", + "latest_commit": "2023-11-06 05:37:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはdeberta-v2-tiny-japaneseをファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-japanese-tiny-finetuned-commonsenseqa", - "project_name": "deberta-v2-japanese-tiny-finetuned-commonsenseqa", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-gamma-7B-AWQ", + "project_name": "japanese-stablelm-instruct-gamma-7B-AWQ", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-05-11 10:28:33", - "latest_commit": "2023-05-26 15:01:57", + "score": -0.10620985898310377, + "first_commit": "2023-10-28 19:03:17", + "latest_commit": "2023-11-09 18:16:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.2 }, { "description": "Fine-tuned Japanese Wav2Vec2 model for speech recognition using XLSR-53 large Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using Common Voice, JVS and JSUT.", @@ -10922,179 +11535,194 @@ "project_name": "wav2vec2-large-xlsr-53-japanese", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, + "score": -0.10620985898310377, "first_commit": "2023-05-11 08:47:29", "latest_commit": "2023-05-12 02:15:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Model card for model ID", - "url": "https://huggingface.co./retrieva-jp/t5-base-medium", - "project_name": "t5-base-medium", + "description": "Manga OCR Optical character recognition for Japanese text, with the main focus being Japanese manga.", + "url": "https://huggingface.co./TeamFnord/manga-ocr", + "project_name": "manga-ocr", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-04-26 08:27:09", - "latest_commit": "2023-05-10 10:00:12", + "score": -0.10620985898310377, + "first_commit": "2022-01-15 17:39:06", + "latest_commit": "2022-02-10 07:50:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-large-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora", - "project_name": 
"deberta-large-japanese-aozora", + "description": "yuyuyui-chatbot", + "url": "https://huggingface.co./ushikado/yuyuyui-chatbot", + "project_name": "yuyuyui-chatbot", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2022-05-26 14:46:58", - "latest_commit": "2023-01-14 00:27:22", + "score": -0.10620985898310377, + "first_commit": "2021-05-04 14:52:12", + "latest_commit": "2021-05-23 13:27:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-small-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-upos", - "project_name": "deberta-small-japanese-upos", + "description": "cyberagent/calm2-7b-chatの出力を人手でチェック・修正することで作成した日本語Instructionデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/jimba-instuction-1k-beta", + "project_name": "jimba-instuction-1k-beta", "downloads": 18, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2022-05-23 23:55:56", - "latest_commit": "2024-07-26 15:38:41", + "score": -0.10620985898310377, + "first_commit": "2024-02-29 15:23:48", + "latest_commit": "2024-04-25 12:49:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Google's mt5-base fine-tuned in Japanese to summarize patent claims in a limited Pharmaceutical domain. ", - "url": "https://huggingface.co./kz/mt5base-finetuned-patentsum-japanese-small", - "project_name": "mt5base-finetuned-patentsum-japanese-small", - "downloads": 18, + "description": "Tanuki-Zero Base model: llm-jp/llm-jp-13b-v1.0 Instruction data: Randomly sampled, 15k Jaster dataset (train) Code is here.", + "url": "https://huggingface.co./kanhatakeyama/Tanuki-ZeRo", + "project_name": "Tanuki-ZeRo", + "downloads": 17, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2021-04-10 00:31:15", - "latest_commit": "2022-05-19 06:50:32", + "score": -0.10622446694052676, + "first_commit": "2024-03-29 23:31:35", + "latest_commit": "2024-03-30 00:51:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 12.9 }, { - "description": "llm-japanese-dataset-vanilla LLM構築用の日本語チャットデータセット izumi-lab/llm-japanese-dataset から,日英翻訳のデータセット等を抜いたものです. 
", - "url": "https://huggingface.co./datasets/izumi-lab/llm-japanese-dataset-vanilla", - "project_name": "llm-japanese-dataset-vanilla", - "downloads": 18, + "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit The Model mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit was converted to MLX format from cyberagent/Llama-3.1-70B-Japanese-Instruct-2407 using mlx-lm version 0.16.1.", + "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit", + "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-8bit", + "downloads": 17, "source": "Hugging Face", - "score": -0.10446012519307327, - "first_commit": "2023-05-23 14:45:27", - "latest_commit": "2024-02-17 16:17:18", + "score": -0.10622446694052676, + "first_commit": "2024-07-26 13:05:01", + "latest_commit": "2024-07-26 14:05:31", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 19.8 }, { - "description": "Tanuki-Zero Base model: llm-jp/llm-jp-13b-v1.0 Instruction data: Randomly sampled, 15k Jaster dataset (train) Code is here.", - "url": "https://huggingface.co./kanhatakeyama/Tanuki-ZeRo", - "project_name": "Tanuki-ZeRo", + "description": "deberta-small-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-upos", + "project_name": "deberta-small-japanese-upos", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-03-29 23:31:35", - "latest_commit": "2024-03-30 00:51:03", + "score": -0.10622446694052676, + "first_commit": "2022-05-23 23:55:56", + "latest_commit": "2024-07-26 15:38:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ELECTRA small Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-discriminator", - "project_name": "electra-small-japanese-discriminator", + "description": "このモデルはluke-japanese-baseをファインチューニングして、JNLI(文章の関係性判別)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-jnli", + "project_name": "luke-japanese-base-finetuned-jnli", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2021-10-04 13:42:57", - "latest_commit": "2022-12-09 00:41:39", + "score": -0.10622446694052676, + "first_commit": "2023-02-11 18:39:14", + "latest_commit": "2023-07-21 14:09:44", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.279 }, { - "description": "Unihan LM: Coarse-to-Fine Chinese-Japanese Language Model Pretraining with the Unihan Database Model description Chinese and Japanese share many characters with similar surface morphology.", - "url": "https://huggingface.co./microsoft/unihanlm-base", - "project_name": "unihanlm-base", + "description": "This model is traned with guanaco dataset.", + "url": "https://huggingface.co./ganchengguang/Yoko-7B-Japanese-v0", + "project_name": "Yoko-7B-Japanese-v0", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2020-09-27 11:23:02", - "latest_commit": "2021-09-22 11:00:56", + "score": -0.10622446694052676, + "first_commit": "2023-08-09 16:28:38", + "latest_commit": "2023-08-10 13:00:34", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "お知らせ 
より回答が適切になるように学習させたモデル、https://huggingface.co./hotchpotch/youri-7b-stf-qa-context-jaqket-jsquad-gptq もあります。 ", - "url": "https://huggingface.co./hotchpotch/youri-7b-sft-qa-context-jaqket-gptq", - "project_name": "youri-7b-sft-qa-context-jaqket-gptq", + "description": "Model Card for Model ID Original model elyza/ELYZA-japanese-Llama-2-7b-instruct which is based on Meta's \"Llama 2\" and has undergone additional pre-training in Japanese instruction.", + "url": "https://huggingface.co./dahara1/ELYZA-japanese-Llama-2-7b-instruct-AWQ", + "project_name": "ELYZA-japanese-Llama-2-7b-instruct-AWQ", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2023-12-08 03:51:28", - "latest_commit": "2024-02-25 06:40:05", + "score": -0.10622446694052676, + "first_commit": "2023-09-08 08:35:31", + "latest_commit": "2023-09-17 04:24:55", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-8.0bpw-h8-exl2", - "project_name": "shisa-7b-v1-8.0bpw-h8-exl2", + "description": "Japanese Stable LM Instruct Gamma 7B Model Description", + "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", + "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2023-12-07 18:22:23", - "latest_commit": "2023-12-07 18:54:33", + "score": -0.10622446694052676, + "first_commit": "2023-10-28 20:43:59", + "latest_commit": "2023-10-28 15:44:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Plugin", - "project_name": "Orion-14B-Chat-Plugin", + "description": "Japanese Stable LM Instruct Gamma 7B Model Description", + "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-6.0bpw-h6-exl2", + "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-6.0bpw-h6-exl2", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-01-16 12:19:45", - "latest_commit": "2024-03-26 10:12:37", + "score": -0.10622446694052676, + "first_commit": "2023-10-28 20:36:54", + "latest_commit": "2023-10-28 15:37:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", - "project_name": "SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", + "description": "Model description Cyberagent様のcyberagent/calm2-7b-chatを追加学習した、作家さん用アシスタントAIのアルファ版です。 ", + "url": "https://huggingface.co./falche/opennovel_oc2_01a_7b", + "project_name": "opennovel_oc2_01a_7b", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-03-07 06:50:19", - "latest_commit": "2024-03-07 06:51:42", + "score": -0.10622446694052676, + "first_commit": "2023-12-09 13:36:32", + "latest_commit": "2023-12-09 15:19:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Introduction Who am I: Qishen Ha", - "url": 
"https://huggingface.co./haqishen/h2o-Llama-3-8B-Japanese-Instruct", - "project_name": "h2o-Llama-3-8B-Japanese-Instruct", + "description": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", + "project_name": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-04-24 07:48:45", - "latest_commit": "2024-06-24 08:57:49", + "score": -0.10622446694052676, + "first_commit": "2024-03-03 12:51:40", + "latest_commit": "2024-03-03 13:39:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 21.5 }, { - "description": "DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", - "url": "https://huggingface.co./DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", - "project_name": "alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "description": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", + "project_name": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-04-20 08:49:14", - "latest_commit": "2024-04-20 08:49:19", + "score": -0.10622446694052676, + "first_commit": "2024-03-07 13:21:38", + "latest_commit": "2024-03-07 13:47:58", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 11.2 }, { "description": "はじめに GoogleのGemma-2Bを日本語で使えるように継続事前学習を施した、商用利用可能なベースモデルです。 ", @@ -11102,263 +11730,311 @@ "project_name": "suzume-poc", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, + "score": -0.10622446694052676, "first_commit": "2024-03-14 09:51:38", "latest_commit": "2024-03-17 15:05:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 2.51 }, { - "description": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", - "project_name": "ELYZA-japanese-Llama-2-fast-MoE-2x7B-v0.1-GGUF", + "description": "Overview This model is based on rinna's [rinna/llama-3-youko-8b], fine-tuned using LoRA on a small number of parallel sentences from English to Japanese.", + "url": "https://huggingface.co./lyu-boxuan/llama-3-youko-8b-En-Ja-MT-LoRA", + "project_name": "llama-3-youko-8b-En-Ja-MT-LoRA", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-03-07 13:21:38", - "latest_commit": "2024-03-07 13:47:58", + "score": -0.10622446694052676, + "first_commit": "2024-05-10 14:33:57", + "latest_commit": "2024-05-21 14:54:46", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { - "description": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", - "project_name": "ELYZA-japanese-Llama-2-MoE-2x13B-v0.1-GGUF", + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-5.0bpw-h6-exl2", + "project_name": "shisa-7b-v1-5.0bpw-h6-exl2", "downloads": 17, "source": 
"Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-03-03 12:51:40", - "latest_commit": "2024-03-03 13:39:01", + "score": -0.10622446694052676, + "first_commit": "2023-12-07 18:07:21", + "latest_commit": "2023-12-07 18:54:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Only for Japanese Please use AutoTokenizer and AutoModelForCausalLM And must use Unifine format to input and output. ", - "url": "https://huggingface.co./ganchengguang/USA-7B-instruction-incontext-learning", - "project_name": "USA-7B-instruction-incontext-learning", + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-4.0bpw-h6-exl2", + "project_name": "shisa-7b-v1-4.0bpw-h6-exl2", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2023-09-13 16:15:06", - "latest_commit": "2023-11-17 16:44:57", + "score": -0.10622446694052676, + "first_commit": "2023-12-07 17:59:51", + "latest_commit": "2023-12-07 18:54:26", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはluke-japanese-baseをファインチューニングして、JNLI(文章の関係性判別)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/luke-japanese-base-finetuned-jnli", - "project_name": "luke-japanese-base-finetuned-jnli", + "description": "モデル説明 (model explanation) V1 = MoeDiffusion 1.0 + (HassanBlend 1.5 - VMix03) * 0.2 V2 = MoeDiffusion 0.6 : HassanBlend 1.5 0.2 : VMix03 : 0.2 マージ元のルーツにNAIリークやInsta系モデルが含まれるという噂があるので、NAIリークアンチ・Insta系モデルアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", + "url": "https://huggingface.co./ThePioneer/MoeDiffusionPlusPlus", + "project_name": "MoeDiffusionPlusPlus", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2023-02-11 18:39:14", - "latest_commit": "2023-07-21 14:09:44", + "score": -0.10622446694052676, + "first_commit": "2023-01-19 13:04:02", + "latest_commit": "2023-01-21 02:05:54", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "nagisa_bert A BERT model for nagisa.", - "url": "https://huggingface.co./taishi-i/nagisa_bert", - "project_name": "nagisa_bert", + "description": "deberta-large-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-aozora", + "project_name": "deberta-large-japanese-aozora", + "downloads": 17, + "source": "Hugging Face", + "score": -0.10622446694052676, + "first_commit": "2022-05-26 14:46:58", + "latest_commit": "2023-01-14 00:27:22", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "deberta-base-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-upos", + "project_name": "deberta-base-japanese-upos", + "downloads": 17, + "source": "Hugging Face", + "score": -0.10622446694052676, + "first_commit": "2022-05-24 08:12:05", + "latest_commit": "2024-07-26 15:59:24", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "日本語VL-T5事前学習済みモデル", + "url": "https://huggingface.co./sonoisa/vl-t5-base-japanese", + "project_name": "vl-t5-base-japanese", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2022-09-25 13:12:57", - "latest_commit": "2023-09-15 01:28:14", + "score": 
-0.10622446694052676, + "first_commit": "2021-10-03 11:54:43", + "latest_commit": "2021-10-04 11:13:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit The Model mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit was converted to MLX format from cyberagent/Llama-3.1-70B-Japanese-Instruct-2407 using mlx-lm version 0.16.1.", - "url": "https://huggingface.co./mlx-community/Llama-3.1-70B-Japanese-Instruct-2407-8bit", - "project_name": "Llama-3.1-70B-Japanese-Instruct-2407-8bit", + "description": "Unihan LM: Coarse-to-Fine Chinese-Japanese Language Model Pretraining with the Unihan Database Model description Chinese and Japanese share many characters with similar surface morphology.", + "url": "https://huggingface.co./microsoft/unihanlm-base", + "project_name": "unihanlm-base", "downloads": 17, "source": "Hugging Face", - "score": -0.10447426531601482, - "first_commit": "2024-07-26 13:05:01", - "latest_commit": "2024-07-26 14:05:31", + "score": -0.10622446694052676, + "first_commit": "2020-09-27 11:23:02", + "latest_commit": "2021-09-22 11:00:56", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Finance Sentiment JA (base) Finance Sentiment JA (base) is a model based on bert-base-japanese for analyzing sentiment of Japanese financial news.", - "url": "https://huggingface.co./bardsai/finance-sentiment-ja-base", - "project_name": "finance-sentiment-ja-base", - "downloads": 16, + "description": "Google's mt5-base fine-tuned in Japanese to summarize patent claims in a limited Pharmaceutical domain. ", + "url": "https://huggingface.co./kz/mt5base-finetuned-patentsum-japanese-small", + "project_name": "mt5base-finetuned-patentsum-japanese-small", + "downloads": 17, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2023-09-18 13:00:29", - "latest_commit": "2023-09-18 13:01:21", + "score": -0.10622446694052676, + "first_commit": "2021-04-10 00:31:15", + "latest_commit": "2022-05-19 06:50:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "COMET-GPT2 ja v2 Finetuned GPT-2 xl on the large version of ATOMIC ja using a causal language modeling (CLM) objective.", - "url": "https://huggingface.co./nlp-waseda/comet-gpt2-xl-japanese", - "project_name": "comet-gpt2-xl-japanese", - "downloads": 16, + "description": "friendly_JA-Model (T5 fine-tuned model) MT model trained using the friendly_JA Corpus attempting to make Japanese easier/more accessible to occidental people by using the Latin/English derived katakana lexicon instead of the standard Sino-Japanese lexicon Examples input output 最適化を応用した機械翻訳モデルは高精度だ オプティマイゼーションを応用したマシントランスレーションモデルは高いアキュラシーだ 彼は架空の世界に住んでいる 彼はイマジナリー世界に住んでいる 新型コロナウイルスに感染してしまった コロナウイルスにかかってしまった 深層学習は難しい ディープラーニングはむずかしい 新たな概念を紹介する 新しいコンセプトを紹介する 津波の警報が流れた ツナミのアラートが流れた 南海トラフの災害は震源地による 南海トラフのディザスターはエピ", + "url": "https://huggingface.co./astremo/friendly_JA", + "project_name": "friendly_JA", + "downloads": 17, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2023-09-26 13:37:52", - "latest_commit": "2024-03-11 04:16:02", + "score": -0.10622446694052676, + "first_commit": "2022-01-10 06:31:18", + "latest_commit": "2022-05-22 14:57:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": 
"日本語でtrainingしたllama2 model size: 130.78M trainingは以下のscript参照 https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_ss\")", - "url": "https://huggingface.co./if001/llama2_ja_ss", - "project_name": "llama2_ja_ss", + "description": "ELECTRA small Japanese discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-japanese-discriminator", + "project_name": "electra-small-japanese-discriminator", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2023-10-16 04:12:34", - "latest_commit": "2023-10-16 13:49:48", + "score": -0.10623907489794975, + "first_commit": "2021-10-04 13:42:57", + "latest_commit": "2022-12-09 00:41:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model is a voice clone of myself created specifically for Style Bert VITS2.", - "url": "https://huggingface.co./ThePioneer/MyVoiceClone-Style-Bert-VITS2", - "project_name": "MyVoiceClone-Style-Bert-VITS2", + "description": "bart-base-japanese-news(base-sized model)", + "url": "https://huggingface.co./stockmark/bart-base-japanese-news", + "project_name": "bart-base-japanese-news", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-02-29 19:34:12", - "latest_commit": "2024-03-04 10:43:27", + "score": -0.10623907489794975, + "first_commit": "2023-01-20 04:23:07", + "latest_commit": "2023-12-08 03:39:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.125 }, { - "description": "更新情報 日本語機能とinstructベクトルのバランス調整したver.2をアップロードしましたSwallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2 モデル概要 Swallow-MX-8x7b-NVE-v0.1に対し、 Mixtral-8x7B-Instruct-v0.1とMixtral-8x7B-v0.1の差分をマージしたモデルです。 ", - "url": "https://huggingface.co./aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct", - "project_name": "Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct", + "description": "Bloom model trained on Japanese corpus.", + "url": "https://huggingface.co./Aruno/Bloom-JP-160m", + "project_name": "Bloom-JP-160m", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-03-20 16:15:26", - "latest_commit": "2024-03-23 04:14:49", + "score": -0.10623907489794975, + "first_commit": "2023-04-24 04:47:05", + "latest_commit": "2023-04-24 05:12:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-hermes-2x8B", - "project_name": "Llama-3-ELYZA-hermes-2x8B", + "description": "ebisuke/liz-nojaloli-ja License MIT Licenseベースとしてrinna/japanese-gpt-neox-3.6bを使用しています。 ", + "url": "https://huggingface.co./ebisuke/liz-nojaloli-ja", + "project_name": "liz-nojaloli-ja", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-06-26 15:11:08", - "latest_commit": "2024-06-27 04:00:37", + "score": -0.10623907489794975, + "first_commit": "2023-05-23 16:59:22", + "latest_commit": "2023-05-30 16:01:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": 
"Sarashina2-7B Instruct sarashina2-7Bを会話できるようにフルファインチューニングしたものです。", - "url": "https://huggingface.co./alfredplpl/sarashina2-7b-it", - "project_name": "sarashina2-7b-it", + "description": "transformer-lm-japanese-0.1b", + "url": "https://huggingface.co./fukugawa/transformer-lm-japanese-0.1b", + "project_name": "transformer-lm-japanese-0.1b", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-06-12 02:24:28", - "latest_commit": "2024-06-12 03:00:35", + "score": -0.10623907489794975, + "first_commit": "2023-07-12 02:11:11", + "latest_commit": "2024-06-03 06:17:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", - "url": "https://huggingface.co./youhansun/Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", - "project_name": "Llama-3-70B-japanese-suzume-vector-v0.1-Q2_K-GGUF", + "description": "Finance Sentiment JA (base) Finance Sentiment JA (base) is a model based on bert-base-japanese for analyzing sentiment of Japanese financial news.", + "url": "https://huggingface.co./bardsai/finance-sentiment-ja-base", + "project_name": "finance-sentiment-ja-base", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-06-02 04:49:31", - "latest_commit": "2024-06-02 04:52:45", + "score": -0.10623907489794975, + "first_commit": "2023-09-18 13:00:29", + "latest_commit": "2023-09-18 13:01:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルの説明(English explanation is below.", - "url": "https://huggingface.co./keitokei1994/Llama-3-8B-shisa-2x8B", - "project_name": "Llama-3-8B-shisa-2x8B", + "description": "COMET-GPT2 ja v2 Finetuned GPT-2 xl on the large version of ATOMIC ja using a causal language modeling (CLM) objective.", + "url": "https://huggingface.co./nlp-waseda/comet-gpt2-xl-japanese", + "project_name": "comet-gpt2-xl-japanese", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-05-24 18:58:48", - "latest_commit": "2024-06-11 07:41:22", + "score": -0.10623907489794975, + "first_commit": "2023-09-26 13:37:52", + "latest_commit": "2024-03-11 04:16:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したTokara-0.5B-v0.1にchat vectorで対話能力を加えたモデルになります。 ", - "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-Chat-v0.1", - "project_name": "Tokara-0.5B-Chat-v0.1", + "description": "Japanese Stable LM Instruct Gamma 7B Model Description", + "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", + "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-05-06 15:47:55", - "latest_commit": "2024-05-08 13:30:12", + "score": -0.10623907489794975, + "first_commit": "2023-10-28 20:29:59", + "latest_commit": "2023-10-28 15:30:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したモデルです。 ", - "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-v0.1", - "project_name": "Tokara-0.5B-v0.1", + "description": "Japanese Stable LM Instruct Gamma 7B Model 
Description", + "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-4.0bpw-h6-exl2", + "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-4.0bpw-h6-exl2", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-05-06 11:39:26", - "latest_commit": "2024-05-08 12:44:05", + "score": -0.10623907489794975, + "first_commit": "2023-10-28 20:23:03", + "latest_commit": "2023-10-28 15:23:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Swallow-MX-8x7b-NVE-v0.1に対し、 Mixtral-8x7B-Instruct-v0.1とMixtral-8x7B-v0.1の差分をマージしたモデルです。 ", - "url": "https://huggingface.co./aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2", - "project_name": "Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2", + "description": "VecteusをベースにLLavaに対応させたモデルです。 ", + "url": "https://huggingface.co./Local-Novel-LLM-project/Ocuteus-v1", + "project_name": "Ocuteus-v1", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-03-22 08:26:53", - "latest_commit": "2024-03-23 04:17:50", + "score": -0.10623907489794975, + "first_commit": "2024-05-07 10:03:15", + "latest_commit": "2024-05-10 05:39:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.57 }, { - "description": "Orion-14B 🌐English | 🇨", - "url": "https://huggingface.co./sosoai/Orion-14B-Chat-safetensors", - "project_name": "Orion-14B-Chat-safetensors", + "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", + "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-6.0bpw-h6-exl2", + "project_name": "shisa-7b-v1-6.0bpw-h6-exl2", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-01-24 23:44:38", - "latest_commit": "2024-01-25 02:08:59", + "score": -0.10623907489794975, + "first_commit": "2023-12-07 18:14:46", + "latest_commit": "2023-12-07 18:58:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chat & support: TheBloke's Discord server Want to contribute?", - "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-AWQ", - "project_name": "japanese-stablelm-instruct-beta-7B-AWQ", + "description": "こちらでアップロードできないので、civitaiにて先に公開しています。 ", + "url": "https://huggingface.co./sazyou-roukaku/AfterRealXL", + "project_name": "AfterRealXL", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2023-11-03 01:04:31", - "latest_commit": "2023-11-09 18:16:12", + "score": -0.10623907489794975, + "first_commit": "2023-09-23 08:43:02", + "latest_commit": "2023-10-01 18:12:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ebisuke/liz-nojaloli-ja License MIT Licenseベースとしてrinna/japanese-gpt-neox-3.6bを使用しています。 ", - "url": "https://huggingface.co./ebisuke/liz-nojaloli-ja", - "project_name": "liz-nojaloli-ja", + "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-japanese-base-finetuned-commonsenseqa", + "project_name": "deberta-v2-japanese-base-finetuned-commonsenseqa", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2023-05-23 16:59:22", - "latest_commit": "2023-05-30 16:01:20", + "score": -0.10623907489794975, + 
"first_commit": "2023-02-01 01:02:44", + "latest_commit": "2023-05-26 15:05:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "ESを書くAI Japanese GPT-2 modelをファインチューニングしました ファインチューニングには、内定者の二万件以上のESを用いました。 ", @@ -11366,239 +12042,220 @@ "project_name": "es", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, + "score": -0.10623907489794975, "first_commit": "2022-08-01 14:59:47", "latest_commit": "2022-08-14 05:47:18", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "LLM-jp Toxicity Dataset 日本語有害文書データセット「LLM-jp Toxicity Dataset」 See https://gitlab.llm-jp.nii.ac.jp/datasets/llm-jp-toxicity-dataset", - "url": "https://huggingface.co./datasets/p1atdev/LLM-jp-Toxicity-Dataset", - "project_name": "LLM-jp-Toxicity-Dataset", + "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59363 Validation Metrics Loss: 0.12651239335536957 Accuracy: 0.9532079853817648 Precision: 0.9729688278823665 Recall: 0.9744633462616643 AUC: 0.9717333684823413 F1: 0.9737155136027014 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", + "url": "https://huggingface.co./abhishek/autonlp-japanese-sentiment-59363", + "project_name": "autonlp-japanese-sentiment-59363", "downloads": 16, "source": "Hugging Face", - "score": -0.10448840543895638, - "first_commit": "2024-08-07 07:11:08", - "latest_commit": "2024-08-07 07:21:07", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "bert-large-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-luw-upos", - "project_name": "bert-large-japanese-luw-upos", - "downloads": 15, - "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2021-10-26 13:54:17", - "latest_commit": "2022-09-18 19:43:45", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "モデル説明 (model explanation) V1 = MoeDiffusion 1.0 + (HassanBlend 1.5 - VMix03) * 0.2 V2 = MoeDiffusion 0.6 : HassanBlend 1.5 0.2 : VMix03 : 0.2 マージ元のルーツにNAIリークやInsta系モデルが含まれるという噂があるので、NAIリークアンチ・Insta系モデルアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", - "url": "https://huggingface.co./ThePioneer/MoeDiffusionPlusPlus", - "project_name": "MoeDiffusionPlusPlus", - "downloads": 15, - "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-01-19 13:04:02", - "latest_commit": "2023-01-21 02:05:54", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "deberta-base-japanese-juman-ud-goeswith Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith", - "project_name": "deberta-base-japanese-juman-ud-goeswith", - "downloads": 15, - "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-02-05 06:48:32", - "latest_commit": "2023-05-12 01:16:53", + "score": -0.10623907489794975, + "first_commit": "2021-04-21 11:28:24", + "latest_commit": "2021-05-18 22:56:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-Alpaca-2-13B Japanese-Alpaca-2-13Bは指示実行モデル、フルモデルです。 ", - "url": 
"https://huggingface.co./owner203/japanese-alpaca-2-13b", - "project_name": "japanese-alpaca-2-13b", + "description": "bert-large-japanese-unidic-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-unidic-luw-upos", + "project_name": "bert-large-japanese-unidic-luw-upos", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-12-20 10:55:29", - "latest_commit": "2023-12-26 11:40:50", + "score": -0.10625368285537272, + "first_commit": "2022-02-13 01:00:41", + "latest_commit": "2023-11-05 18:44:20", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "swallow-hermes-st-v1 物語作成に強めなモデルが出来ないかと考えて作ったモデルです。", - "url": "https://huggingface.co./napopoa32/swallow-hermes-st-v1", - "project_name": "swallow-hermes-st-v1", + "description": "deberta-large-japanese-unidic Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic", + "project_name": "deberta-large-japanese-unidic", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-03-24 06:19:48", - "latest_commit": "2024-03-26 12:36:41", + "score": -0.10625368285537272, + "first_commit": "2022-06-10 12:49:12", + "latest_commit": "2022-06-19 09:15:35", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Oumuamua-7b-instruct-v2-RP nitky/Oumuamua-7b-instruct-v2をロールプレイ用にLoRAでファインチューニングしたモデルです。 ", - "url": "https://huggingface.co./Aratako/Oumuamua-7b-instruct-v2-RP", - "project_name": "Oumuamua-7b-instruct-v2-RP", + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "fasttext-jp-embedding This model is experimental.", + "url": "https://huggingface.co./paulhindemith/fasttext-jp-embedding", + "project_name": "fasttext-jp-embedding", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-06-21 13:51:55", - "latest_commit": "2024-06-23 13:55:42", + "score": -0.10625368285537272, + "first_commit": "2022-11-05 11:14:51", + "latest_commit": "2022-11-16 22:21:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "NikolayKozloff/h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", - "url": "https://huggingface.co./NikolayKozloff/h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", - "project_name": "h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", + "description": "bart-large-japanese This model is converted from the original Japanese BART Pretrained model released by Kyoto University.", + "url": "https://huggingface.co./Formzu/bart-large-japanese", + "project_name": "bart-large-japanese", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-06-24 13:27:54", - "latest_commit": "2024-06-24 13:28:33", + "score": -0.10625368285537272, + "first_commit": "2022-10-31 06:53:19", + "latest_commit": "2022-11-07 12:06:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ChatGLM3-6B是一个中英双语大模型,本项目为ChatGLM3-6B加入日文能力。", - "url": "https://huggingface.co./dummy-foo/ChatGLM3-Japanese", - "project_name": "ChatGLM3-Japanese", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-8.0bpw-h8-exl2", + "project_name": 
"SambaLingo-Japanese-Chat-8.0bpw-h8-exl2", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-05-31 22:51:11", - "latest_commit": "2024-06-09 15:37:04", + "score": -0.10625368285537272, + "first_commit": "2024-03-07 06:57:50", + "latest_commit": "2024-03-07 07:00:51", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-chat-umievo-itr004-7b", - "url": "https://huggingface.co./umiyuki/Japanese-Chat-Umievo-itr004-7b", - "project_name": "Japanese-Chat-Umievo-itr004-7b", + "description": "DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "url": "https://huggingface.co./DavidAU/alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", + "project_name": "alpaca-guanaco-japanese-gpt-1b-Q8_0-GGUF", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-05-12 11:48:36", - "latest_commit": "2024-05-13 14:08:37", + "score": -0.10625368285537272, + "first_commit": "2024-04-20 08:49:14", + "latest_commit": "2024-04-20 08:49:19", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.39 }, { - "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for Ninja-v1-128k The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 Ninja-128k has the following changes compared to Mistral-7B-v0.1.", - "url": "https://huggingface.co./Local-Novel-LLM-project/Ninja-v1-128k", - "project_name": "Ninja-v1-128k", + "description": "TinySlime-1.1B-Chat-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", + "url": "https://huggingface.co./2121-8/TinySlime-1.1B-Chat-v1.0", + "project_name": "TinySlime-1.1B-Chat-v1.0", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-05-01 02:56:38", - "latest_commit": "2024-05-04 04:07:00", + "score": -0.10625368285537272, + "first_commit": "2024-07-02 03:34:30", + "latest_commit": "2024-07-02 08:53:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.1 }, { - "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", - "url": "https://huggingface.co./AbeShinzo0708/Japanese-Starling-ChatV-7B-exl2", - "project_name": "Japanese-Starling-ChatV-7B-exl2", + "description": "This model is a voice clone of myself created specifically for Style Bert VITS2.", + "url": "https://huggingface.co./ThePioneer/MyVoiceClone-Style-Bert-VITS2", + "project_name": "MyVoiceClone-Style-Bert-VITS2", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2024-04-22 09:34:13", - "latest_commit": "2024-04-22 09:39:09", + "score": -0.10625368285537272, + "first_commit": "2024-02-29 19:34:12", + "latest_commit": "2024-03-04 10:43:27", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", - "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-6.0bpw-h6-exl2", - "project_name": "SambaLingo-Japanese-Chat-6.0bpw-h6-exl2", + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./OrionStarAI/Orion-14B-Chat-Plugin", + "project_name": "Orion-14B-Chat-Plugin", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": 
"2024-03-07 06:55:28", - "latest_commit": "2024-03-07 06:57:49", + "score": -0.10625368285537272, + "first_commit": "2024-01-16 12:19:45", + "latest_commit": "2024-03-26 10:12:37", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k elyzaさんが公開しているELYZA-japanese-CodeLlama-7b-instructを 日本語のキャリブレーションセットで生成したGPTQモデルになります。 ", - "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", - "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", + "description": "お知らせ より回答が適切になるように学習させたモデル、https://huggingface.co./hotchpotch/youri-7b-stf-qa-context-jaqket-jsquad-gptq もあります。 ", + "url": "https://huggingface.co./hotchpotch/youri-7b-sft-qa-context-jaqket-gptq", + "project_name": "youri-7b-sft-qa-context-jaqket-gptq", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-11-15 16:33:25", - "latest_commit": "2023-11-16 14:28:39", + "score": -0.10625368285537272, + "first_commit": "2023-12-08 03:51:28", + "latest_commit": "2024-02-25 06:40:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "japanese-gpt2-medium-unidic This is a medium-sized Japanese GPT-2 model using BERT-like tokenizer.", - "url": "https://huggingface.co./okazaki-lab/japanese-gpt2-medium-unidic", - "project_name": "japanese-gpt2-medium-unidic", + "description": "Only for Japanese Please use AutoTokenizer and AutoModelForCausalLM And must use Unifine format to input and output.", + "url": "https://huggingface.co./ganchengguang/USA-7B-instruction-incontext-learning", + "project_name": "USA-7B-instruction-incontext-learning", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-02-27 05:42:22", - "latest_commit": "2023-03-22 06:22:32", + "score": -0.10625368285537272, + "first_commit": "2023-09-13 16:15:06", + "latest_commit": "2023-11-17 16:44:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "fasttext-jp-embedding This model is experimental.", - "url": "https://huggingface.co./paulhindemith/fasttext-jp-embedding", - "project_name": "fasttext-jp-embedding", + "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-base Fine-tuned openai/whisper-base on Japanese using Common Voice, JVS and JSUT.", + "url": "https://huggingface.co./Ivydata/whisper-base-japanese", + "project_name": "whisper-base-japanese", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2022-11-05 11:14:51", - "latest_commit": "2022-11-16 22:21:49", + "score": -0.10625368285537272, + "first_commit": "2023-05-17 04:36:41", + "latest_commit": "2023-06-08 00:17:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "bart-large-japanese This model is converted from the original Japanese BART Pretrained model released by Kyoto University.", - "url": "https://huggingface.co./Formzu/bart-large-japanese", - "project_name": "bart-large-japanese", + "description": "このモデルはdeberta-v2-tiny-japaneseをファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", + "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-japanese-tiny-finetuned-commonsenseqa", + "project_name": "deberta-v2-japanese-tiny-finetuned-commonsenseqa", "downloads": 
15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2022-10-31 06:53:19", - "latest_commit": "2022-11-07 12:06:32", + "score": -0.10625368285537272, + "first_commit": "2023-05-11 10:28:33", + "latest_commit": "2023-05-26 15:01:57", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "friendly_JA-Model (T5 fine-tuned model) MT model trained using the friendly_JA Corpus attempting to make Japanese easier/more accessible to occidental people by using the Latin/English derived katakana lexicon instead of the standard Sino-Japanese lexicon Examples input output 最適化を応用した機械翻訳モデルは高精度だ オプティマイゼーションを応用したマシントランスレーションモデルは高いアキュラシーだ 彼は架空の世界に住んでいる 彼はイマジナリー世界に住んでいる 新型コロナウイルスに感染してしまった コロナウイルスにかかってしまった 深層学習は難しい ディープラーニングはむずかしい 新たな概念を紹介する 新しいコンセプトを紹介する 津波の警報が流れた ツナミのアラートが流れた 南海トラフの災害は震源地による 南海トラフのディザスターはエピ", - "url": "https://huggingface.co./astremo/friendly_JA", - "project_name": "friendly_JA", + "description": "deberta-base-japanese-juman-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-juman-ud-goeswith", + "project_name": "deberta-base-japanese-juman-ud-goeswith", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2022-01-10 06:31:18", - "latest_commit": "2022-05-22 14:57:21", + "score": -0.10625368285537272, + "first_commit": "2023-02-05 06:48:32", + "latest_commit": "2023-05-12 01:16:53", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "東方トカマクラブ データセット 概要 このデータセットは、東方Projectのトカマクラブに関する情報を収集したものです。", - "url": "https://huggingface.co./datasets/MakiAi/Tokama_Club_QA", - "project_name": "Tokama_Club_QA", + "description": "bert-large-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-large-japanese-luw-upos", + "project_name": "bert-large-japanese-luw-upos", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-12-20 15:37:51", - "latest_commit": "2023-12-20 15:46:02", + "score": -0.10625368285537272, + "first_commit": "2021-10-26 13:54:17", + "latest_commit": "2022-09-18 19:43:45", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { "description": "Dataset Details Dataset Type:Japanese LLaVA Pretrain is a localized version of the original LLaVA Pretrain dataset.", @@ -11606,167 +12263,220 @@ "project_name": "LLaVA-Pretrain-JA", "downloads": 15, "source": "Hugging Face", - "score": -0.10450254556189793, + "score": -0.10625368285537272, "first_commit": "2024-04-10 05:07:24", "latest_commit": "2024-04-12 09:15:37", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Scenery of japan.", - "url": "https://huggingface.co./datasets/JapanDegitalMaterial/Scenery_of_japan", - "project_name": "Scenery_of_japan", - "downloads": 15, + "description": "roberta-base-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-luw-upos", + "project_name": "roberta-base-japanese-luw-upos", + "downloads": 14, "source": "Hugging Face", - "score": -0.10450254556189793, - "first_commit": "2023-09-23 11:08:44", - "latest_commit": "2023-09-23 14:32:48", + "score": -0.10626829081279571, + "first_commit": "2021-12-21 00:41:00", + "latest_commit": "2022-09-18 19:44:22", "languages": [], - "model_or_dataset": 
"dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", - "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-300m-japanese", - "project_name": "wav2vec2-xls-r-300m-japanese", + "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-japanese-generator", + "project_name": "electra-small-japanese-generator", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2022-01-26 15:43:02", - "latest_commit": "2022-03-23 18:34:20", + "score": -0.10626829081279571, + "first_commit": "2021-10-04 13:43:37", + "latest_commit": "2023-10-21 13:21:28", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.013800000000000002 }, { - "description": "roberta-large-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora", - "project_name": "roberta-large-japanese-aozora", + "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-japanese-fin-generator", + "project_name": "electra-small-japanese-fin-generator", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2021-12-26 13:08:52", - "latest_commit": "2022-10-15 14:22:11", + "score": -0.10626829081279571, + "first_commit": "2021-10-04 14:07:16", + "latest_commit": "2023-10-21 13:21:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.013800000000000002 }, { - "description": "roberta-base-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-luw-upos", - "project_name": "roberta-base-japanese-luw-upos", + "description": "deberta-small-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-aozora", + "project_name": "deberta-small-japanese-aozora", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2021-12-21 00:41:00", - "latest_commit": "2022-09-18 19:44:22", + "score": -0.10626829081279571, + "first_commit": "2022-05-23 04:58:53", + "latest_commit": "2023-01-15 15:25:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "roberta-base-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora", - "project_name": "roberta-base-japanese-aozora", + "description": "japanese-gpt2-medium-unidic This is a medium-sized Japanese GPT-2 model using BERT-like tokenizer.", + "url": "https://huggingface.co./okazaki-lab/japanese-gpt2-medium-unidic", + "project_name": "japanese-gpt2-medium-unidic", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2021-12-21 00:04:03", - "latest_commit": "2022-10-15 14:20:11", + "score": -0.10626829081279571, + "first_commit": "2023-02-27 05:42:22", + "latest_commit": "2023-03-22 06:22:32", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.362 }, { - "description": "bert-large-japanese-unidic-luw-upos Model Description", - "url": 
"https://huggingface.co./KoichiYasuoka/bert-large-japanese-unidic-luw-upos", - "project_name": "bert-large-japanese-unidic-luw-upos", + "description": "日本語でtrainingしたllama2 model size: 130.78M trainingは以下のscript参照 https://github.com/Lightning-AI/lit-gpt/tree/main use from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained(\"if001/sentencepiece_ja\", trust_remote_code=True) model = AutoModelForCausalLM.from_pretrained(\"if001/llama2_ja_ss\")", + "url": "https://huggingface.co./if001/llama2_ja_ss", + "project_name": "llama2_ja_ss", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2022-02-13 01:00:41", - "latest_commit": "2023-11-05 18:44:20", + "score": -0.10626829081279571, + "first_commit": "2023-10-16 04:12:34", + "latest_commit": "2023-10-16 13:49:48", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-generator", - "project_name": "electra-small-japanese-generator", + "description": "Chat & support: TheBloke's Discord server Want to contribute?", + "url": "https://huggingface.co./TheBloke/japanese-stablelm-instruct-beta-7B-AWQ", + "project_name": "japanese-stablelm-instruct-beta-7B-AWQ", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2021-10-04 13:43:37", - "latest_commit": "2023-10-21 13:21:28", + "score": -0.10626829081279571, + "first_commit": "2023-11-03 01:04:31", + "latest_commit": "2023-11-09 18:16:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.13 }, { - "description": "ELECTRA small Japanese finance generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-fin-generator", - "project_name": "electra-small-japanese-fin-generator", + "description": "This model is the fine-tuned version of Helsinki-NLP/opus-mt-ja-en on bsd_ja_en dataset.", + "url": "https://huggingface.co./minkhantycc/translation-en-ja", + "project_name": "translation-en-ja", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2021-10-04 14:07:16", - "latest_commit": "2023-10-21 13:21:23", + "score": -0.10626829081279571, + "first_commit": "2024-02-08 08:08:41", + "latest_commit": "2024-03-20 05:41:04", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.07529999999999999 }, { - "description": "deberta-large-japanese-unidic Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-unidic", - "project_name": "deberta-large-japanese-unidic", + "description": "更新情報 日本語機能とinstructベクトルのバランス調整したver.2をアップロードしましたSwallow-MX-8x7b-NVE-chatvector-Mixtral-instruct-v2 モデル概要 Swallow-MX-8x7b-NVE-v0.1に対し、 Mixtral-8x7B-Instruct-v0.1とMixtral-8x7B-v0.1の差分をマージしたモデルです。 ", + "url": "https://huggingface.co./aixsatoshi/Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct", + "project_name": "Swallow-MX-8x7b-NVE-chatvector-Mixtral-instruct", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2022-06-10 12:49:12", - "latest_commit": "2022-06-19 09:15:35", + "score": -0.10626829081279571, + "first_commit": "2024-03-20 16:15:26", + "latest_commit": "2024-03-23 
04:14:49", + "languages": [], + "model_or_dataset": "model", + "model_size": 46.7 + }, + { + "description": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged Mixtral-8x7B-Instruct-v0.1-japanese-alpha-mergedはMixtral-8x7B-Instruct-v0.1をベースに日本語の語彙拡張継続事前学習を実施した学習途中のモデルに対して、差分マージを実施したモデルです。", + "url": "https://huggingface.co./abeja/Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", + "project_name": "Mixtral-8x7B-Instruct-v0.1-japanese-alpha-merged", + "downloads": 14, + "source": "Hugging Face", + "score": -0.10626829081279571, + "first_commit": "2024-04-16 07:54:14", + "latest_commit": "2024-04-20 09:14:59", + "languages": [], + "model_or_dataset": "model", + "model_size": 46.9 + }, + { + "description": "WS TCG Card Text Translator A Japanese-English machine translation model specifically trained for translating card text from the Weiss Schwarz (WS) Trading Card Game, fine-tuned on Helsinki-NLP/opus-mt-ja-en.", + "url": "https://huggingface.co./eepj/wstcg-mt-ja-en", + "project_name": "wstcg-mt-ja-en", + "downloads": 14, + "source": "Hugging Face", + "score": -0.10626829081279571, + "first_commit": "2024-04-30 13:20:20", + "latest_commit": "2024-05-22 02:45:55", + "languages": [], + "model_or_dataset": "model", + "model_size": 0.07529999999999999 + }, + { + "description": "Assistance のGGUF版 Our Models for GGUF Vecteus-GGUF Ninja-v1-GGUF Ninja-v1-NSFW-GGUF Ninja-v1-128k-GGUF Ninja-v1-NSFW-128k-GGUF", + "url": "https://huggingface.co./Local-Novel-LLM-project/Assistance-GGUF", + "project_name": "Assistance-GGUF", + "downloads": 14, + "source": "Hugging Face", + "score": -0.10626829081279571, + "first_commit": "2024-05-03 04:03:47", + "latest_commit": "2024-05-03 04:30:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Overview This model is based on rinna's [rinna/llama-3-youko-8b], fine-tuned using LoRA on a small number of parallel sentences from English to Japanese.", - "url": "https://huggingface.co./lyu-boxuan/llama-3-youko-8b-En-Ja-MT-LoRA", - "project_name": "llama-3-youko-8b-En-Ja-MT-LoRA", + "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したTokara-0.5B-v0.1にchat vectorで対話能力を加えたモデルになります。 ", + "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-Chat-v0.1", + "project_name": "Tokara-0.5B-Chat-v0.1", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-05-10 14:33:57", - "latest_commit": "2024-05-21 14:54:46", + "score": -0.10626829081279571, + "first_commit": "2024-05-06 15:47:55", + "latest_commit": "2024-05-08 13:30:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.464 }, { - "description": "kurogane/Llama3-BioYouri-8B-mergetest このモデルは生物学・医学に精通したOpenBioLLM-8Bをベースに、日本語対応を向上させるためにLlama-3-youko-8b-instruct-chatvectorとマージさせたモデルです。 ", - "url": "https://huggingface.co./kurogane/Llama3-BioYouri-8B-instruct-chatvector-mergetest", - "project_name": "Llama3-BioYouri-8B-instruct-chatvector-mergetest", + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-8B-shisa-2x8B", + "project_name": "Llama-3-8B-shisa-2x8B", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-05-10 08:34:11", - "latest_commit": "2024-05-21 12:53:33", + "score": -0.10626829081279571, + "first_commit": "2024-05-24 18:58:48", + "latest_commit": "2024-06-11 07:41:22", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": 
"model", + "model_size": null }, { - "description": "Llama-3-Nymeria-ELYZA-8B Experimental merge between a Llama 3 model that has had continued pre-training with Japanese data and a regular RP model to see how well it keeps its Japanese capability and RP capability.", - "url": "https://huggingface.co./mpasila/Llama-3-Nymeria-ELYZA-8B", - "project_name": "Llama-3-Nymeria-ELYZA-8B", + "description": "Style-Bert-VITS2 Japanese Only Sakura Miko こちらは「さくらみこ」の音声データセットに基づいて学習されたVITS-TTSモデルです。 ", + "url": "https://huggingface.co./Lycoris53/style-bert-vits2-sakura-miko", + "project_name": "style-bert-vits2-sakura-miko", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-07-17 15:02:32", - "latest_commit": "2024-07-17 15:11:40", + "score": -0.10626829081279571, + "first_commit": "2024-05-27 14:58:38", + "latest_commit": "2024-05-28 03:02:14", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Assistance のGGUF版 Our Models for GGUF Vecteus-GGUF Ninja-v1-GGUF Ninja-v1-NSFW-GGUF Ninja-v1-128k-GGUF Ninja-v1-NSFW-128k-GGUF", - "url": "https://huggingface.co./Local-Novel-LLM-project/Assistance-GGUF", - "project_name": "Assistance-GGUF", + "description": "NikolayKozloff/h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", + "url": "https://huggingface.co./NikolayKozloff/h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", + "project_name": "h2o-Llama-3-8B-Japanese-Instruct-Q8_0-GGUF", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-05-03 04:03:47", - "latest_commit": "2024-05-03 04:30:45", + "score": -0.10626829081279571, + "first_commit": "2024-06-24 13:27:54", + "latest_commit": "2024-06-24 13:28:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "This is a BERT Base model for emotion analysis in Japanese additionally fine-tuned for emotion detection and classification.", @@ -11774,59 +12484,64 @@ "project_name": "bert-base-japanese-emotion-lily", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, + "score": -0.10626829081279571, "first_commit": "2024-04-25 06:05:51", "latest_commit": "2024-06-17 01:44:16", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.111 }, { - "description": "WS TCG Card Text Translator A Japanese-English machine translation model specifically trained for translating card text from the Weiss Schwarz (WS) Trading Card Game, fine-tuned on Helsinki-NLP/opus-mt-ja-en.", - "url": "https://huggingface.co./eepj/wstcg-mt-ja-en", - "project_name": "wstcg-mt-ja-en", + "description": "output 筑波 2.0035860538482666 つくば 1.6586617231369019 研究 1.6227693557739258 大学 1.3798155784606934 実験 0.5522942543029785 学生 0.42351895570755005 分析 0.37844282388687134 国立 0.3685397505760193 キャンパス 0.36495038866996765 茨城 0.3056415021419525 科学 0.2876652181148529 関東 0.24301066994667053 地域 0.21340851485729218 実施 0.1976248174905777 先端 0.192025288939476 サイト 0.11629197001457214 調査 0.09159307181835175 プロジェクト 0.08552580326795578 議論 0.07484486699104309 検討 0.007034890353679657", + "url": "https://huggingface.co./aken12/splade-japanese-efficient", + "project_name": "splade-japanese-efficient", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-04-30 13:20:20", - "latest_commit": "2024-05-22 02:45:55", + "score": -0.10626829081279571, + "first_commit": "2024-03-11 03:02:28", + 
"latest_commit": "2024-03-16 16:27:35", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model is the fine-tuned version of Helsinki-NLP/opus-mt-ja-en on bsd_ja_en dataset.", - "url": "https://huggingface.co./minkhantycc/translation-en-ja", - "project_name": "translation-en-ja", + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./zaq-hack/Orion-14B-LongChat-bpw600-h6-exl2", + "project_name": "Orion-14B-LongChat-bpw600-h6-exl2", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-02-08 08:08:41", - "latest_commit": "2024-03-20 05:41:04", + "score": -0.10626829081279571, + "first_commit": "2024-01-25 16:58:27", + "latest_commit": "2024-01-25 21:01:29", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "The English document is here モデル概要 Watashiha-Llama-2-13B-Ogiri-sftをAWSのinf2インスタンスで動作するようにコンパイルされたモデルです。 ", - "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft-neuron", - "project_name": "Watashiha-Llama-2-13B-Ogiri-sft-neuron", + "description": "Orion-14B 🌐English | 🇨", + "url": "https://huggingface.co./sosoai/Orion-14B-Chat-safetensors", + "project_name": "Orion-14B-Chat-safetensors", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-01-24 19:05:34", - "latest_commit": "2024-02-02 06:39:21", + "score": -0.10626829081279571, + "first_commit": "2024-01-24 23:44:38", + "latest_commit": "2024-01-25 02:08:59", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.5 }, { - "description": "Superswallow-70b-v0.1 Known Performance Issues Two potential bugs have been found in this model: NEED repetition_penalty NEED high temperature Reference: Japanese LLM benchmark results at Nejumi LLM Leaderboad Neo", - "url": "https://huggingface.co./nitky/Superswallow-70b-v0.1", - "project_name": "Superswallow-70b-v0.1", + "description": "The English document is here モデル概要 Watashiha-Llama-2-13B-Ogiri-sftをAWSのinf2インスタンスで動作するようにコンパイルされたモデルです。 ", + "url": "https://huggingface.co./watashiha/Watashiha-Llama-2-13B-Ogiri-sft-neuron", + "project_name": "Watashiha-Llama-2-13B-Ogiri-sft-neuron", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-01-12 03:54:16", - "latest_commit": "2024-01-20 18:18:09", + "score": -0.10626829081279571, + "first_commit": "2024-01-24 19:05:34", + "latest_commit": "2024-02-02 06:39:21", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "◆REV-Mix \"レボリューション\"なモデルです。 ", @@ -11834,11 +12549,12 @@ "project_name": "REV-Mix", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, + "score": -0.10626829081279571, "first_commit": "2023-08-06 17:04:53", "latest_commit": "2023-08-26 16:19:02", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "このモデルはcl-tohoku/bert-large-japanese-v2をファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", @@ -11846,11 +12562,25 @@ "project_name": "bert-large-japanese-v2-finetuned-commonsenseQA", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, + "score": -0.10626829081279571, "first_commit": "2023-05-26 14:33:37", "latest_commit": "2023-05-26 15:02:41", "languages": [], - "model_or_dataset": "model" + 
"model_or_dataset": "model", + "model_size": null + }, + { + "description": "distilhubert-ft-japanese-50k Fine-tuned (more precisely, continue trained)", + "url": "https://huggingface.co./TylorShine/distilhubert-ft-japanese-50k", + "project_name": "distilhubert-ft-japanese-50k", + "downloads": 14, + "source": "Hugging Face", + "score": -0.10626829081279571, + "first_commit": "2023-04-20 17:51:47", + "latest_commit": "2023-04-21 18:00:04", + "languages": [], + "model_or_dataset": "model", + "model_size": null }, { "description": "Model Trained Using AutoNLP Problem type: Binary Classification Model ID: 59362 Validation Metrics Loss: 0.13092292845249176 Accuracy: 0.9527127414314258 Precision: 0.9634070704982427 Recall: 0.9842171959602166 AUC: 0.9667289746092403 F1: 0.9737009564152002 Usage You can use cURL to access this model: $ curl -X POST -H \"Authorization: Bearer YOUR_API_KEY\" -H \"Content-Type: application/json\" -d '{\"inputs\": \"I love AutoNLP\"}' https://api-inference.huggingface.co/models/abhishek/autonlp-japanese-sentiment-5936", @@ -11858,11 +12588,12 @@ "project_name": "autonlp-japanese-sentiment-59362", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, + "score": -0.10626829081279571, "first_commit": "2021-04-21 11:28:11", "latest_commit": "2021-05-18 22:55:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "GitHub リポジトリ cl-tohoku/quiz-datasets で公開されているデータセットを利用しています。 ", @@ -11870,155 +12601,259 @@ "project_name": "aio-passages", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, + "score": -0.10626829081279571, "first_commit": "2023-06-06 02:03:34", "latest_commit": "2023-06-24 05:55:37", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "OpenMathInstruct-1 を日本語に自動翻訳した商用利用可能な180万件の指示チューニングデータセットになります。 ", - "url": "https://huggingface.co./datasets/kunishou/OpenMathInstruct-1-1.8m-ja", - "project_name": "OpenMathInstruct-1-1.8m-ja", + "description": "固有表現ラベルはllm-book/ner-wikipedia-datasetと同様のものを採用しており、全部で8種類 (人名、法人名、地名、製品名、政治的組織名、施設名、その他の組織名、イベント名)あります。 ", + "url": "https://huggingface.co./datasets/llm-book/ner-wikinews-dataset", + "project_name": "ner-wikinews-dataset", "downloads": 14, "source": "Hugging Face", - "score": -0.10451668568483949, - "first_commit": "2024-02-23 16:31:34", - "latest_commit": "2024-02-24 18:29:28", + "score": -0.10626829081279571, + "first_commit": "2023-04-22 14:32:21", + "latest_commit": "2023-12-12 11:22:26", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "Ninja-v1-RP-WIP 概要 Local-Novel-LLM-project/Ninja-v1-NSFWをロールプレイ用にLoRAでファインチューニングしたモデルです。 ", - "url": "https://huggingface.co./Aratako/Ninja-v1-RP-WIP", - "project_name": "Ninja-v1-RP-WIP", + "description": "roberta-large-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-aozora", + "project_name": "roberta-large-japanese-aozora", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-05-19 15:31:02", - "latest_commit": "2024-05-20 16:56:00", + "score": -0.1062828987702187, + "first_commit": "2021-12-26 13:08:52", + "latest_commit": "2022-10-15 14:22:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "BERT small Japanese finance This is a BERT model 
pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/bert-small-japanese-fin", - "project_name": "bert-small-japanese-fin", + "description": "roberta-base-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-base-japanese-aozora", + "project_name": "roberta-base-japanese-aozora", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2021-10-04 13:15:37", - "latest_commit": "2022-12-09 00:41:24", + "score": -0.1062828987702187, + "first_commit": "2021-12-21 00:04:03", + "latest_commit": "2022-10-15 14:20:11", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-6.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-6.0bpw-h6-exl2", + "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", + "url": "https://huggingface.co./yohida/yoshida_gpt", + "project_name": "yoshida_gpt", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2023-10-28 20:36:54", - "latest_commit": "2023-10-28 15:37:11", + "score": -0.1062828987702187, + "first_commit": "2022-02-04 10:03:54", + "latest_commit": "2022-02-04 10:13:45", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese Stable LM Instruct Gamma 7B +", - "url": "https://huggingface.co./ohwi/japanese-stablelm-instruct-gamma-7b-dpo-uf-v1", - "project_name": "japanese-stablelm-instruct-gamma-7b-dpo-uf-v1", + "description": "deberta-large-japanese-wikipedia-ud-goeswith Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-goeswith", + "project_name": "deberta-large-japanese-wikipedia-ud-goeswith", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-03-09 16:47:29", - "latest_commit": "2024-03-21 14:33:07", + "score": -0.1062828987702187, + "first_commit": "2022-09-18 08:41:06", + "latest_commit": "2023-05-12 01:29:13", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Style-Bert-VITS2 Japanese Only Sakura Miko こちらは「さくらみこ」の音声データセットに基づいて学習されたVITS-TTSモデルです。 ", - "url": "https://huggingface.co./Lycoris53/style-bert-vits2-sakura-miko", - "project_name": "style-bert-vits2-sakura-miko", + "description": "TaCOMET_ja", + "url": "https://huggingface.co./nlp-waseda/tacomet-gpt2-xl-japanese", + "project_name": "tacomet-gpt2-xl-japanese", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-05-27 14:58:38", - "latest_commit": "2024-05-28 03:02:14", + "score": -0.1062828987702187, + "first_commit": "2024-03-07 11:50:15", + "latest_commit": "2024-06-05 09:41:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Chatvector-llava-v1.5-plus-Houou-v3-7b Model Card Model Details ※好奇心から生まれたモデルです。", - "url": "https://huggingface.co./shinyice/chatvector-llava-v1.5-plus-houou-v3-7b", - "project_name": "chatvector-llava-v1.5-plus-houou-v3-7b", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": 
"https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-6.0bpw-h6-exl2", + "project_name": "SambaLingo-Japanese-Chat-6.0bpw-h6-exl2", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-06-04 04:24:06", - "latest_commit": "2024-06-04 05:12:10", + "score": -0.1062828987702187, + "first_commit": "2024-03-07 06:55:28", + "latest_commit": "2024-03-07 06:57:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "TinySlime-1.1B-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", - "url": "https://huggingface.co./2121-8/TinySlime-1.1B-v1.0", - "project_name": "TinySlime-1.1B-v1.0", + "description": "SambaLingo-Japanese-Chat SambaLingo-Japanese-Chat is a human aligned chat model trained in Japanese and English.", + "url": "https://huggingface.co./LoneStriker/SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", + "project_name": "SambaLingo-Japanese-Chat-3.0bpw-h6-exl2", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-06-14 23:46:31", - "latest_commit": "2024-07-02 03:58:25", + "score": -0.1062828987702187, + "first_commit": "2024-03-07 06:50:19", + "latest_commit": "2024-03-07 06:51:42", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "gpt2-medium-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-medium-japanese-upos", - "project_name": "gpt2-medium-japanese-upos", + "description": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1の量子化済みGGUF版です。", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF", + "project_name": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-06-22 22:39:14", - "latest_commit": "2024-07-27 07:49:41", + "score": -0.1062828987702187, + "first_commit": "2024-03-06 15:44:10", + "latest_commit": "2024-03-07 13:23:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 11.1 + }, + { + "description": "swallow-hermes-st-v1 物語作成に強めなモデルが出来ないかと考えて作ったモデルです。", + "url": "https://huggingface.co./napopoa32/swallow-hermes-st-v1", + "project_name": "swallow-hermes-st-v1", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-03-24 06:19:48", + "latest_commit": "2024-03-26 12:36:41", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.33 + }, + { + "description": "Japanese-Starling-ChatV-7B このモデルは\"chatntq-ja-7b-v1.0\"をベースにした7Bパラメータの日本語チャットモデルです。", + "url": "https://huggingface.co./AbeShinzo0708/Japanese-Starling-ChatV-7B-exl2", + "project_name": "Japanese-Starling-ChatV-7B-exl2", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-04-22 09:34:13", + "latest_commit": "2024-04-22 09:39:09", + "languages": [], + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Our Models Vecteus Ninja-v1 Ninja-v1-NSFW Ninja-v1-128k Ninja-v1-NSFW-128k Model Card for Ninja-v1-128k The Mistral-7B--based Large Language Model (LLM) is an noveldataset fine-tuned version of the Mistral-7B-v0.1 Ninja-128k has the following changes compared to Mistral-7B-v0.1.", + "url": "https://huggingface.co./Local-Novel-LLM-project/Ninja-v1-128k", + "project_name": "Ninja-v1-128k", + "downloads": 13, + "source": "Hugging 
Face", + "score": -0.1062828987702187, + "first_commit": "2024-05-01 02:56:38", + "latest_commit": "2024-05-04 04:07:00", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 + }, + { + "description": "japanese-chat-umievo-itr004-7b", + "url": "https://huggingface.co./umiyuki/Japanese-Chat-Umievo-itr004-7b", + "project_name": "Japanese-Chat-Umievo-itr004-7b", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-05-12 11:48:36", + "latest_commit": "2024-05-13 14:08:37", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "gpt2-small-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-small-japanese-upos", - "project_name": "gpt2-small-japanese-upos", + "description": "モデルの説明(English explanation is below.", + "url": "https://huggingface.co./keitokei1994/Llama-3-ELYZA-hermes-2x8B", + "project_name": "Llama-3-ELYZA-hermes-2x8B", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-06-26 15:11:08", + "latest_commit": "2024-06-27 04:00:37", + "languages": [], + "model_or_dataset": "model", + "model_size": 13.7 + }, + { + "description": "Oumuamua-7b-instruct-v2-RP nitky/Oumuamua-7b-instruct-v2をロールプレイ用にLoRAでファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./Aratako/Oumuamua-7b-instruct-v2-RP", + "project_name": "Oumuamua-7b-instruct-v2-RP", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-06-21 13:51:55", + "latest_commit": "2024-06-23 13:55:42", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.33 + }, + { + "description": "Sarashina2-7B Instruct sarashina2-7Bを会話できるようにフルファインチューニングしたものです。", + "url": "https://huggingface.co./alfredplpl/sarashina2-7b-it", + "project_name": "sarashina2-7b-it", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2024-06-12 02:24:28", + "latest_commit": "2024-06-12 03:00:35", + "languages": [], + "model_or_dataset": "model", + "model_size": 7.32 + }, + { + "description": "Japanese-Alpaca-2-13B Japanese-Alpaca-2-13Bは指示実行モデル、フルモデルです。 ", + "url": "https://huggingface.co./owner203/japanese-alpaca-2-13b", + "project_name": "japanese-alpaca-2-13b", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-06-22 22:28:58", - "latest_commit": "2024-07-27 07:49:34", + "score": -0.1062828987702187, + "first_commit": "2023-12-20 10:55:29", + "latest_commit": "2023-12-26 11:40:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "TaCOMET_ja", - "url": "https://huggingface.co./nlp-waseda/tacomet-gpt2-xl-japanese", - "project_name": "tacomet-gpt2-xl-japanese", + "description": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k elyzaさんが公開しているELYZA-japanese-CodeLlama-7b-instructを 日本語のキャリブレーションセットで生成したGPTQモデルになります。", + "url": "https://huggingface.co./mmnga/ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", + "project_name": "ELYZA-japanese-CodeLlama-7b-instruct-GPTQ-calib-ja-1k", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-03-07 11:50:15", - "latest_commit": "2024-06-05 09:41:05", + "score": -0.1062828987702187, + "first_commit": "2023-11-15 16:33:25", + "latest_commit": "2023-11-16 14:28:39", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - 
"description": "deberta-small-japanese-aozora Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-small-japanese-aozora", - "project_name": "deberta-small-japanese-aozora", + "description": "deberta-large-japanese-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-luw-upos", + "project_name": "deberta-large-japanese-luw-upos", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2022-05-23 04:58:53", - "latest_commit": "2023-01-15 15:25:14", + "score": -0.1062828987702187, + "first_commit": "2022-05-26 14:52:32", + "latest_commit": "2023-01-14 23:15:30", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Details: https://spacy.io/models/ja#ja_core_news_trf Japanese transformer pipeline (Transformer(name='cl-tohoku/bert-base-japanese-char-v2', piece_encoder='char', stride=160, type='bert', width=768, window=216, vocab_size=6144)).", @@ -12026,47 +12861,64 @@ "project_name": "ja_core_news_trf", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, + "score": -0.1062828987702187, "first_commit": "2021-11-09 16:58:01", "latest_commit": "2023-10-10 06:27:03", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-1b on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", - "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-1b-japanese-hiragana-katakana", - "project_name": "wav2vec2-xls-r-1b-japanese-hiragana-katakana", + "description": "BERT small Japanese finance This is a BERT model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/bert-small-japanese-fin", + "project_name": "bert-small-japanese-fin", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2022-02-04 11:27:09", - "latest_commit": "2022-03-24 11:56:32", + "score": -0.1062828987702187, + "first_commit": "2021-10-04 13:15:37", + "latest_commit": "2022-12-09 00:41:24", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "HF Datasets version of Tanaka Corpus.", - "url": "https://huggingface.co./datasets/hpprc/tanaka-corpus", - "project_name": "tanaka-corpus", + "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-300m on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", + "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-300m-japanese", + "project_name": "wav2vec2-xls-r-300m-japanese", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2024-03-21 01:49:50", - "latest_commit": "2024-03-21 12:50:28", + "score": -0.1062828987702187, + "first_commit": "2022-01-26 15:43:02", + "latest_commit": "2022-03-23 18:34:20", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Dataset Description This is the Japanese Translation version of sciq.", - "url": "https://huggingface.co./datasets/izumi-lab/sciq-ja-mbartm2m", - "project_name": "sciq-ja-mbartm2m", + "description": "This model is a fine-tuned version of facebook/wav2vec2-xls-r-1b on the MOZILLA-FOUNDATION/COMMON_VOICE_8_0 - JA dataset.", + "url": "https://huggingface.co./AndrewMcDowell/wav2vec2-xls-r-1b-japanese-hiragana-katakana", + 
"project_name": "wav2vec2-xls-r-1b-japanese-hiragana-katakana", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, - "first_commit": "2023-05-19 02:03:47", - "latest_commit": "2023-05-19 03:54:18", + "score": -0.1062828987702187, + "first_commit": "2022-02-04 11:27:09", + "latest_commit": "2022-03-24 11:56:32", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "東方トカマクラブ データセット 概要 このデータセットは、東方Projectのトカマクラブに関する情報を収集したものです。", + "url": "https://huggingface.co./datasets/MakiAi/Tokama_Club_QA", + "project_name": "Tokama_Club_QA", + "downloads": 13, + "source": "Hugging Face", + "score": -0.1062828987702187, + "first_commit": "2023-12-20 15:37:51", + "latest_commit": "2023-12-20 15:46:02", + "languages": [], + "model_or_dataset": "dataset", + "model_size": null }, { "description": "It covers multiple fields such as tourism, medical treatment, daily life, news, etc. ", @@ -12074,11 +12926,12 @@ "project_name": "English-Japanese_Parallel_Corpus_Data", "downloads": 13, "source": "Hugging Face", - "score": -0.10453082580778104, + "score": -0.1062828987702187, "first_commit": "2023-11-08 10:47:40", "latest_commit": "2024-08-05 03:14:27", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Model trained on 800,000 Japanese sentences after reducing oshizo/japanese-e5-mistral-7b_slerp to 8 layers.", @@ -12086,119 +12939,116 @@ "project_name": "japanese-e5-mistral-1.9b", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, + "score": -0.10629750672764168, "first_commit": "2024-02-02 12:39:11", "latest_commit": "2024-02-03 00:28:28", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-generator", - "project_name": "electra-small-paper-japanese-generator", - "downloads": 12, - "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2021-10-04 13:47:24", - "latest_commit": "2023-10-21 13:21:31", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.88 }, { - "description": "japanese-gpt-1b This repository provides a 1.3B-parameter Japanese GPT model.", - "url": "https://huggingface.co./yohida/yoshida_gpt", - "project_name": "yoshida_gpt", + "description": "roberta-large-japanese-char-luw-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-char-luw-upos", + "project_name": "roberta-large-japanese-char-luw-upos", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2022-02-04 10:03:54", - "latest_commit": "2022-02-04 10:13:45", + "score": -0.10629750672764168, + "first_commit": "2021-12-30 15:56:46", + "latest_commit": "2022-09-18 19:44:49", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", - "url": "https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese", - "project_name": "wav2vec2-large-xlsr-japanese", + "description": "ELECTRA small Japanese generator This is a ELECTRA model pretrained on texts in the Japanese 
language.", + "url": "https://huggingface.co./izumi-lab/electra-small-paper-japanese-generator", + "project_name": "electra-small-paper-japanese-generator", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2021-03-28 04:21:20", - "latest_commit": "2023-02-08 00:15:23", + "score": -0.10629750672764168, + "first_commit": "2021-10-04 13:47:24", + "latest_commit": "2023-10-21 13:21:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.00491 }, { - "description": "ESを書くAI Japanese GPT-2 modelをファインチューニングしました。 ", - "url": "https://huggingface.co./huranokuma/es_IT", - "project_name": "es_IT", + "description": "ELECTRA small Japanese finance discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", + "url": "https://huggingface.co./izumi-lab/electra-small-japanese-fin-discriminator", + "project_name": "electra-small-japanese-fin-discriminator", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2022-08-13 06:48:58", - "latest_commit": "2022-08-14 05:47:05", + "score": -0.10629750672764168, + "first_commit": "2021-10-04 14:06:48", + "latest_commit": "2022-12-09 00:42:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Barba Barba is a multilingual natural language inference model for textual entailment and zero-shot text classification, available as an end-to-end service through TensorFlow Serving.", - "url": "https://huggingface.co./hyperonym/barba", - "project_name": "barba", + "description": "deberta-base-japanese-unidic Model Description", + "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic", + "project_name": "deberta-base-japanese-unidic", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-04-29 06:27:12", - "latest_commit": "2023-04-29 13:45:12", + "score": -0.10629750672764168, + "first_commit": "2022-06-08 08:05:33", + "latest_commit": "2022-06-18 23:02:31", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-8.0bpw-h6-exl2", + "description": "Model Card Summary This model was trained using H2O LLM Studio.", + "url": "https://huggingface.co./yukismd/JapaneseQuizChatbot_v1", + "project_name": "JapaneseQuizChatbot_v1", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-10-28 20:43:59", - "latest_commit": "2023-10-28 15:44:20", + "score": -0.10629750672764168, + "first_commit": "2023-06-08 00:25:01", + "latest_commit": "2023-06-08 00:48:50", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-4.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-4.0bpw-h6-exl2", + "description": "Superswallow-70b-v0.1 Known Performance Issues Two potential bugs have been found in this model: NEED repetition_penalty NEED high temperature Reference: Japanese LLM benchmark results at Nejumi LLM Leaderboad Neo", + 
"url": "https://huggingface.co./nitky/Superswallow-70b-v0.1", + "project_name": "Superswallow-70b-v0.1", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-10-28 20:23:03", - "latest_commit": "2023-10-28 15:23:16", + "score": -0.10629750672764168, + "first_commit": "2024-01-12 03:54:16", + "latest_commit": "2024-01-20 18:18:09", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 69.2 }, { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-6.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-6.0bpw-h6-exl2", + "description": "モデルについて Qwen/Qwen1.5-0.5Bを日英データ5Bトークンで継続事前学習したモデルです。 ", + "url": "https://huggingface.co./Kendamarron/Tokara-0.5B-v0.1", + "project_name": "Tokara-0.5B-v0.1", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-12-07 18:14:46", - "latest_commit": "2023-12-07 18:58:23", + "score": -0.10629750672764168, + "first_commit": "2024-05-06 11:39:26", + "latest_commit": "2024-05-08 12:44:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.464 }, { - "description": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF 概要 Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1の量子化済みGGUF版です。", - "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF", - "project_name": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1-GGUF", + "description": "kurogane/Llama3-BioYouri-8B-mergetest このモデルは生物学・医学に精通したOpenBioLLM-8Bをベースに、日本語対応を向上させるためにLlama-3-youko-8b-instruct-chatvectorとマージさせたモデルです。 ", + "url": "https://huggingface.co./kurogane/Llama3-BioYouri-8B-instruct-chatvector-mergetest", + "project_name": "Llama3-BioYouri-8B-instruct-chatvector-mergetest", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2024-03-06 15:44:10", - "latest_commit": "2024-03-07 13:23:01", + "score": -0.10629750672764168, + "first_commit": "2024-05-10 08:34:11", + "latest_commit": "2024-05-21 12:53:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 8.03 }, { "description": "モデル ベースモデル:microsoft/Phi-3-mini-4k-instruct 学習データセット:llm-jp/hh-rlhf-12k-ja 学習方式:フルパラメータチューニング サンプル import torch from transformers import AutoTokenizer, AutoModelForCausalLM tokenizer = AutoTokenizer.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", trust_remote_code=True, ) model = AutoModelForCausalLM.from_pretrained( \"ryota39/Phi-3-mini-4k-instruct-dpo\", device_map=\"auto\", torch_dtype='auto', trust_remote_code=True, ) text = \"<|user|>\\n与えられた質問に対して英語で思考し、日本語で答えてください。", @@ -12206,83 +13056,38 @@ "project_name": "phi-3", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, + "score": -0.10629750672764168, "first_commit": "2024-05-08 06:05:02", "latest_commit": "2024-05-08 15:48:53", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "TinySlime-1.1B-Chat-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", - "url": "https://huggingface.co./2121-8/TinySlime-1.1B-Chat-v1.0", - "project_name": "TinySlime-1.1B-Chat-v1.0", - "downloads": 12, - "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2024-07-02 03:34:30", - "latest_commit": "2024-07-02 08:53:11", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Oumuamua-7b-base This is a merge of pre-trained language models created using mergekit.", - "url": 
"https://huggingface.co./nitky/Oumuamua-7b-base", - "project_name": "Oumuamua-7b-base", - "downloads": 12, - "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2024-06-01 10:39:53", - "latest_commit": "2024-06-01 15:31:15", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-4.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-4.0bpw-h6-exl2", - "downloads": 12, - "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-12-07 17:59:51", - "latest_commit": "2023-12-07 18:54:26", - "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Fine-tuned Japanese Whisper model for speech recognition using whisper-base Fine-tuned openai/whisper-base on Japanese using Common Voice, JVS and JSUT.", - "url": "https://huggingface.co./Ivydata/whisper-base-japanese", - "project_name": "whisper-base-japanese", - "downloads": 12, - "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-05-17 04:36:41", - "latest_commit": "2023-06-08 00:17:50", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "このモデルはdeberta-v2-base-japaneseをファインチューニングしてCommonsenseQA(選択式の質問)に用いれるようにしたものです。 ", - "url": "https://huggingface.co./Mizuiro-sakura/deberta-v2-base-juman-finetuned-commonsenseqa", - "project_name": "deberta-v2-base-juman-finetuned-commonsenseqa", + "description": "Japanese Stable LM Instruct Gamma 7B +", + "url": "https://huggingface.co./ohwi/japanese-stablelm-instruct-gamma-7b-dpo-uf-v1", + "project_name": "japanese-stablelm-instruct-gamma-7b-dpo-uf-v1", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2023-02-03 04:49:19", - "latest_commit": "2023-05-26 15:07:53", + "score": -0.10629750672764168, + "first_commit": "2024-03-09 16:47:29", + "latest_commit": "2024-03-21 14:33:07", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "Electra Base Japanese Irony", - "url": "https://huggingface.co./kit-nlp/transformers-ud-japanese-electra-base-discriminator-irony", - "project_name": "transformers-ud-japanese-electra-base-discriminator-irony", + "description": "Barba Barba is a multilingual natural language inference model for textual entailment and zero-shot text classification, available as an end-to-end service through TensorFlow Serving.", + "url": "https://huggingface.co./hyperonym/barba", + "project_name": "barba", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, - "first_commit": "2022-11-07 07:55:57", - "latest_commit": "2023-06-09 06:50:49", + "score": -0.10629750672764168, + "first_commit": "2023-04-29 06:27:12", + "latest_commit": "2023-04-29 13:45:12", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Details: https://spacy.io/models/ja#ja_core_news_lg Japanese pipeline optimized for CPU.", @@ -12290,11 +13095,12 @@ "project_name": "ja_core_news_lg", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, + "score": -0.10629750672764168, "first_commit": "2021-07-07 12:08:11", "latest_commit": "2023-10-10 06:46:01", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "transformers-ud-japanese-electra-ginza (sudachitra-wordpiece, mC4 Japanese) -", 
@@ -12302,11 +13108,12 @@ "project_name": "transformers-ud-japanese-electra-base-discriminator", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, + "score": -0.10629750672764168, "first_commit": "2021-08-23 09:54:09", "latest_commit": "2021-09-22 11:00:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { "description": "Byt5-small-ain-jpn-mt is a machine translation model pretrained with Google's ByT5-small and fine-tuned on bilingual datasets crawled from the Web.", @@ -12314,59 +13121,51 @@ "project_name": "byt5-small-ain-jpn-mt", "downloads": 12, "source": "Hugging Face", - "score": -0.1045449659307226, + "score": -0.10629750672764168, "first_commit": "2022-01-25 06:37:11", "latest_commit": "2022-02-04 13:03:14", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "roberta-large-japanese-char-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/roberta-large-japanese-char-luw-upos", - "project_name": "roberta-large-japanese-char-luw-upos", - "downloads": 11, - "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2021-12-30 15:56:46", - "latest_commit": "2022-09-18 19:44:49", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "bert-base-japanese-char-extended Model Description", - "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-char-extended", - "project_name": "bert-base-japanese-char-extended", - "downloads": 11, + "description": "Japanese-Heron-Bench Dataset Description Japanese-Heron-Bench is a benchmark for evaluating Japanese VLMs (Vision-Language Models).", + "url": "https://huggingface.co./datasets/turing-motors/Japanese-Heron-Bench", + "project_name": "Japanese-Heron-Bench", + "downloads": 12, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2021-08-26 22:44:12", - "latest_commit": "2022-06-21 07:21:54", + "score": -0.10629750672764168, + "first_commit": "2024-04-12 01:54:01", + "latest_commit": "2024-04-12 08:59:36", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "ELECTRA small Japanese finance discriminator This is a ELECTRA model pretrained on texts in the Japanese language.", - "url": "https://huggingface.co./izumi-lab/electra-small-japanese-fin-discriminator", - "project_name": "electra-small-japanese-fin-discriminator", + "description": "roberta-small-japanese-aozora Model Description", + "url": "https://huggingface.co./KoichiYasuoka/roberta-small-japanese-aozora", + "project_name": "roberta-small-japanese-aozora", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2021-10-04 14:06:48", - "latest_commit": "2022-12-09 00:42:10", + "score": -0.10631211468506467, + "first_commit": "2021-11-02 12:54:50", + "latest_commit": "2021-11-03 23:44:50", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "deberta-base-japanese-unidic Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-base-japanese-unidic", - "project_name": "deberta-base-japanese-unidic", + "model_or_dataset": "model", + "model_size": null + }, + { + "description": "Wav2Vec2-Large-XLSR-53-Japanese Fine-tuned facebook/wav2vec2-large-xlsr-53 on Japanese using the Common Voice and Japanese speech corpus of Saruwatari-lab, University of Tokyo JSUT.", + "url": 
"https://huggingface.co./vumichien/wav2vec2-large-xlsr-japanese", + "project_name": "wav2vec2-large-xlsr-japanese", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2022-06-08 08:05:33", - "latest_commit": "2022-06-18 23:02:31", + "score": -0.10631211468506467, + "first_commit": "2021-03-28 04:21:20", + "latest_commit": "2023-02-08 00:15:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 0.318 }, { "description": "dolly-japanese-gpt-1b-clone 概要 rinna社の「japanese-gpt-1b」を、日本語データセット「databricks-dolly-15k-ja」を使用して学習させた推論モデルです。 ", @@ -12374,71 +13173,77 @@ "project_name": "dolly-japanese-gpt-1b-clone", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, + "score": -0.10631211468506467, "first_commit": "2023-05-06 03:16:54", "latest_commit": "2023-05-07 15:47:23", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.33 }, { - "description": "Japanese Stable LM Instruct Gamma 7B Model Description", - "url": "https://huggingface.co./LoneStriker/stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", - "project_name": "stabilityai_japanese-stablelm-instruct-gamma-7b-5.0bpw-h6-exl2", + "description": "Chatvector-llava-v1.5-plus-Houou-v3-7b Model Card Model Details ※好奇心から生まれたモデルです。", + "url": "https://huggingface.co./shinyice/chatvector-llava-v1.5-plus-houou-v3-7b", + "project_name": "chatvector-llava-v1.5-plus-houou-v3-7b", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2023-10-28 20:29:59", - "latest_commit": "2023-10-28 15:30:13", + "score": -0.10631211468506467, + "first_commit": "2024-06-04 04:24:06", + "latest_commit": "2024-06-04 05:12:10", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 7.06 }, { - "description": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1 English description here 概要 Llama-2ベースの学習済み日本語モデルであるelyza/ELYZA-japanese-Llama-2-7bと、そのinstruction tuningモデルであるelyza/ELYZA-japanese-Llama-2-7b-instruct を、mergekitを使ってMoEを行い作成したモデルです。 ", - "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1", - "project_name": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1", + "description": "gpt2-medium-japanese-upos Model Description", + "url": "https://huggingface.co./KoichiYasuoka/gpt2-medium-japanese-upos", + "project_name": "gpt2-medium-japanese-upos", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-03-06 11:07:05", - "latest_commit": "2024-03-19 02:31:33", + "score": -0.10631211468506467, + "first_commit": "2024-06-22 22:39:14", + "latest_commit": "2024-07-27 07:49:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "TigerBot-7B Japanese", - "url": "https://huggingface.co./atsuki-yamaguchi/tigerbot-7b-base-random-ja", - "project_name": "tigerbot-7b-base-random-ja", + "description": "TinySlime-1.1B-v1.0 TinySlime は日本語に特化した小規模言語モデルです。 ", + "url": "https://huggingface.co./2121-8/TinySlime-1.1B-v1.0", + "project_name": "TinySlime-1.1B-v1.0", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-04-21 16:43:53", - "latest_commit": "2024-04-22 09:05:05", + "score": -0.10631211468506467, + "first_commit": "2024-06-14 23:46:31", + "latest_commit": "2024-07-02 03:58:25", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.1 }, { - 
"description": "gpt2-large-japanese-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/gpt2-large-japanese-upos", - "project_name": "gpt2-large-japanese-upos", + "description": "TigerBot-7B Japanese", + "url": "https://huggingface.co./atsuki-yamaguchi/tigerbot-7b-base-random-ja", + "project_name": "tigerbot-7b-base-random-ja", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-06-22 22:44:06", - "latest_commit": "2024-07-27 07:49:47", + "score": -0.10631211468506467, + "first_commit": "2024-04-21 16:43:53", + "latest_commit": "2024-04-22 09:05:05", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 6.74 }, { - "description": "概要 GLM-4-9B-Chatを、日本語のWikiデータを選定し、追加学習した日本語に非常に強いスコアを出したモデルです。 ", - "url": "https://huggingface.co./HODACHI/glm-4-9b-chat-FT-ja-v0.3", - "project_name": "glm-4-9b-chat-FT-ja-v0.3", + "description": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1 English description here 概要 Llama-2ベースの学習済み日本語モデルであるelyza/ELYZA-japanese-Llama-2-7bと、そのinstruction tuningモデルであるelyza/ELYZA-japanese-Llama-2-7b-instruct を、mergekitを使ってMoEを行い作成したモデルです。 ", + "url": "https://huggingface.co./Aratako/ELYZA-japanese-Llama-2-MoE-2x7B-v0.1", + "project_name": "ELYZA-japanese-Llama-2-MoE-2x7B-v0.1", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-06-07 03:33:33", - "latest_commit": "2024-06-09 23:37:02", + "score": -0.10631211468506467, + "first_commit": "2024-03-06 11:07:05", + "latest_commit": "2024-03-19 02:31:33", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 11.1 }, { "description": "Orion-14B 🌐English | 🇨", @@ -12446,11 +13251,12 @@ "project_name": "Orion-14B-Chat-RAG-safetensors", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, + "score": -0.10631211468506467, "first_commit": "2024-01-25 00:05:06", "latest_commit": "2024-01-25 02:09:15", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 14.5 }, { "description": "モデルの概要 line-corporation/japanese-large-lm-1.7bのベースモデルに対し,sftによるfull instruction tuningを行いました.", @@ -12458,23 +13264,12 @@ "project_name": "line-japanese-large-lm-1.7b-kunishou-databricks-dolly-15k-ja-full-instruction-sft", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, + "score": -0.10631211468506467, "first_commit": "2024-01-20 12:35:16", "latest_commit": "2024-01-20 12:46:21", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "Shisa 7B Shisa 7B (shisa-7b-v1)", - "url": "https://huggingface.co./LoneStriker/shisa-7b-v1-5.0bpw-h6-exl2", - "project_name": "shisa-7b-v1-5.0bpw-h6-exl2", - "downloads": 11, - "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2023-12-07 18:07:21", - "latest_commit": "2023-12-07 18:54:27", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": 1.65 }, { "description": "COMET-GPT2 ja v2 Finetuned GPT-2 on the large version of ATOMIC ja using a causal language modeling (CLM) objective.", @@ -12482,121 +13277,90 @@ "project_name": "comet-v2-gpt2-small-japanese", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, + "score": -0.10631211468506467, "first_commit": "2023-03-05 13:39:03", "latest_commit": "2023-03-14 16:56:19", "languages": [], - "model_or_dataset": "model" - }, - { - "description": "deberta-large-japanese-wikipedia-ud-goeswith Model 
Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-wikipedia-ud-goeswith", - "project_name": "deberta-large-japanese-wikipedia-ud-goeswith", - "downloads": 11, - "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2022-09-18 08:41:06", - "latest_commit": "2023-05-12 01:29:13", - "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "deberta-large-japanese-luw-upos Model Description", - "url": "https://huggingface.co./KoichiYasuoka/deberta-large-japanese-luw-upos", - "project_name": "deberta-large-japanese-luw-upos", + "description": "モデル説明 (model explanation) YaguruMagiku 0.6 : AbyssOrangeMix2_sfw 0.4 マージ元のルーツにNAIリークが含まれるという噂があるので、NAIリークアンチには非推奨 理想の黒髪ポニテ顔が出せるYaguruMagikuを、ある程度顔が近くて制御しやすいAbyssOrangeMix2と混ぜてみた。 ", + "url": "https://huggingface.co./ThePioneer/MoeDiffusion", + "project_name": "MoeDiffusion", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2022-05-26 14:52:32", - "latest_commit": "2023-01-14 23:15:30", + "score": -0.10631211468506467, + "first_commit": "2023-01-18 11:14:31", + "latest_commit": "2023-01-21 02:10:41", "languages": [], - "model_or_dataset": "model" + "model_or_dataset": "model", + "model_size": null }, { - "description": "Japanese-Heron-Bench Dataset Description Japanese-Heron-Bench is a benchmark for evaluating Japanese VLMs (Vision-Language Models).", - "url": "https://huggingface.co./datasets/turing-motors/Japanese-Heron-Bench", - "project_name": "Japanese-Heron-Bench", + "description": "ESを書くAI Japanese GPT-2 modelをファインチューニングしました。 ", + "url": "https://huggingface.co./huranokuma/es_IT", + "project_name": "es_IT", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-04-12 01:54:01", - "latest_commit": "2024-04-12 08:59:36", + "score": -0.10631211468506467, + "first_commit": "2022-08-13 06:48:58", + "latest_commit": "2022-08-14 05:47:05", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "This is a Japanese portion of the Guanaco dataset.", - "url": "https://huggingface.co./datasets/fujiki/guanaco_ja", - "project_name": "guanaco_ja", + "description": "bert-base-japanese-char-extended Model Description", + "url": "https://huggingface.co./KoichiYasuoka/bert-base-japanese-char-extended", + "project_name": "bert-base-japanese-char-extended", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2023-06-24 08:27:30", - "latest_commit": "2023-07-16 15:01:30", + "score": -0.10631211468506467, + "first_commit": "2021-08-26 22:44:12", + "latest_commit": "2022-06-21 07:21:54", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": null }, { - "description": "cyberagent/calm2-7b-chatの出力を人手でチェック・修正することで作成した日本語Instructionデータセットです。 ", - "url": "https://huggingface.co./datasets/Kendamarron/jimba-instuction-1k-beta", - "project_name": "jimba-instuction-1k-beta", + "description": "Ninja-v1-RP-WIP 概要 Local-Novel-LLM-project/Ninja-v1-NSFWをロールプレイ用にLoRAでファインチューニングしたモデルです。 ", + "url": "https://huggingface.co./Aratako/Ninja-v1-RP-WIP", + "project_name": "Ninja-v1-RP-WIP", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-02-29 15:23:48", - "latest_commit": "2024-04-25 12:49:28", + "score": -0.10631211468506467, + "first_commit": "2024-05-19 15:31:02", + 
"latest_commit": "2024-05-20 16:56:00", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "model", + "model_size": 7.24 }, { - "description": "NVIDIA が公開している SteerLM 向けのトライアルデータセット HelpSteer2を日本語に自動翻訳したデータセットになります。", - "url": "https://huggingface.co./datasets/kunishou/HelpSteer2-20k-ja", - "project_name": "HelpSteer2-20k-ja", + "description": "HF Datasets version of Tanaka Corpus.", + "url": "https://huggingface.co./datasets/hpprc/tanaka-corpus", + "project_name": "tanaka-corpus", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-06-21 08:09:33", - "latest_commit": "2024-06-21 08:44:21", + "score": -0.10631211468506467, + "first_commit": "2024-03-21 01:49:50", + "latest_commit": "2024-03-21 12:50:28", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { - "description": "cosmopedia-japanese-20kのデータに、kunishou様から20k-100kをご提供いただけることになり100kまで拡大しました。 ", - "url": "https://huggingface.co./datasets/aixsatoshi/cosmopedia-japanese-100k", - "project_name": "cosmopedia-japanese-100k", + "description": "データセットについて オープンソースLLMの出力を人手でチェック・修正したinstructionにSwallow-MXでoutputを生成したデータセットです。 ", + "url": "https://huggingface.co./datasets/Kendamarron/pret-a-porter-instruction-v0.1", + "project_name": "pret-a-porter-instruction-v0.1", "downloads": 11, "source": "Hugging Face", - "score": -0.10455910605366416, - "first_commit": "2024-03-03 16:06:15", - "latest_commit": "2024-03-03 16:20:35", - "languages": [], - "model_or_dataset": "dataset" - }, - { - "description": "Python library for CJK (Chinese, Japanese, and Korean) language dictionary", - "url": "https://github.com/cihai/cihai", - "project_name": "cihai", - "stargazers_count": 80, - "source": "GitHub", - "score": -0.1046858475076997, - "first_commit": "2013-12-03 09:42:52", - "latest_commit": "2024-08-10 13:13:53", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, - { - "description": "Neologism dictionary based on the language resources on the Web for mecab-unidic", - "url": "https://github.com/neologd/mecab-unidic-neologd", - "project_name": "mecab-unidic-neologd", - "stargazers_count": 80, - "source": "GitHub", - "score": -0.1046858475076997, - "first_commit": "2015-03-19 10:52:02", - "latest_commit": "2020-09-14 19:58:39", + "score": -0.10631211468506467, + "first_commit": "2024-03-26 13:08:14", + "latest_commit": "2024-04-01 04:30:44", "languages": [], - "model_or_dataset": "dataset" + "model_or_dataset": "dataset", + "model_size": null }, { "description": "Japanese Dictionary", @@ -12604,7 +13368,7 @@ "project_name": "nihongo", "stargazers_count": 78, "source": "GitHub", - "score": -0.1105220594294721, + "score": -0.1105755451440999, "first_commit": "2013-09-03 00:22:50", "latest_commit": "2024-02-07 18:36:24", "languages": [ @@ -12619,7 +13383,7 @@ "project_name": "JESC", "stargazers_count": 78, "source": "GitHub", - "score": -0.1105220594294721, + "score": -0.1105755451440999, "first_commit": "2017-10-25 07:41:35", "latest_commit": "2017-10-31 21:08:56", "languages": [ @@ -12633,7 +13397,7 @@ "project_name": "goya", "stargazers_count": 77, "source": "GitHub", - "score": -0.1134401653903583, + "score": -0.11349858564182218, "first_commit": "2021-09-08 19:51:11", "latest_commit": "2021-12-30 19:44:15", "languages": [ @@ -12649,7 +13413,7 @@ "project_name": "SudachiTra", "stargazers_count": 77, "source": "GitHub", - "score": -0.1134401653903583, + "score": -0.11349858564182218, "first_commit": "2021-06-22 
19:48:29", "latest_commit": "2023-12-15 08:13:45", "languages": [ @@ -12663,7 +13427,7 @@ "project_name": "emoji-ja", "stargazers_count": 77, "source": "GitHub", - "score": -0.1134401653903583, + "score": -0.11349858564182218, "first_commit": "2018-08-24 08:25:08", "latest_commit": "2023-05-09 14:57:44", "languages": [ @@ -12677,7 +13441,7 @@ "project_name": "KWDLC", "stargazers_count": 75, "source": "GitHub", - "score": -0.1192763773121307, + "score": -0.11934466663726677, "first_commit": "2015-05-20 19:13:17", "latest_commit": "2023-12-18 14:13:14", "languages": [ @@ -12691,7 +13455,7 @@ "project_name": "llm-japanese-dataset", "stargazers_count": 75, "source": "GitHub", - "score": -0.1192763773121307, + "score": -0.11934466663726677, "first_commit": "2023-04-19 14:34:02", "latest_commit": "2024-01-23 09:37:30", "languages": [ @@ -12705,7 +13469,7 @@ "project_name": "jrte-corpus", "stargazers_count": 75, "source": "GitHub", - "score": -0.1192763773121307, + "score": -0.11934466663726677, "first_commit": "2020-10-15 14:59:37", "latest_commit": "2023-06-23 14:06:26", "languages": [ @@ -12719,7 +13483,7 @@ "project_name": "JMTrans", "stargazers_count": 74, "source": "GitHub", - "score": -0.12219448327301691, + "score": -0.12226770713498906, "first_commit": "2020-08-19 23:30:03", "latest_commit": "2021-01-16 21:44:37", "languages": [ @@ -12734,7 +13498,7 @@ "project_name": "Dialog", "stargazers_count": 73, "source": "GitHub", - "score": -0.12511258923390312, + "score": -0.12519074763271135, "first_commit": "2019-09-12 13:05:51", "latest_commit": "2020-10-01 17:25:08", "languages": [ @@ -12749,7 +13513,7 @@ "project_name": "jmdict-yomitan", "stargazers_count": 73, "source": "GitHub", - "score": -0.12511258923390312, + "score": -0.12519074763271135, "first_commit": "2023-09-24 20:01:40", "latest_commit": "2024-07-30 10:44:53", "languages": [ @@ -12757,13 +13521,25 @@ ], "model_or_dataset": "dataset" }, + { + "description": "「言語処理100本ノック 2020」をPythonで解く", + "url": "https://github.com/upura/nlp100v2020", + "project_name": "nlp100v2020", + "stargazers_count": 73, + "source": "GitHub", + "score": -0.12519074763271135, + "first_commit": null, + "latest_commit": null, + "languages": [], + "model_or_dataset": null + }, { "description": "Laboro BERT Japanese: Japanese BERT Pre-Trained With Web-Corpus", "url": "https://github.com/laboroai/Laboro-BERT-Japanese", "project_name": "Laboro-BERT-Japanese", "stargazers_count": 72, "source": "GitHub", - "score": -0.1280306951947893, + "score": -0.12811378813043364, "first_commit": "2020-03-31 12:05:07", "latest_commit": "2022-05-12 17:06:31", "languages": [ @@ -12777,7 +13553,7 @@ "project_name": "resembla", "stargazers_count": 71, "source": "GitHub", - "score": -0.1309488011556755, + "score": -0.13103682862815594, "first_commit": "2017-07-24 17:07:39", "latest_commit": "2019-01-27 00:08:52", "languages": [ @@ -12793,7 +13569,7 @@ "project_name": "unidic-py", "stargazers_count": 71, "source": "GitHub", - "score": -0.1309488011556755, + "score": -0.13103682862815594, "first_commit": "2020-01-05 16:19:49", "latest_commit": "2023-06-16 20:50:30", "languages": [ @@ -12807,7 +13583,7 @@ "project_name": "wikipedia-utils", "stargazers_count": 71, "source": "GitHub", - "score": -0.1309488011556755, + "score": -0.13103682862815594, "first_commit": "2022-01-09 16:42:14", "latest_commit": "2024-04-10 08:41:09", "languages": [ @@ -12821,7 +13597,7 @@ "project_name": "TinySegmenterMaker", "stargazers_count": 69, "source": "GitHub", - "score": -0.13678501307744792, + 
"score": -0.13688290962360053, "first_commit": "2012-11-15 22:24:06", "latest_commit": "2022-09-30 13:41:19", "languages": [ @@ -12843,7 +13619,7 @@ "project_name": "aozorabunko_text", "stargazers_count": 69, "source": "GitHub", - "score": -0.13678501307744792, + "score": -0.13688290962360053, "first_commit": "2019-02-11 03:06:07", "latest_commit": "2023-03-22 01:21:29", "languages": [ @@ -12857,7 +13633,7 @@ "project_name": "5ch-analysis", "stargazers_count": 67, "source": "GitHub", - "score": -0.1426212249992203, + "score": -0.14272899061904512, "first_commit": "2018-11-11 16:58:44", "latest_commit": "2018-11-11 23:37:16", "languages": [ @@ -12871,7 +13647,7 @@ "project_name": "wana_kana_rust", "stargazers_count": 67, "source": "GitHub", - "score": -0.1426212249992203, + "score": -0.14272899061904512, "first_commit": "2018-02-02 18:39:03", "latest_commit": "2023-01-19 21:51:26", "languages": [ @@ -12886,7 +13662,7 @@ "project_name": "japanese-clip", "stargazers_count": 66, "source": "GitHub", - "score": -0.14553933096010652, + "score": -0.1456520311167674, "first_commit": "2022-04-25 17:19:28", "latest_commit": "2022-07-19 18:20:52", "languages": [ @@ -12900,7 +13676,7 @@ "project_name": "dic-nico-intersection-pixiv", "stargazers_count": 66, "source": "GitHub", - "score": -0.14553933096010652, + "score": -0.1456520311167674, "first_commit": "2017-03-09 08:44:44", "latest_commit": "2024-04-19 13:29:16", "languages": [ @@ -12914,7 +13690,7 @@ "project_name": "mouse_over_dictionary", "stargazers_count": 66, "source": "GitHub", - "score": -0.14553933096010652, + "score": -0.1456520311167674, "first_commit": "2020-01-09 20:26:17", "latest_commit": "2020-01-24 08:57:39", "languages": [ @@ -12928,7 +13704,7 @@ "project_name": "BSD", "stargazers_count": 66, "source": "GitHub", - "score": -0.14553933096010652, + "score": -0.1456520311167674, "first_commit": "2020-07-25 01:04:11", "latest_commit": "2021-11-10 21:33:34", "languages": [], @@ -12940,7 +13716,7 @@ "project_name": "neural_ime", "stargazers_count": 65, "source": "GitHub", - "score": -0.14845743692099272, + "score": -0.14857507161448968, "first_commit": "2016-10-31 15:23:42", "latest_commit": "2016-12-27 21:10:30", "languages": [ @@ -12954,7 +13730,7 @@ "project_name": "cskk", "stargazers_count": 65, "source": "GitHub", - "score": -0.14845743692099272, + "score": -0.14857507161448968, "first_commit": "2018-06-17 15:36:26", "latest_commit": "2024-03-10 13:45:41", "languages": [ @@ -12969,7 +13745,7 @@ "project_name": "ibus-hiragana", "stargazers_count": 64, "source": "GitHub", - "score": -0.1513755428818789, + "score": -0.15149811211221198, "first_commit": "2017-04-28 03:50:59", "latest_commit": "2024-08-15 04:09:53", "languages": [ @@ -12983,7 +13759,7 @@ "project_name": "pdmocrdataset-part1", "stargazers_count": 64, "source": "GitHub", - "score": -0.1513755428818789, + "score": -0.15149811211221198, "first_commit": "2022-04-20 11:55:33", "latest_commit": "2024-06-26 16:10:44", "languages": [], @@ -12995,7 +13771,7 @@ "project_name": "ja_sentence_segmenter", "stargazers_count": 63, "source": "GitHub", - "score": -0.1542936488427651, + "score": -0.15442115260993428, "first_commit": "2019-12-15 13:50:07", "latest_commit": "2023-04-03 13:09:20", "languages": [ @@ -13009,7 +13785,7 @@ "project_name": "daaja", "stargazers_count": 63, "source": "GitHub", - "score": -0.1542936488427651, + "score": -0.15442115260993428, "first_commit": "2022-02-12 20:22:34", "latest_commit": "2023-02-16 19:39:30", "languages": [ @@ -13024,7 +13800,7 @@ 
"project_name": "nlp-recipes-ja", "stargazers_count": 63, "source": "GitHub", - "score": -0.1542936488427651, + "score": -0.15442115260993428, "first_commit": "2020-08-01 09:09:07", "latest_commit": "2021-04-11 08:07:45", "languages": [ @@ -13039,7 +13815,7 @@ "project_name": "spacy_tutorial", "stargazers_count": 63, "source": "GitHub", - "score": -0.1542936488427651, + "score": -0.15442115260993428, "first_commit": "2019-12-29 04:28:30", "latest_commit": "2020-01-24 20:02:24", "languages": [ @@ -13053,7 +13829,7 @@ "project_name": "gptuber-by-langchain", "stargazers_count": 62, "source": "GitHub", - "score": -0.15721175480365132, + "score": -0.15734419310765657, "first_commit": "2023-01-07 00:37:20", "latest_commit": "2023-01-07 00:37:20", "languages": [ @@ -13067,7 +13843,7 @@ "project_name": "gptuber-by-langchain", "stargazers_count": 62, "source": "GitHub", - "score": -0.15721175480365132, + "score": -0.15734419310765657, "first_commit": "2023-01-07 00:37:20", "latest_commit": "2023-01-07 00:37:20", "languages": [ @@ -13081,7 +13857,7 @@ "project_name": "fugumt", "stargazers_count": 61, "source": "GitHub", - "score": -0.16012986076453753, + "score": -0.16026723360537887, "first_commit": "2021-01-02 20:35:49", "latest_commit": "2021-02-28 11:46:52", "languages": [ @@ -13095,7 +13871,7 @@ "project_name": "jakaroma", "stargazers_count": 61, "source": "GitHub", - "score": -0.16012986076453753, + "score": -0.16026723360537887, "first_commit": "2016-04-11 18:21:38", "latest_commit": "2021-03-30 23:21:16", "languages": [ @@ -13109,7 +13885,7 @@ "project_name": "jageocoder", "stargazers_count": 60, "source": "GitHub", - "score": -0.1630479667254237, + "score": -0.16319027410310116, "first_commit": "2021-02-20 17:31:56", "latest_commit": "2024-07-03 06:01:58", "languages": [ @@ -13123,7 +13899,7 @@ "project_name": "llm-leaderboard", "stargazers_count": 60, "source": "GitHub", - "score": -0.1630479667254237, + "score": -0.16319027410310116, "first_commit": "2023-06-27 15:09:25", "latest_commit": "2024-08-07 00:38:16", "languages": [ @@ -13137,7 +13913,7 @@ "project_name": "chatgpt-slackbot", "stargazers_count": 60, "source": "GitHub", - "score": -0.1630479667254237, + "score": -0.16319027410310116, "first_commit": "2022-12-06 22:50:09", "latest_commit": "2024-07-22 18:50:41", "languages": [ @@ -13151,7 +13927,7 @@ "project_name": "IOB2Corpus", "stargazers_count": 60, "source": "GitHub", - "score": -0.1630479667254237, + "score": -0.16319027410310116, "first_commit": "2016-01-29 09:21:25", "latest_commit": "2020-02-25 09:34:11", "languages": [], @@ -13163,7 +13939,7 @@ "project_name": "japanese", "stargazers_count": 60, "source": "GitHub", - "score": -0.1630479667254237, + "score": -0.16319027410310116, "first_commit": "2018-09-13 21:10:10", "latest_commit": "2018-09-13 22:02:23", "languages": [], @@ -13175,7 +13951,7 @@ "project_name": "japanese-numerals-to-number", "stargazers_count": 58, "source": "GitHub", - "score": -0.16888417864719613, + "score": -0.16903635509854573, "first_commit": "2017-02-25 22:53:18", "latest_commit": "2023-02-17 01:34:12", "languages": [ @@ -13189,7 +13965,7 @@ "project_name": "simple-simcse-ja", "stargazers_count": 57, "source": "GitHub", - "score": -0.17180228460808233, + "score": -0.17195939559626802, "first_commit": "2022-11-11 19:05:53", "latest_commit": "2023-10-31 14:18:17", "languages": [ @@ -13203,7 +13979,7 @@ "project_name": "mecab-rs", "stargazers_count": 55, "source": "GitHub", - "score": -0.17763849652985472, + "score": -0.1778054765917126, 
"first_commit": "2015-04-19 09:30:14", "latest_commit": "2023-09-03 22:03:49", "languages": [ @@ -13217,7 +13993,7 @@ "project_name": "YouyakuMan", "stargazers_count": 53, "source": "GitHub", - "score": -0.18347470845162714, + "score": -0.1836515575871572, "first_commit": "2019-10-29 17:43:01", "latest_commit": "2020-09-02 13:37:05", "languages": [ @@ -13231,7 +14007,7 @@ "project_name": "dvorakjp-romantable", "stargazers_count": 53, "source": "GitHub", - "score": -0.18347470845162714, + "score": -0.1836515575871572, "first_commit": "2015-10-11 16:49:41", "latest_commit": "2024-06-24 12:24:34", "languages": [ @@ -13245,7 +14021,7 @@ "project_name": "mozcdict-ext", "stargazers_count": 52, "source": "GitHub", - "score": -0.18639281441251332, + "score": -0.1865745980848795, "first_commit": "2023-01-12 18:13:26", "latest_commit": "2024-07-13 17:37:43", "languages": [ @@ -13259,7 +14035,7 @@ "project_name": "japanese-lm-fin-harness", "stargazers_count": 51, "source": "GitHub", - "score": -0.18931092037339953, + "score": -0.1894976385826018, "first_commit": "2023-09-28 10:48:05", "latest_commit": "2024-08-07 13:01:57", "languages": [ @@ -13273,7 +14049,7 @@ "project_name": "kakasi-java", "stargazers_count": 51, "source": "GitHub", - "score": -0.18931092037339953, + "score": -0.1894976385826018, "first_commit": "2012-01-18 17:33:15", "latest_commit": "2016-04-13 15:56:34", "languages": [ @@ -13287,7 +14063,7 @@ "project_name": "chikkarpy", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2021-05-24 17:10:56", "latest_commit": "2022-02-07 15:11:36", "languages": [ @@ -13301,7 +14077,7 @@ "project_name": "Retrieval-based-Voice-Conversion-WebUI-JP-localization", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2023-03-27 17:59:11", "latest_commit": "2023-04-11 11:08:47", "languages": [ @@ -13316,7 +14092,7 @@ "project_name": "lindera-tantivy", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2020-02-19 10:37:34", "latest_commit": "2023-12-02 21:03:48", "languages": [ @@ -13330,7 +14106,7 @@ "project_name": "kuroshiro-analyzer-kuromoji", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2018-03-09 17:41:37", "latest_commit": "2018-08-05 12:41:55", "languages": [ @@ -13344,7 +14120,7 @@ "project_name": "yomichan-jlpt-vocab", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2021-09-01 18:36:57", "latest_commit": "2023-04-06 22:29:12", "languages": [ @@ -13358,7 +14134,7 @@ "project_name": "rohan4600", "stargazers_count": 50, "source": "GitHub", - "score": -0.19222902633428574, + "score": -0.1924206790803241, "first_commit": "2021-07-31 23:43:43", "latest_commit": "2023-02-01 12:51:29", "languages": [], @@ -13370,7 +14146,7 @@ "project_name": "fasttext-vs-word2vec-on-twitter-data", "stargazers_count": 49, "source": "GitHub", - "score": -0.19514713229517194, + "score": -0.19534371957804636, "first_commit": "2017-03-30 23:10:04", "latest_commit": "2017-08-23 10:53:09", "languages": [ @@ -13384,7 +14160,7 @@ "project_name": "embedrank", "stargazers_count": 48, "source": "GitHub", - "score": -0.19806523825605812, + "score": -0.19826676007576866, "first_commit": "2019-02-01 11:40:50", "latest_commit": "2019-03-19 09:05:41", 
"languages": [ @@ -13398,7 +14174,7 @@ "project_name": "japanese-llama-experiment", "stargazers_count": 48, "source": "GitHub", - "score": -0.19806523825605812, + "score": -0.19826676007576866, "first_commit": "2023-06-28 17:43:53", "latest_commit": "2024-03-10 23:31:45", "languages": [ @@ -13414,7 +14190,7 @@ "project_name": "esupar", "stargazers_count": 47, "source": "GitHub", - "score": -0.20098334421694433, + "score": -0.20118980057349095, "first_commit": "2021-09-18 07:28:30", "latest_commit": "2024-08-15 20:37:47", "languages": [ @@ -13429,7 +14205,7 @@ "project_name": "japanese-llm-ranking", "stargazers_count": 47, "source": "GitHub", - "score": -0.20098334421694433, + "score": -0.20118980057349095, "first_commit": "2023-06-28 18:31:52", "latest_commit": "2024-03-04 18:17:06", "languages": [ @@ -13444,7 +14220,7 @@ "project_name": "tinysegmenter", "stargazers_count": 47, "source": "GitHub", - "score": -0.20098334421694433, + "score": -0.20118980057349095, "first_commit": "2014-07-04 17:23:23", "latest_commit": "2015-11-03 21:49:19", "languages": [ @@ -13458,7 +14234,7 @@ "project_name": "jslingua", "stargazers_count": 47, "source": "GitHub", - "score": -0.20098334421694433, + "score": -0.20118980057349095, "first_commit": "2016-03-22 10:52:37", "latest_commit": "2023-10-19 22:01:23", "languages": [ @@ -13473,7 +14249,7 @@ "project_name": "LLaVA-JP", "stargazers_count": 46, "source": "GitHub", - "score": -0.20390145017783054, + "score": -0.20411284107121325, "first_commit": "2023-12-01 12:26:17", "latest_commit": "2024-06-05 23:42:35", "languages": [ @@ -13487,7 +14263,7 @@ "project_name": "language-pretraining", "stargazers_count": 46, "source": "GitHub", - "score": -0.20390145017783054, + "score": -0.20411284107121325, "first_commit": "2021-07-07 12:07:22", "latest_commit": "2023-05-19 23:15:30", "languages": [ @@ -13501,7 +14277,7 @@ "project_name": "azure-search-openai-demo", "stargazers_count": 46, "source": "GitHub", - "score": -0.20390145017783054, + "score": -0.20411284107121325, "first_commit": "2023-02-08 13:00:55", "latest_commit": "2023-12-07 18:16:07", "languages": [ @@ -13517,7 +14293,7 @@ "project_name": "jawiki-kana-kanji-dict", "stargazers_count": 46, "source": "GitHub", - "score": -0.20390145017783054, + "score": -0.20411284107121325, "first_commit": "2020-08-23 02:36:22", "latest_commit": "2024-08-09 14:54:51", "languages": [ @@ -13531,7 +14307,7 @@ "project_name": "google-ime-user-dictionary-ja-en", "stargazers_count": 45, "source": "GitHub", - "score": -0.20681955613871675, + "score": -0.20703588156893554, "first_commit": "2009-12-11 10:24:53", "latest_commit": "2016-12-23 19:44:09", "languages": [], @@ -13543,7 +14319,7 @@ "project_name": "LINE-DistilBERT-Japanese", "stargazers_count": 44, "source": "GitHub", - "score": -0.20973766209960293, + "score": -0.20995892206665784, "first_commit": "2023-03-09 18:50:06", "latest_commit": "2023-03-22 15:09:22", "languages": [], @@ -13555,7 +14331,7 @@ "project_name": "JSICK", "stargazers_count": 44, "source": "GitHub", - "score": -0.20973766209960293, + "score": -0.20995892206665784, "first_commit": "2021-05-24 18:12:15", "latest_commit": "2023-05-31 17:48:45", "languages": [], @@ -13567,7 +14343,7 @@ "project_name": "Convert-Numbers-to-Japanese", "stargazers_count": 43, "source": "GitHub", - "score": -0.21265576806048914, + "score": -0.21288196256438013, "first_commit": "2017-03-24 12:30:39", "latest_commit": "2020-11-26 16:37:30", "languages": [ @@ -13581,7 +14357,7 @@ "project_name": "japanese-daily-dialogue", 
"stargazers_count": 43, "source": "GitHub", - "score": -0.21265576806048914, + "score": -0.21288196256438013, "first_commit": "2023-03-15 16:53:41", "latest_commit": "2023-03-17 18:53:28", "languages": [], @@ -13593,7 +14369,7 @@ "project_name": "namaco", "stargazers_count": 41, "source": "GitHub", - "score": -0.21849197998226155, + "score": -0.21872804355982473, "first_commit": "2017-10-11 09:53:23", "latest_commit": "2018-02-09 06:27:36", "languages": [ @@ -13608,7 +14384,7 @@ "project_name": "llm-jp-sft", "stargazers_count": 41, "source": "GitHub", - "score": -0.21849197998226155, + "score": -0.21872804355982473, "first_commit": "2023-09-22 14:30:09", "latest_commit": "2024-06-13 13:17:38", "languages": [ @@ -13622,7 +14398,7 @@ "project_name": "node-romaji-name", "stargazers_count": 41, "source": "GitHub", - "score": -0.21849197998226155, + "score": -0.21872804355982473, "first_commit": "2013-08-24 10:50:11", "latest_commit": "2023-12-27 13:27:03", "languages": [ @@ -13636,7 +14412,7 @@ "project_name": "japanese-toolkit", "stargazers_count": 41, "source": "GitHub", - "score": -0.21849197998226155, + "score": -0.21872804355982473, "first_commit": "2020-07-09 05:58:18", "latest_commit": "2023-01-08 23:53:43", "languages": [ @@ -13651,7 +14427,7 @@ "project_name": "make-meidai-dialogue", "stargazers_count": 40, "source": "GitHub", - "score": -0.22141008594314773, + "score": -0.221651084057547, "first_commit": "2016-11-10 13:45:38", "latest_commit": "2017-09-29 07:53:24", "languages": [ @@ -13665,7 +14441,7 @@ "project_name": "vits-japros-webui", "stargazers_count": 40, "source": "GitHub", - "score": -0.22141008594314773, + "score": -0.221651084057547, "first_commit": "2023-09-30 17:09:14", "latest_commit": "2024-01-05 22:42:43", "languages": [ @@ -13681,7 +14457,7 @@ "project_name": "bert-japanese-aozora", "stargazers_count": 40, "source": "GitHub", - "score": -0.22141008594314773, + "score": -0.221651084057547, "first_commit": "2020-03-08 10:20:43", "latest_commit": "2020-08-08 12:06:20", "languages": [], @@ -13693,7 +14469,7 @@ "project_name": "koniwa", "stargazers_count": 40, "source": "GitHub", - "score": -0.22141008594314773, + "score": -0.221651084057547, "first_commit": "2021-10-29 21:19:06", "latest_commit": "2024-08-02 10:48:21", "languages": [ @@ -13708,7 +14484,7 @@ "project_name": "DocumentClassificationUsingBERT-Japanese", "stargazers_count": 40, "source": "GitHub", - "score": -0.22141008594314773, + "score": -0.221651084057547, "first_commit": "2019-12-16 00:55:48", "latest_commit": "2021-01-29 10:59:18", "languages": [ @@ -13723,7 +14499,7 @@ "project_name": "Japanese-BPEEncoder", "stargazers_count": 39, "source": "GitHub", - "score": -0.22432819190403394, + "score": -0.2245741245552693, "first_commit": "2020-10-03 12:21:03", "latest_commit": "2021-09-12 09:58:42", "languages": [ @@ -13737,7 +14513,7 @@ "project_name": "kanjigrid", "stargazers_count": 39, "source": "GitHub", - "score": -0.22432819190403394, + "score": -0.2245741245552693, "first_commit": "2018-10-26 15:47:29", "latest_commit": "2018-11-19 14:14:00", "languages": [ @@ -13751,7 +14527,7 @@ "project_name": "t5-japanese", "stargazers_count": 39, "source": "GitHub", - "score": -0.22432819190403394, + "score": -0.2245741245552693, "first_commit": "2021-08-25 09:55:16", "latest_commit": "2021-09-07 14:11:02", "languages": [ @@ -13765,7 +14541,7 @@ "project_name": "emoticon", "stargazers_count": 39, "source": "GitHub", - "score": -0.22432819190403394, + "score": -0.2245741245552693, "first_commit": "2013-12-29 
17:16:16", "latest_commit": "2020-05-07 13:36:42", "languages": [ @@ -13779,7 +14555,7 @@ "project_name": "llm-jp-corpus", "stargazers_count": 39, "source": "GitHub", - "score": -0.22432819190403394, + "score": -0.2245741245552693, "first_commit": "2023-06-14 13:21:33", "latest_commit": "2023-10-23 10:04:18", "languages": [ @@ -13793,7 +14569,7 @@ "project_name": "xvector_jtubespeech", "stargazers_count": 38, "source": "GitHub", - "score": -0.22724629786492015, + "score": -0.22749716505299158, "first_commit": "2022-03-08 11:00:20", "latest_commit": "2023-11-05 14:48:26", "languages": [ @@ -13807,7 +14583,7 @@ "project_name": "IgakuQA", "stargazers_count": 38, "source": "GitHub", - "score": -0.22724629786492015, + "score": -0.22749716505299158, "first_commit": "2023-03-31 10:29:20", "latest_commit": "2023-03-31 10:29:20", "languages": [ @@ -13821,7 +14597,7 @@ "project_name": "Japanese-BPEEncoder_V2", "stargazers_count": 37, "source": "GitHub", - "score": -0.23016440382580636, + "score": -0.23042020555071388, "first_commit": "2021-08-23 19:09:11", "latest_commit": "2023-01-15 12:43:44", "languages": [ @@ -13835,7 +14611,7 @@ "project_name": "Mykytea-python", "stargazers_count": 36, "source": "GitHub", - "score": -0.23308250978669254, + "score": -0.23334324604843618, "first_commit": "2011-07-15 11:45:10", "latest_commit": "2024-01-15 17:30:17", "languages": [ @@ -13850,7 +14626,7 @@ "project_name": "handwritten-japanese-ocr", "stargazers_count": 36, "source": "GitHub", - "score": -0.23308250978669254, + "score": -0.23334324604843618, "first_commit": "2020-05-01 17:27:13", "latest_commit": "2022-04-05 11:16:31", "languages": [ @@ -13865,7 +14641,7 @@ "project_name": "japanese-llm-roleplay-benchmark", "stargazers_count": 36, "source": "GitHub", - "score": -0.23308250978669254, + "score": -0.23334324604843618, "first_commit": "2023-09-15 23:52:27", "latest_commit": "2023-11-03 22:01:24", "languages": [ @@ -13880,7 +14656,7 @@ "project_name": "python-vibrato", "stargazers_count": 35, "source": "GitHub", - "score": -0.23600061574757875, + "score": -0.23626628654615847, "first_commit": "2022-12-08 12:29:38", "latest_commit": "2023-09-05 22:16:59", "languages": [ @@ -13895,7 +14671,7 @@ "project_name": "mozcpy", "stargazers_count": 35, "source": "GitHub", - "score": -0.23600061574757875, + "score": -0.23626628654615847, "first_commit": "2022-08-21 02:20:37", "latest_commit": "2023-12-12 00:56:24", "languages": [ @@ -13909,7 +14685,7 @@ "project_name": "ja-tokenizer-docker-py", "stargazers_count": 35, "source": "GitHub", - "score": -0.23600061574757875, + "score": -0.23626628654615847, "first_commit": "2022-05-08 13:45:30", "latest_commit": "2022-05-10 16:55:48", "languages": [ @@ -13924,7 +14700,7 @@ "project_name": "JIWC-Dictionary", "stargazers_count": 35, "source": "GitHub", - "score": -0.23600061574757875, + "score": -0.23626628654615847, "first_commit": "2020-07-31 16:28:26", "latest_commit": "2021-01-27 17:39:40", "languages": [], @@ -13936,7 +14712,7 @@ "project_name": "wikihow_japanese", "stargazers_count": 35, "source": "GitHub", - "score": -0.23600061574757875, + "score": -0.23626628654615847, "first_commit": "2020-06-29 03:11:23", "latest_commit": "2020-12-18 03:54:55", "languages": [ @@ -13950,7 +14726,7 @@ "project_name": "react-native-japanese-tokenizer", "stargazers_count": 34, "source": "GitHub", - "score": -0.23891872170846495, + "score": -0.23918932704388077, "first_commit": "2018-03-14 15:07:47", "latest_commit": "2023-06-19 09:34:01", "languages": [ @@ -13965,7 +14741,7 @@ 
"project_name": "pytorch_bert_japanese", "stargazers_count": 34, "source": "GitHub", - "score": -0.23891872170846495, + "score": -0.23918932704388077, "first_commit": "2019-06-05 21:42:27", "latest_commit": "2019-06-07 14:27:41", "languages": [ @@ -13979,7 +14755,7 @@ "project_name": "nayose-wikipedia-ja", "stargazers_count": 34, "source": "GitHub", - "score": -0.23891872170846495, + "score": -0.23918932704388077, "first_commit": "2020-03-09 09:16:39", "latest_commit": "2020-03-10 11:04:36", "languages": [ @@ -13993,7 +14769,7 @@ "project_name": "python-npylm", "stargazers_count": 33, "source": "GitHub", - "score": -0.24183682766935116, + "score": -0.24211236754160306, "first_commit": "2016-12-18 21:03:18", "latest_commit": "2019-01-30 16:35:14", "languages": [ @@ -14008,7 +14784,7 @@ "project_name": "EN-JP-ML-Lexicon", "stargazers_count": 33, "source": "GitHub", - "score": -0.24183682766935116, + "score": -0.24211236754160306, "first_commit": "2019-05-27 16:29:35", "latest_commit": "2021-03-13 09:19:56", "languages": [], @@ -14020,7 +14796,7 @@ "project_name": "albert-japanese", "stargazers_count": 33, "source": "GitHub", - "score": -0.24183682766935116, + "score": -0.24211236754160306, "first_commit": "2018-12-27 20:05:33", "latest_commit": "2021-10-28 19:57:23", "languages": [ @@ -14035,7 +14811,7 @@ "project_name": "pva-aoai-integration-solution", "stargazers_count": 33, "source": "GitHub", - "score": -0.24183682766935116, + "score": -0.24211236754160306, "first_commit": "2023-06-20 13:50:54", "latest_commit": "2023-08-14 18:03:34", "languages": [], @@ -14047,7 +14823,7 @@ "project_name": "unidic-lite", "stargazers_count": 33, "source": "GitHub", - "score": -0.24183682766935116, + "score": -0.24211236754160306, "first_commit": "2020-04-07 16:24:18", "latest_commit": "2020-09-01 22:50:07", "languages": [ @@ -14061,7 +14837,7 @@ "project_name": "UniDic2UD", "stargazers_count": 32, "source": "GitHub", - "score": -0.24475493363023734, + "score": -0.24503540803932536, "first_commit": "2019-08-27 00:45:01", "latest_commit": "2024-01-31 23:53:51", "languages": [ @@ -14076,7 +14852,7 @@ "project_name": "ja-vicuna-qa-benchmark", "stargazers_count": 32, "source": "GitHub", - "score": -0.24475493363023734, + "score": -0.24503540803932536, "first_commit": "2023-08-11 15:38:05", "latest_commit": "2024-06-11 16:24:06", "languages": [ @@ -14090,7 +14866,7 @@ "project_name": "text2text-japanese", "stargazers_count": 32, "source": "GitHub", - "score": -0.24475493363023734, + "score": -0.24503540803932536, "first_commit": "2021-02-11 12:28:53", "latest_commit": "2021-07-22 14:26:45", "languages": [ @@ -14104,7 +14880,7 @@ "project_name": "simple-jppdb", "stargazers_count": 32, "source": "GitHub", - "score": -0.24475493363023734, + "score": -0.24503540803932536, "first_commit": "2017-03-09 19:29:19", "latest_commit": "2017-03-13 00:01:48", "languages": [ @@ -14118,7 +14894,7 @@ "project_name": "whisper-asr-finetune", "stargazers_count": 31, "source": "GitHub", - "score": -0.24767303959112355, + "score": -0.24795844853704763, "first_commit": "2022-10-27 20:22:00", "latest_commit": "2022-12-04 21:29:47", "languages": [ @@ -14132,7 +14908,7 @@ "project_name": "moji4j", "stargazers_count": 31, "source": "GitHub", - "score": -0.24767303959112355, + "score": -0.24795844853704763, "first_commit": "2016-07-08 09:13:16", "latest_commit": "2022-06-24 09:00:44", "languages": [ @@ -14147,7 +14923,7 @@ "project_name": "jvs_hiho", "stargazers_count": 31, "source": "GitHub", - "score": -0.24767303959112355, + "score": 
-0.24795844853704763, "first_commit": "2020-02-02 01:24:43", "latest_commit": "2021-02-11 15:29:19", "languages": [], @@ -14159,7 +14935,7 @@ "project_name": "janome-tutorial", "stargazers_count": 31, "source": "GitHub", - "score": -0.24767303959112355, + "score": -0.24795844853704763, "first_commit": "2019-03-03 15:25:08", "latest_commit": "2019-03-03 16:46:00", "languages": [ @@ -14175,7 +14951,7 @@ "project_name": "marine", "stargazers_count": 30, "source": "GitHub", - "score": -0.25059114555200973, + "score": -0.2508814890347699, "first_commit": "2022-09-05 20:23:40", "latest_commit": "2022-09-20 10:26:24", "languages": [ @@ -14189,7 +14965,7 @@ "project_name": "ThreeLineSummaryDataset", "stargazers_count": 30, "source": "GitHub", - "score": -0.25059114555200973, + "score": -0.2508814890347699, "first_commit": "2018-01-25 02:27:22", "latest_commit": "2018-04-04 22:24:47", "languages": [], @@ -14201,7 +14977,7 @@ "project_name": "data_set", "stargazers_count": 30, "source": "GitHub", - "score": -0.25059114555200973, + "score": -0.2508814890347699, "first_commit": "2023-01-15 08:39:44", "latest_commit": "2024-05-14 17:37:05", "languages": [], @@ -14213,7 +14989,7 @@ "project_name": "rhoknp", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "2021-08-03 14:50:30", "latest_commit": "2024-07-16 09:26:45", "languages": [ @@ -14228,7 +15004,7 @@ "project_name": "knp", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "1999-03-12 09:09:08", "latest_commit": "2023-11-01 21:02:34", "languages": [ @@ -14248,7 +15024,7 @@ "project_name": "elasticsearch-analysis-japanese", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "2012-01-11 13:32:35", "latest_commit": "2012-03-06 23:36:46", "languages": [ @@ -14262,7 +15038,7 @@ "project_name": "nijisanji-ime-dic", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "2019-12-30 01:37:16", "latest_commit": "2024-08-14 12:35:13", "languages": [], @@ -14274,7 +15050,7 @@ "project_name": "anthy-unicode", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "2013-06-30 11:09:24", "latest_commit": "2024-05-02 14:12:10", "languages": [ @@ -14288,7 +15064,7 @@ "project_name": "BERT_Japanese_Google_Colaboratory", "stargazers_count": 29, "source": "GitHub", - "score": -0.25350925151289594, + "score": -0.2538045295324922, "first_commit": "2020-05-14 14:53:17", "latest_commit": "2022-01-25 11:58:44", "languages": [ @@ -14302,7 +15078,7 @@ "project_name": "bert", "stargazers_count": 28, "source": "GitHub", - "score": -0.25642735747378215, + "score": -0.2567275700302145, "first_commit": "2022-04-05 17:03:10", "latest_commit": "2022-04-05 17:03:55", "languages": [ @@ -14316,7 +15092,7 @@ "project_name": "UD_Japanese-GSD", "stargazers_count": 28, "source": "GitHub", - "score": -0.25642735747378215, + "score": -0.2567275700302145, "first_commit": "2016-10-12 10:33:42", "latest_commit": "2022-05-29 11:52:21", "languages": [ @@ -14330,7 +15106,7 @@ "project_name": "comet-atomic-ja", "stargazers_count": 28, "source": "GitHub", - "score": -0.25642735747378215, + "score": -0.2567275700302145, "first_commit": "2023-02-04 10:39:44", "latest_commit": "2024-03-08 22:31:37", "languages": [ @@ -14344,7 +15120,7 @@ 
"project_name": "transformer-copy", "stargazers_count": 27, "source": "GitHub", - "score": -0.25934546343466836, + "score": -0.2596506105279368, "first_commit": "2019-08-24 23:18:00", "latest_commit": "2020-09-25 23:12:54", "languages": [ @@ -14360,7 +15136,7 @@ "project_name": "crawdad", "stargazers_count": 27, "source": "GitHub", - "score": -0.25934546343466836, + "score": -0.2596506105279368, "first_commit": "2022-03-20 23:22:50", "latest_commit": "2023-02-20 22:23:22", "languages": [ @@ -14374,7 +15150,7 @@ "project_name": "jpreprocess", "stargazers_count": 27, "source": "GitHub", - "score": -0.25934546343466836, + "score": -0.2596506105279368, "first_commit": "2022-12-18 17:32:12", "latest_commit": "2024-08-14 09:22:07", "languages": [ @@ -14390,7 +15166,7 @@ "project_name": "Japanese_nlp_scripts", "stargazers_count": 26, "source": "GitHub", - "score": -0.26226356939555456, + "score": -0.2625736510256591, "first_commit": "2015-05-18 17:15:00", "latest_commit": "2019-06-30 18:33:13", "languages": [ @@ -14404,7 +15180,7 @@ "project_name": "yoin", "stargazers_count": 26, "source": "GitHub", - "score": -0.26226356939555456, + "score": -0.2625736510256591, "first_commit": "2017-01-15 23:54:52", "latest_commit": "2017-10-27 17:44:55", "languages": [ @@ -14418,7 +15194,7 @@ "project_name": "imas-ime-dic", "stargazers_count": 26, "source": "GitHub", - "score": -0.26226356939555456, + "score": -0.2625736510256591, "first_commit": "2018-04-25 02:07:26", "latest_commit": "2024-04-19 01:05:34", "languages": [ @@ -14432,7 +15208,7 @@ "project_name": "security_words", "stargazers_count": 26, "source": "GitHub", - "score": -0.26226356939555456, + "score": -0.2625736510256591, "first_commit": "2020-04-27 12:23:28", "latest_commit": "2023-08-18 10:02:08", "languages": [], @@ -14444,7 +15220,7 @@ "project_name": "yomikata", "stargazers_count": 25, "source": "GitHub", - "score": -0.26518167535644077, + "score": -0.2654966915233814, "first_commit": "2023-02-21 01:43:00", "latest_commit": "2023-10-03 09:10:45", "languages": [ @@ -14459,7 +15235,7 @@ "project_name": "nksnd", "stargazers_count": 25, "source": "GitHub", - "score": -0.26518167535644077, + "score": -0.2654966915233814, "first_commit": "2016-05-24 18:52:03", "latest_commit": "2018-05-17 08:54:24", "languages": [ @@ -14473,7 +15249,7 @@ "project_name": "tweetMapping", "stargazers_count": 25, "source": "GitHub", - "score": -0.26518167535644077, + "score": -0.2654966915233814, "first_commit": "2021-02-17 07:55:27", "latest_commit": "2023-12-08 21:42:44", "languages": [ @@ -14487,7 +15263,7 @@ "project_name": "ipadic-py", "stargazers_count": 25, "source": "GitHub", - "score": -0.26518167535644077, + "score": -0.2654966915233814, "first_commit": "2020-07-16 16:19:26", "latest_commit": "2021-10-31 04:47:19", "languages": [ @@ -14501,7 +15277,7 @@ "project_name": "JMRD", "stargazers_count": 25, "source": "GitHub", - "score": -0.26518167535644077, + "score": -0.2654966915233814, "first_commit": "2022-07-15 09:43:44", "latest_commit": "2022-07-19 10:02:29", "languages": [], @@ -14513,7 +15289,7 @@ "project_name": "vrchatbot", "stargazers_count": 24, "source": "GitHub", - "score": -0.268099781317327, + "score": -0.2684197320211037, "first_commit": "2022-12-10 22:48:39", "latest_commit": "2022-12-20 17:18:44", "languages": [ @@ -14527,7 +15303,7 @@ "project_name": "llm-jp-tokenizer", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2023-07-13 12:52:22", "latest_commit": "2024-07-05 
13:56:54", "languages": [ @@ -14541,7 +15317,7 @@ "project_name": "python_asa", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2018-07-25 04:13:21", "latest_commit": "2020-01-14 17:09:11", "languages": [ @@ -14555,7 +15331,7 @@ "project_name": "hatsuon", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2018-04-06 12:06:12", "latest_commit": "2022-03-14 18:06:39", "languages": [ @@ -14569,7 +15345,7 @@ "project_name": "AISisterAIChan", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2023-03-30 18:40:44", "latest_commit": "2023-05-18 22:14:24", "languages": [], @@ -14581,7 +15357,7 @@ "project_name": "cl-skkserv", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2017-12-23 10:20:54", "latest_commit": "2024-02-11 10:42:23", "languages": [], @@ -14593,7 +15369,7 @@ "project_name": "asdc", "stargazers_count": 23, "source": "GitHub", - "score": -0.2710178872782132, + "score": -0.271342772518826, "first_commit": "2022-06-16 09:43:01", "latest_commit": "2023-08-09 12:03:48", "languages": [ @@ -14607,7 +15383,7 @@ "project_name": "rakutenma-python", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": "2015-01-02 06:52:27", "latest_commit": "2017-05-22 16:45:10", "languages": [ @@ -14621,7 +15397,7 @@ "project_name": "bertknp", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": "2021-02-10 09:20:17", "latest_commit": "2021-10-02 14:45:12", "languages": [ @@ -14635,7 +15411,7 @@ "project_name": "ILYS-aoba-chatbot", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": "2020-10-26 13:13:33", "latest_commit": "2021-10-01 22:55:52", "languages": [ @@ -14651,7 +15427,7 @@ "project_name": "RoBERTa-japanese", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": "2020-11-28 17:36:50", "latest_commit": "2021-11-13 10:37:23", "languages": [ @@ -14665,7 +15441,7 @@ "project_name": "jisyo", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": null, "latest_commit": null, "languages": [], @@ -14677,7 +15453,7 @@ "project_name": "instruction_ja", "stargazers_count": 22, "source": "GitHub", - "score": -0.27393599323909934, + "score": -0.2742658130165483, "first_commit": "2023-06-22 15:52:12", "latest_commit": "2023-07-13 16:02:15", "languages": [ @@ -14691,7 +15467,7 @@ "project_name": "sengiri", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2019-10-05 03:46:43", "latest_commit": "2022-08-10 20:45:00", "languages": [ @@ -14705,7 +15481,7 @@ "project_name": "rake-ja", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2018-10-11 19:07:50", "latest_commit": "2018-10-11 19:27:37", "languages": [ @@ -14719,7 +15495,7 @@ "project_name": "rime-jaroomaji", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2023-03-21 21:07:42", 
"latest_commit": "2024-08-15 11:40:00", "languages": [ @@ -14733,7 +15509,7 @@ "project_name": "TwitterCorpus", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2016-03-05 19:20:15", "latest_commit": "2016-03-14 19:55:35", "languages": [ @@ -14747,7 +15523,7 @@ "project_name": "do-not-answer-ja", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2023-09-09 08:21:26", "latest_commit": "2023-12-16 02:34:12", "languages": [ @@ -14762,7 +15538,7 @@ "project_name": "kantan-regex-book", "stargazers_count": 21, "source": "GitHub", - "score": -0.27685409919998555, + "score": -0.2771888535142706, "first_commit": "2024-01-28 10:37:12", "latest_commit": "2024-03-23 21:55:27", "languages": [ @@ -14777,7 +15553,7 @@ "project_name": "python-vaporetto", "stargazers_count": 20, "source": "GitHub", - "score": -0.27977220516087176, + "score": -0.2801118940119929, "first_commit": "2022-06-09 13:37:22", "latest_commit": "2023-09-05 22:15:48", "languages": [ @@ -14792,7 +15568,7 @@ "project_name": "darts-clone-python", "stargazers_count": 20, "source": "GitHub", - "score": -0.27977220516087176, + "score": -0.2801118940119929, "first_commit": "2018-11-17 00:57:52", "latest_commit": "2022-04-05 21:28:21", "languages": [ @@ -14806,7 +15582,7 @@ "project_name": "text-generation", "stargazers_count": 20, "source": "GitHub", - "score": -0.27977220516087176, + "score": -0.2801118940119929, "first_commit": "2022-07-17 11:55:29", "latest_commit": "2022-07-24 13:48:32", "languages": [ @@ -14821,7 +15597,7 @@ "project_name": "keigo_transfer_task", "stargazers_count": 20, "source": "GitHub", - "score": -0.27977220516087176, + "score": -0.2801118940119929, "first_commit": "2022-06-14 23:06:57", "latest_commit": "2022-11-24 13:01:06", "languages": [], @@ -14833,7 +15609,7 @@ "project_name": "JMMLU", "stargazers_count": 20, "source": "GitHub", - "score": -0.27977220516087176, + "score": -0.2801118940119929, "first_commit": "2024-01-09 16:43:45", "latest_commit": "2024-02-27 14:19:14", "languages": [], @@ -14845,7 +15621,7 @@ "project_name": "jntajis-python", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2021-08-20 17:32:20", "latest_commit": "2023-06-16 22:43:41", "languages": [ @@ -14860,7 +15636,7 @@ "project_name": "pygeonlp", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2021-06-26 17:45:22", "latest_commit": "2024-07-24 01:39:27", "languages": [ @@ -14875,7 +15651,7 @@ "project_name": "unicode-jp-rs", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2016-05-21 16:39:49", "latest_commit": "2020-04-11 12:01:21", "languages": [ @@ -14889,7 +15665,7 @@ "project_name": "kyujitai.js", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2014-09-06 17:03:02", "latest_commit": "2020-08-30 23:28:58", "languages": [ @@ -14903,7 +15679,7 @@ "project_name": "go-moji", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2018-01-15 23:57:06", "latest_commit": "2019-04-17 10:29:44", "languages": [ @@ -14917,7 +15693,7 @@ "project_name": "huriganacorpus-ndlbib", "stargazers_count": 19, "source": "GitHub", - 
"score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2021-09-01 13:36:53", "latest_commit": "2021-09-21 14:20:03", "languages": [], @@ -14929,7 +15705,7 @@ "project_name": "japanese-toxic-dataset", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2023-01-10 17:19:58", "latest_commit": "2023-01-11 13:55:08", "languages": [], @@ -14941,7 +15717,7 @@ "project_name": "camera", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2023-02-22 10:33:39", "latest_commit": "2024-08-13 09:20:33", "languages": [], @@ -14953,7 +15729,7 @@ "project_name": "Japanese-Fakenews-Dataset", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2021-05-02 15:40:10", "latest_commit": "2021-05-02 15:40:10", "languages": [], @@ -14965,7 +15741,7 @@ "project_name": "aozorasearch", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2016-10-15 17:22:45", "latest_commit": "2020-09-04 14:28:15", "languages": [ @@ -14980,7 +15756,7 @@ "project_name": "jqara", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2024-03-02 09:09:08", "latest_commit": "2024-08-10 11:54:50", "languages": [ @@ -14994,7 +15770,7 @@ "project_name": "jacwir", "stargazers_count": 19, "source": "GitHub", - "score": -0.28269031112175796, + "score": -0.2830349345097152, "first_commit": "2024-03-19 15:31:13", "latest_commit": "2024-07-24 11:09:03", "languages": [ @@ -15008,7 +15784,7 @@ "project_name": "japanese-numbers-python", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2016-07-29 09:20:05", "latest_commit": "2020-04-04 10:36:27", "languages": [ @@ -15022,7 +15798,7 @@ "project_name": "Grongish", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2012-02-10 22:28:59", "latest_commit": "2022-04-21 21:31:37", "languages": [ @@ -15037,7 +15813,7 @@ "project_name": "termextract", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2018-09-26 22:20:04", "latest_commit": "2018-09-26 23:01:36", "languages": [ @@ -15051,7 +15827,7 @@ "project_name": "minimal-search-engine", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2019-06-25 01:58:26", "latest_commit": "2019-07-06 01:26:57", "languages": [ @@ -15065,7 +15841,7 @@ "project_name": "mozcdic-ut-jawiki", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2023-01-15 17:43:20", "latest_commit": "2024-07-29 01:27:13", "languages": [ @@ -15079,7 +15855,7 @@ "project_name": "analyze-desumasu-dearu", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2015-10-10 01:42:33", "latest_commit": "2021-12-10 12:01:50", "languages": [ @@ -15093,7 +15869,7 @@ "project_name": "JaNLI", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2021-11-02 20:26:27", "latest_commit": "2023-05-31 17:50:04", "languages": [ @@ 
-15107,7 +15883,7 @@ "project_name": "technological-book-corpus-ja", "stargazers_count": 18, "source": "GitHub", - "score": -0.2856084170826442, + "score": -0.2859579750074374, "first_commit": "2017-03-26 13:28:13", "latest_commit": "2023-07-12 03:26:27", "languages": [ @@ -15121,7 +15897,7 @@ "project_name": "rinna_gpt-neox_ggml-lora", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2023-05-24 03:21:40", "latest_commit": "2023-05-25 05:38:04", "languages": [ @@ -15135,7 +15911,7 @@ "project_name": "chainer_nic", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2018-07-02 19:57:17", "latest_commit": "2018-12-14 17:26:49", "languages": [ @@ -15149,7 +15925,7 @@ "project_name": "yomigana-ebook", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2023-04-25 18:21:37", "latest_commit": "2024-02-21 01:18:17", "languages": [ @@ -15164,7 +15940,7 @@ "project_name": "jmteb", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2024-03-15 10:28:52", "latest_commit": "2024-06-20 20:58:44", "languages": [ @@ -15178,7 +15954,7 @@ "project_name": "AnnotatedFKCCorpus", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2021-01-18 19:32:38", "latest_commit": "2023-12-18 14:27:29", "languages": [ @@ -15192,7 +15968,7 @@ "project_name": "ebe-dataset", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2020-10-14 08:15:37", "latest_commit": "2020-12-17 13:39:55", "languages": [], @@ -15204,7 +15980,7 @@ "project_name": "loanwords_gairaigo", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2019-10-21 16:49:45", "latest_commit": "2021-01-08 12:40:02", "languages": [ @@ -15218,7 +15994,7 @@ "project_name": "hurigana-speech-corpus-aozora", "stargazers_count": 17, "source": "GitHub", - "score": -0.2885265230435304, + "score": -0.2888810155051597, "first_commit": "2024-01-16 11:30:12", "latest_commit": "2024-01-31 16:04:24", "languages": [], @@ -15230,7 +16006,7 @@ "project_name": "JASS", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2020-02-19 11:19:42", "latest_commit": "2022-01-25 15:24:53", "languages": [], @@ -15242,7 +16018,7 @@ "project_name": "ginza-transformers", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2021-06-23 17:42:12", "latest_commit": "2022-08-09 18:19:33", "languages": [ @@ -15256,7 +16032,7 @@ "project_name": "MedNER-J", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2020-07-28 18:27:41", "latest_commit": "2022-05-17 20:01:05", "languages": [ @@ -15270,7 +16046,7 @@ "project_name": "AIO2_DPR_baseline", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2021-09-13 16:33:09", "latest_commit": "2022-01-08 23:15:11", "languages": [ @@ -15284,7 +16060,7 @@ "project_name": "unsupervised-pos-tagging", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + 
"score": -0.291804056002882, "first_commit": "2017-01-07 09:24:36", "latest_commit": "2017-10-11 23:23:04", "languages": [ @@ -15299,7 +16075,7 @@ "project_name": "furigana4epub", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2021-08-29 20:14:15", "latest_commit": "2021-09-11 14:03:11", "languages": [ @@ -15313,7 +16089,7 @@ "project_name": "Laboro-DistilBERT-Japanese", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2020-11-30 16:10:16", "latest_commit": "2020-12-17 15:26:01", "languages": [ @@ -15328,7 +16104,7 @@ "project_name": "character_chat", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2023-03-19 03:36:06", "latest_commit": "2023-06-03 23:30:23", "languages": [ @@ -15342,7 +16118,7 @@ "project_name": "TEDxJP-10K", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2021-01-14 13:14:28", "latest_commit": "2021-01-14 15:33:14", "languages": [ @@ -15356,7 +16132,7 @@ "project_name": "japanese-family-names", "stargazers_count": 16, "source": "GitHub", - "score": -0.2914446290044166, + "score": -0.291804056002882, "first_commit": "2017-06-08 18:00:02", "latest_commit": "2017-06-09 01:50:19", "languages": [ @@ -15370,7 +16146,7 @@ "project_name": "SuPar-UniDic", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2021-02-21 09:42:27", "latest_commit": "2024-06-28 15:56:24", "languages": [ @@ -15385,7 +16161,7 @@ "project_name": "furiganapad", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2019-05-14 02:56:41", "latest_commit": "2024-05-23 04:55:05", "languages": [ @@ -15399,7 +16175,7 @@ "project_name": "japanese_llm_simple_webui", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2023-06-04 23:06:13", "latest_commit": "2024-05-12 22:10:56", "languages": [ @@ -15413,7 +16189,7 @@ "project_name": "gpt4-autoeval", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2023-12-18 13:38:36", "latest_commit": "2024-06-06 13:50:35", "languages": [ @@ -15429,7 +16205,7 @@ "project_name": "llm-translator", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2023-12-31 00:47:57", "latest_commit": "2023-12-31 00:53:57", "languages": [ @@ -15443,7 +16219,7 @@ "project_name": "kana2ipa", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2019-12-23 00:07:17", "latest_commit": "2020-10-25 04:42:14", "languages": [ @@ -15457,7 +16233,7 @@ "project_name": "hololive-dictionary", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2021-06-13 09:49:48", "latest_commit": "2024-05-02 16:25:29", "languages": [ @@ -15471,7 +16247,7 @@ "project_name": "pixiv-yomitan", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2024-02-09 19:59:45", "latest_commit": "2024-07-23 22:00:16", "languages": [ @@ -15485,7 +16261,7 @@ "project_name": 
"Laboro-ParaCorpus", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2021-09-29 18:37:50", "latest_commit": "2021-11-09 10:18:54", "languages": [ @@ -15500,7 +16276,7 @@ "project_name": "ginza-examples", "stargazers_count": 15, "source": "GitHub", - "score": -0.2943627349653028, + "score": -0.2947270965006043, "first_commit": "2020-08-25 02:24:01", "latest_commit": "2021-01-29 00:04:15", "languages": [ @@ -15514,7 +16290,7 @@ "project_name": "japanese-sentence-breaker", "stargazers_count": 14, "source": "GitHub", - "score": -0.29728084092618895, + "score": -0.2976501369983266, "first_commit": "2021-02-28 21:40:27", "latest_commit": "2021-02-28 22:47:20", "languages": [ @@ -15528,7 +16304,7 @@ "project_name": "jsc", "stargazers_count": 14, "source": "GitHub", - "score": -0.29728084092618895, + "score": -0.2976501369983266, "first_commit": "2012-08-23 16:39:41", "latest_commit": "2012-12-19 18:36:09", "languages": [ @@ -15542,7 +16318,7 @@ "project_name": "aMLP-japanese", "stargazers_count": 14, "source": "GitHub", - "score": -0.29728084092618895, + "score": -0.2976501369983266, "first_commit": "2021-11-13 16:17:10", "latest_commit": "2022-05-10 14:16:55", "languages": [ @@ -15556,7 +16332,7 @@ "project_name": "openai-chatfriend", "stargazers_count": 14, "source": "GitHub", - "score": -0.29728084092618895, + "score": -0.2976501369983266, "first_commit": "2023-01-17 15:19:27", "latest_commit": "2023-04-03 10:19:35", "languages": [ @@ -15565,13 +16341,27 @@ ], "model_or_dataset": null }, + { + "description": "Mozc UT Place Name Dictionary is a dictionary converted from the Japan Post's ZIP code data for Mozc.", + "url": "https://github.com/utuhiro78/mozcdic-ut-place-names", + "project_name": "mozcdic-ut-place-names", + "stargazers_count": 14, + "source": "GitHub", + "score": -0.2976501369983266, + "first_commit": "2023-01-15 18:01:48", + "latest_commit": "2024-08-07 03:05:50", + "languages": [ + "Python" + ], + "model_or_dataset": "dataset" + }, { "description": "OpenAssistant のオープンソースデータ OASST1 を日本語に翻訳したデータセットになります。", "url": "https://github.com/kunishou/oasst1-89k-ja", "project_name": "oasst1-89k-ja", "stargazers_count": 14, "source": "GitHub", - "score": -0.29728084092618895, + "score": -0.2976501369983266, "first_commit": "2023-05-07 05:27:23", "latest_commit": "2023-11-20 00:23:10", "languages": [ @@ -15585,7 +16375,7 @@ "project_name": "dango", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2021-06-20 13:18:31", "latest_commit": "2021-11-21 19:41:04", "languages": [ @@ -15599,7 +16389,7 @@ "project_name": "PheMT", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2020-10-27 17:05:01", "latest_commit": "2021-02-18 14:05:26", "languages": [ @@ -15613,7 +16403,7 @@ "project_name": "entitypedia", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2017-11-08 13:54:21", "latest_commit": "2018-12-10 12:53:58", "languages": [ @@ -15629,7 +16419,7 @@ "project_name": "gector-ja", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2021-05-11 04:51:42", "latest_commit": "2021-07-16 18:55:03", "languages": [ @@ -15643,7 +16433,7 @@ "project_name": "negima", "stargazers_count": 13, "source": "GitHub", - "score": 
-0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2018-06-27 01:49:31", "latest_commit": "2018-08-20 12:31:43", "languages": [ @@ -15657,7 +16447,7 @@ "project_name": "clip-japanese", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2022-02-24 20:42:33", "latest_commit": "2023-03-12 18:43:17", "languages": [ @@ -15665,13 +16455,28 @@ ], "model_or_dataset": null }, + { + "description": "Japanese input method for fcitx5, powered by azooKey engine", + "url": "https://github.com/7ka-hiira/fcitx5-hazkey", + "project_name": "fcitx5-hazkey", + "stargazers_count": 13, + "source": "GitHub", + "score": -0.3005731774960489, + "first_commit": "2024-05-21 22:15:24", + "latest_commit": "2024-08-01 12:39:04", + "languages": [ + "C++", + "Swift" + ], + "model_or_dataset": "dataset" + }, { "description": "A dataset of n-gram frequency statistics for OCR text data created from digitized materials.", "url": "https://github.com/ndl-lab/ndlngramdata", "project_name": "ndlngramdata", "stargazers_count": 13, "source": "GitHub", - "score": -0.30019894688707516, + "score": -0.3005731774960489, "first_commit": "2022-12-06 10:20:12", "latest_commit": "2023-01-10 10:38:37", "languages": [], "model_or_dataset": "dataset" }, @@ -15683,7 +16488,7 @@ "project_name": "alphabet2kana", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2021-03-21 12:29:41", "latest_commit": "2024-08-10 11:10:20", "languages": [ @@ -15697,7 +16502,7 @@ "project_name": "allennlp-shiba-model", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2021-06-26 18:37:25", "latest_commit": "2021-06-27 00:42:45", "languages": [ @@ -15711,7 +16516,7 @@ "project_name": "NTM", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2019-07-24 20:14:25", "latest_commit": "2019-07-24 22:26:58", "languages": [ @@ -15725,7 +16530,7 @@ "project_name": "ClipCap-for-Japanese", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2022-10-05 00:44:54", "latest_commit": "2022-10-05 02:08:58", "languages": [ @@ -15739,7 +16544,7 @@ "project_name": "easynovelassistant", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2024-04-22 15:59:35", "latest_commit": "2024-07-05 17:29:36", "languages": [ @@ -15753,7 +16558,7 @@ "project_name": "deep-question-generation", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2021-03-07 17:01:23", "latest_commit": "2023-03-12 18:47:52", "languages": [ @@ -15767,7 +16572,7 @@ "project_name": "kanaria", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2019-01-07 22:16:32", "latest_commit": "2024-06-28 09:48:18", "languages": [ @@ -15783,7 +16588,7 @@ "project_name": "medbertjp", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2020-10-23 03:07:15", "latest_commit": "2020-11-22 08:37:27", "languages": [ @@ -15798,7 +16603,7 @@ "project_name": "albert-japanese-tinysegmenter", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2023-06-04 15:21:32",
"latest_commit": "2023-09-26 23:07:55", "languages": [ @@ -15812,7 +16617,7 @@ "project_name": "easylightchatassistant", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2024-04-06 17:40:16", "latest_commit": "2024-04-24 00:49:12", "languages": [], @@ -15824,7 +16629,7 @@ "project_name": "Ayashiy-Nipongo-Dic", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2021-12-14 11:50:25", "latest_commit": "2024-05-01 15:23:24", "languages": [], @@ -15836,7 +16641,7 @@ "project_name": "Web-Crawled-Corpus-for-Japanese-Chinese-NMT", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2021-11-12 23:19:26", "latest_commit": "2023-10-01 03:13:18", "languages": [], @@ -15848,7 +16653,7 @@ "project_name": "CourseraParallelCorpusMining", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2020-01-09 18:09:50", "latest_commit": "2022-06-14 14:18:29", "languages": [ @@ -15863,7 +16668,7 @@ "project_name": "honkoku-data", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2020-02-23 16:47:55", "latest_commit": "2024-06-14 17:43:19", "languages": [], @@ -15875,7 +16680,7 @@ "project_name": "commonsense-moral-ja", "stargazers_count": 12, "source": "GitHub", - "score": -0.30311705284796137, + "score": -0.3034962179937712, "first_commit": "2022-12-29 13:18:05", "latest_commit": "2023-12-18 11:41:00", "languages": [ @@ -15889,7 +16694,7 @@ "project_name": "text-cleaning", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2020-02-10 15:31:27", "latest_commit": "2022-11-21 10:21:56", "languages": [ @@ -15903,7 +16708,7 @@ "project_name": "VISA", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2022-03-25 17:15:42", "latest_commit": "2022-10-17 10:34:12", "languages": [], @@ -15915,7 +16720,7 @@ "project_name": "bert-japanese-ner-finetuning", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2021-11-14 22:57:16", "latest_commit": "2022-06-19 16:24:15", "languages": [ @@ -15930,7 +16735,7 @@ "project_name": "evaluate_japanese_w2v", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2020-02-07 08:02:49", "latest_commit": "2023-11-03 21:09:04", "languages": [ @@ -15944,7 +16749,7 @@ "project_name": "prefix-tuning-gpt", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2022-09-05 16:56:23", "latest_commit": "2023-03-22 14:13:22", "languages": [ @@ -15958,7 +16763,7 @@ "project_name": "asa-python", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2017-09-13 22:02:47", "latest_commit": "2019-02-16 23:52:13", "languages": [ @@ -15972,7 +16777,7 @@ "project_name": "jel", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2021-05-01 15:53:49", "latest_commit": "2021-07-25 13:01:46", "languages": [ @@ -15986,7 +16791,7 @@ "project_name": 
"python-npycrf", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2017-07-01 18:00:52", "latest_commit": "2018-03-23 00:04:16", "languages": [ @@ -16001,7 +16806,7 @@ "project_name": "aozora_classification", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2017-03-05 12:30:50", "latest_commit": "2017-09-03 12:01:40", "languages": [ @@ -16016,7 +16821,7 @@ "project_name": "ja-law-parser", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2023-11-26 19:38:36", "latest_commit": "2024-01-14 19:59:45", "languages": [ @@ -16030,7 +16835,7 @@ "project_name": "jawiki_word_vector_updater", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2019-02-13 16:18:58", "latest_commit": "2020-05-07 02:25:19", "languages": [], @@ -16042,7 +16847,7 @@ "project_name": "sbert-ja", "stargazers_count": 11, "source": "GitHub", - "score": -0.3060351588088476, + "score": -0.3064192584914935, "first_commit": "2021-07-31 01:11:17", "latest_commit": "2021-08-08 15:47:30", "languages": [ @@ -16056,7 +16861,7 @@ "project_name": "kyoto-reader", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2019-11-15 20:46:54", "latest_commit": "2024-06-26 12:45:20", "languages": [ @@ -16070,7 +16875,7 @@ "project_name": "zunda-python", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2019-02-24 01:08:40", "latest_commit": "2019-11-30 18:44:15", "languages": [ @@ -16084,7 +16889,7 @@ "project_name": "wikipedia-passages-jawiki-embeddings-utils", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2023-11-14 11:34:23", "latest_commit": "2024-03-29 08:18:44", "languages": [ @@ -16099,7 +16904,7 @@ "project_name": "jisho", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": null, "latest_commit": null, "languages": [], @@ -16111,7 +16916,7 @@ "project_name": "j-liwc2015", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2021-09-04 09:11:35", "latest_commit": "2022-11-15 15:37:27", "languages": [], @@ -16123,7 +16928,7 @@ "project_name": "huriganacorpus-aozora", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2021-08-31 16:37:20", "latest_commit": "2024-01-17 18:05:54", "languages": [], @@ -16135,7 +16940,7 @@ "project_name": "python-nlp-book", "stargazers_count": 10, "source": "GitHub", - "score": -0.3089532647697338, + "score": -0.3093422989892158, "first_commit": "2023-01-28 18:34:33", "latest_commit": "2023-05-07 23:55:27", "languages": [ @@ -16143,13 +16948,27 @@ ], "model_or_dataset": null }, + { + "description": "Easy wrapper for the postal code data of Japan", + "url": "https://github.com/nagataaaas/Jusho", + "project_name": "Jusho", + "stargazers_count": 9, + "source": "GitHub", + "score": -0.3122653394869381, + "first_commit": "2020-11-12 03:39:08", + "latest_commit": "2024-06-04 16:03:08", + "languages": [ + "Python" + ], + "model_or_dataset": null + }, { "description": "Comparison of Japanese 
Sentence Segmentation Tools", "url": "https://github.com/hkiyomaru/ja-senter-benchmark", "project_name": "ja-senter-benchmark", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2023-02-10 12:58:42", "latest_commit": "2023-02-27 17:58:52", "languages": [ @@ -16163,7 +16982,7 @@ "project_name": "japanese_text_classification", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2019-07-01 18:33:17", "latest_commit": "2020-01-15 19:12:53", "languages": [ @@ -16178,7 +16997,7 @@ "project_name": "pyknp-eventgraph", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2019-11-21 21:46:46", "latest_commit": "2022-09-26 12:21:44", "languages": [ @@ -16192,7 +17011,7 @@ "project_name": "japanese_summarizer", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2020-08-18 20:44:35", "latest_commit": "2022-08-01 20:28:21", "languages": [ @@ -16206,7 +17025,7 @@ "project_name": "WordCloud-Japanese", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2019-05-11 13:59:53", "latest_commit": "2020-01-02 06:45:49", "languages": [ @@ -16220,7 +17039,7 @@ "project_name": "snark", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2018-12-22 18:37:03", "latest_commit": "2020-03-11 22:01:51", "languages": [ @@ -16234,7 +17053,7 @@ "project_name": "PyKatsuyou", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2022-01-01 01:25:25", "latest_commit": "2023-09-04 17:26:36", "languages": [ @@ -16248,7 +17067,7 @@ "project_name": "JaSPICE", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2023-03-01 19:14:54", "latest_commit": "2023-11-08 21:16:53", "languages": [ @@ -16262,7 +17081,7 @@ "project_name": "bertjsc", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2023-03-16 11:05:59", "latest_commit": "2024-08-03 12:15:17", "languages": [ @@ -16277,7 +17096,7 @@ "project_name": "awabi", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2020-03-13 08:08:26", "latest_commit": "2024-07-01 16:35:36", "languages": [ @@ -16291,7 +17110,7 @@ "project_name": "dependency-based-japanese-word-embeddings", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2019-07-31 15:42:14", "latest_commit": "2019-08-14 11:39:35", "languages": [], @@ -16303,7 +17122,7 @@ "project_name": "AMI-Meeting-Parallel-Corpus", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2020-12-11 14:22:19", "latest_commit": "2020-12-11 16:41:42", "languages": [], @@ -16315,7 +17134,7 @@ "project_name": "huggingface-datasets_JGLUE", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2023-02-25 13:33:13", "latest_commit": "2024-05-21 11:23:51", "languages": [ @@ -16329,7 +17148,7 @@ "project_name": "meconaudio", "stargazers_count": 9, "source": "GitHub", - 
"score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2023-04-12 13:43:26", "latest_commit": "2023-10-26 08:40:27", "languages": [], @@ -16341,7 +17160,7 @@ "project_name": "GEC-Info-ja", "stargazers_count": 9, "source": "GitHub", - "score": -0.31187137073062, + "score": -0.3122653394869381, "first_commit": "2022-07-02 01:07:27", "latest_commit": "2024-03-27 01:15:21", "languages": [], @@ -16353,7 +17172,7 @@ "project_name": "jagger-python", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2023-12-27 22:09:07", "latest_commit": "2024-03-12 02:15:59", "languages": [ @@ -16367,7 +17186,7 @@ "project_name": "text2phoneme", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2023-04-25 22:53:08", "latest_commit": "2023-05-17 00:44:01", "languages": [ @@ -16381,7 +17200,7 @@ "project_name": "DNorm-J", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2020-05-07 13:47:43", "latest_commit": "2022-06-30 12:09:11", "languages": [ @@ -16395,7 +17214,7 @@ "project_name": "JaMIE", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2019-10-09 11:44:21", "latest_commit": "2023-05-18 05:19:18", "languages": [ @@ -16409,7 +17228,7 @@ "project_name": "BLIP2-Japanese", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2023-05-31 22:24:24", "latest_commit": "2024-01-16 08:54:50", "languages": [ @@ -16424,7 +17243,7 @@ "project_name": "shisa-v2", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2024-03-29 15:12:36", "latest_commit": "2024-07-30 00:26:10", "languages": [ @@ -16438,7 +17257,7 @@ "project_name": "mecab-ipadic-seed", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2016-07-18 01:26:14", "latest_commit": "2016-07-30 19:09:57", "languages": [ @@ -16452,7 +17271,7 @@ "project_name": "AcademicRoBERTa", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2022-09-05 15:58:57", "latest_commit": "2023-05-16 13:48:22", "languages": [ @@ -16466,7 +17285,7 @@ "project_name": "mecab-mozcdic", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2017-07-21 20:37:42", "latest_commit": "2018-01-12 16:07:57", "languages": [], @@ -16478,7 +17297,7 @@ "project_name": "anthy", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2010-05-14 14:19:43", "latest_commit": "2023-02-25 21:37:47", "languages": [ @@ -16493,7 +17312,7 @@ "project_name": "jcms", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2022-11-07 15:44:38", "latest_commit": "2022-11-07 16:40:03", "languages": [ @@ -16507,7 +17326,7 @@ "project_name": "ndl-minhon-ocrdataset", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2023-01-19 15:22:00", "latest_commit": "2024-02-07 14:16:11", "languages": [ @@ -16521,7 +17340,7 @@ "project_name": 
"WLSP-familiarity", "stargazers_count": 8, "source": "GitHub", - "score": -0.3147894766915062, + "score": -0.31518837998466037, "first_commit": "2019-03-13 10:26:42", "latest_commit": "2024-06-30 15:05:18", "languages": [], @@ -16533,7 +17352,7 @@ "project_name": "jamorasep", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2023-03-08 22:54:34", "latest_commit": "2023-09-09 01:14:14", "languages": [ @@ -16547,7 +17366,7 @@ "project_name": "text_recognition", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2022-03-30 13:32:26", "latest_commit": "2023-07-10 18:03:39", "languages": [ @@ -16561,7 +17380,7 @@ "project_name": "kantan", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2015-07-15 12:45:17", "latest_commit": "2022-07-20 20:10:04", "languages": [ @@ -16575,7 +17394,7 @@ "project_name": "aozora-corpus-generator", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2017-10-09 16:11:26", "latest_commit": "2022-04-04 13:15:59", "languages": [ @@ -16589,7 +17408,7 @@ "project_name": "AugLy-jp", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2021-06-13 18:48:45", "latest_commit": "2021-09-30 13:16:24", "languages": [ @@ -16604,7 +17423,7 @@ "project_name": "julius4seg", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2018-02-20 23:19:39", "latest_commit": "2021-08-22 18:57:51", "languages": [ @@ -16618,7 +17437,7 @@ "project_name": "sentiment_ja_js", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2021-12-08 22:48:44", "latest_commit": "2021-12-10 00:56:29", "languages": [ @@ -16632,7 +17451,7 @@ "project_name": "manbyo-sudachi", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2021-04-05 15:59:36", "latest_commit": "2021-04-06 18:04:00", "languages": [ @@ -16646,7 +17465,7 @@ "project_name": "aws_dic_for_google_ime", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2019-11-24 03:47:55", "latest_commit": "2019-11-30 09:28:33", "languages": [ @@ -16661,7 +17480,7 @@ "project_name": "hirakanadic", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2021-06-22 21:38:13", "latest_commit": "2023-07-08 17:22:15", "languages": [ @@ -16675,7 +17494,7 @@ "project_name": "GazeVQA", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2024-02-22 06:53:00", "latest_commit": "2024-05-20 11:17:25", "languages": [], @@ -16687,7 +17506,7 @@ "project_name": "JapaneseNLI", "stargazers_count": 7, "source": "GitHub", - "score": -0.3177075826523924, + "score": -0.31811142048238267, "first_commit": "2020-03-10 22:43:24", "latest_commit": "2021-06-08 23:48:55", "languages": [ @@ -16701,7 +17520,7 @@ "project_name": "cabocha", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2011-07-29 04:08:14", "latest_commit": 
"2022-08-17 22:24:13", "languages": [ @@ -16720,7 +17539,7 @@ "project_name": "noyaki", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2021-05-23 18:41:17", "latest_commit": "2022-08-25 18:42:55", "languages": [ @@ -16734,7 +17553,7 @@ "project_name": "bert-ner-japanese", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2022-09-26 18:20:34", "latest_commit": "2022-09-26 21:44:38", "languages": [ @@ -16748,7 +17567,7 @@ "project_name": "jmlm_scoring", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2021-08-13 09:18:33", "latest_commit": "2022-02-20 22:39:25", "languages": [ @@ -16762,7 +17581,7 @@ "project_name": "swallow-evaluation", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2023-10-30 14:34:19", "latest_commit": "2024-07-13 21:29:54", "languages": [ @@ -16778,7 +17597,7 @@ "project_name": "showcase", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2018-06-26 13:19:53", "latest_commit": "2018-06-26 16:53:20", "languages": [ @@ -16792,7 +17611,7 @@ "project_name": "desuwa", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2021-04-20 16:37:43", "latest_commit": "2022-05-23 12:27:37", "languages": [ @@ -16806,7 +17625,7 @@ "project_name": "chirptext", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2014-12-01 20:39:06", "latest_commit": "2022-10-04 21:57:00", "languages": [ @@ -16820,7 +17639,7 @@ "project_name": "mixture-of-unigram-model", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2016-01-05 15:33:24", "latest_commit": "2017-06-16 13:33:57", "languages": [ @@ -16834,7 +17653,7 @@ "project_name": "mlm-scoring-transformers", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2022-10-03 14:27:09", "latest_commit": "2022-12-14 20:07:24", "languages": [ @@ -16848,7 +17667,7 @@ "project_name": "tra-fugu", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2023-02-28 21:41:49", "latest_commit": "2023-03-02 03:10:38", "languages": [ @@ -16862,7 +17681,7 @@ "project_name": "wikipedia-japanese-open-rag", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2023-12-28 07:55:40", "latest_commit": "2024-01-06 19:46:00", "languages": [ @@ -16876,7 +17695,7 @@ "project_name": "pydomino", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2024-05-27 10:52:07", "latest_commit": "2024-07-23 11:16:10", "languages": [ @@ -16892,7 +17711,7 @@ "project_name": "magpie-nemotron", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2024-07-05 21:07:54", "latest_commit": "2024-07-05 23:35:16", "languages": [ @@ -16906,7 +17725,7 @@ "project_name": "kana", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, 
+ "score": -0.32103446098010496, "first_commit": "2022-06-28 22:39:35", "latest_commit": "2023-02-10 19:03:27", "languages": [], @@ -16918,7 +17737,7 @@ "project_name": "niinii", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2021-06-27 18:31:51", "latest_commit": "2024-08-11 11:54:57", "languages": [ @@ -16932,7 +17751,7 @@ "project_name": "AITuberDegikkoMirii", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2023-03-07 21:35:01", "latest_commit": "2023-03-18 06:59:43", "languages": [ @@ -16946,7 +17765,7 @@ "project_name": "Winograd-Schema-Challenge-Ja", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2019-01-25 19:14:29", "latest_commit": "2019-01-25 23:52:18", "languages": [ @@ -16960,7 +17779,7 @@ "project_name": "GeneralPolicySpeechOfPrimeMinisterOfJapan", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2014-10-28 00:56:59", "latest_commit": "2020-01-14 09:53:30", "languages": [], @@ -16972,7 +17791,7 @@ "project_name": "dcsg-ja", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2023-01-04 17:36:44", "latest_commit": "2023-03-10 17:29:26", "languages": [], @@ -16984,7 +17803,7 @@ "project_name": "ramendb", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2018-03-24 21:21:59", "latest_commit": "2024-08-06 10:47:04", "languages": [ @@ -16998,7 +17817,7 @@ "project_name": "J-CRe3", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2024-03-04 01:40:27", "latest_commit": "2024-06-14 18:24:05", "languages": [], @@ -17010,7 +17829,7 @@ "project_name": "pdmocrdataset-part2", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2022-05-24 10:37:30", "latest_commit": "2024-06-26 16:10:28", "languages": [], @@ -17022,7 +17841,7 @@ "project_name": "nlpbook", "stargazers_count": 6, "source": "GitHub", - "score": -0.32062568861327856, + "score": -0.32103446098010496, "first_commit": "2024-05-16 08:44:03", "latest_commit": "2024-07-03 14:25:21", "languages": [], @@ -17034,7 +17853,7 @@ "project_name": "jptranstokenizer", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2022-08-24 19:35:03", "latest_commit": "2024-02-03 03:07:59", "languages": [ @@ -17048,7 +17867,7 @@ "project_name": "compare-ja-tokenizer", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2023-06-13 18:08:18", "latest_commit": "2023-06-16 10:18:55", "languages": [ @@ -17062,7 +17881,7 @@ "project_name": "jawiki-cleaner", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2021-02-21 20:04:59", "latest_commit": "2021-02-21 21:41:44", "languages": [ @@ -17076,7 +17895,7 @@ "project_name": "hidden-markov-model", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2016-10-17 14:33:56", "latest_commit": "2017-06-16 13:33:24", 
"languages": [ @@ -17090,7 +17909,7 @@ "project_name": "Ngram-language-model", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2017-12-05 14:45:11", "latest_commit": "2017-12-05 15:03:52", "languages": [ @@ -17104,7 +17923,7 @@ "project_name": "akaza", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": null, "latest_commit": null, "languages": [], @@ -17116,7 +17935,7 @@ "project_name": "elmo-japanese", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2019-10-01 12:16:29", "latest_commit": "2019-10-07 10:37:31", "languages": [ @@ -17130,7 +17949,7 @@ "project_name": "japagen", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2024-01-12 10:53:54", "latest_commit": "2024-08-09 17:41:21", "languages": [], @@ -17142,7 +17961,7 @@ "project_name": "giant_ja-en_parallel_corpus", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2019-08-04 12:01:19", "latest_commit": "2019-08-04 17:40:02", "languages": [ @@ -17156,7 +17975,7 @@ "project_name": "BPersona-chat", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2022-10-10 14:15:10", "latest_commit": "2023-01-12 17:39:24", "languages": [], @@ -17168,7 +17987,7 @@ "project_name": "ita-corpus-chuwa", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2021-07-16 21:19:53", "latest_commit": "2021-08-25 12:22:06", "languages": [ @@ -17182,7 +18001,7 @@ "project_name": "ProSub", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2021-09-12 18:55:13", "latest_commit": "2024-06-02 19:06:13", "languages": [ @@ -17196,7 +18015,7 @@ "project_name": "allennlp-NER-ja", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2022-05-05 10:28:23", "latest_commit": "2022-05-10 00:52:35", "languages": [ @@ -17210,7 +18029,7 @@ "project_name": "chariot-PyTorch-Japanese-text-classification", "stargazers_count": 5, "source": "GitHub", - "score": -0.32354379457416477, + "score": -0.32395750147782726, "first_commit": "2019-03-02 15:04:41", "latest_commit": "2019-03-19 02:51:30", "languages": [ @@ -17224,7 +18043,7 @@ "project_name": "hasami", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2020-12-30 21:32:13", "latest_commit": "2021-02-21 14:39:11", "languages": [ @@ -17238,7 +18057,7 @@ "project_name": "nagisa_bert", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2022-09-26 03:45:52", "latest_commit": "2023-12-23 16:14:09", "languages": [ @@ -17253,7 +18072,7 @@ "project_name": "toEmoji", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2018-02-25 14:52:07", "latest_commit": "2018-04-16 00:59:20", "languages": [ @@ -17267,7 +18086,7 @@ "project_name": "jinf", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": 
"2022-02-03 17:59:01", "latest_commit": "2022-12-27 10:28:22", "languages": [ @@ -17281,7 +18100,7 @@ "project_name": "japanese-nli-model", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2022-10-26 17:42:42", "latest_commit": "2022-10-26 17:42:42", "languages": [ @@ -17295,7 +18114,7 @@ "project_name": "go-kakasi", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2024-02-11 19:08:03", "latest_commit": "2024-03-03 18:23:56", "languages": [ @@ -17309,7 +18128,7 @@ "project_name": "neologdn-java", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2017-02-20 18:07:05", "latest_commit": "2021-10-11 22:35:59", "languages": [ @@ -17323,7 +18142,7 @@ "project_name": "jp-azureopenai-samples", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2023-06-21 21:27:33", "latest_commit": "2024-08-16 11:31:48", "languages": [ @@ -17339,7 +18158,7 @@ "project_name": "kanji-flashcard-app-gpt4", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2023-10-17 23:33:19", "latest_commit": "2023-10-17 23:41:17", "languages": [ @@ -17354,7 +18173,7 @@ "project_name": "google-vs-deepl-je", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2020-03-22 19:45:11", "latest_commit": "2020-03-22 23:27:00", "languages": [ @@ -17368,7 +18187,7 @@ "project_name": "jawikicorpus", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2018-04-05 01:07:48", "latest_commit": "2018-11-24 16:44:02", "languages": [], @@ -17380,7 +18199,7 @@ "project_name": "huggingface-datasets_wrime", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2023-01-12 10:43:54", "latest_commit": "2023-01-15 12:39:01", "languages": [ @@ -17394,7 +18213,7 @@ "project_name": "japanese-technical-dict", "stargazers_count": 4, "source": "GitHub", - "score": -0.326461900535051, + "score": -0.32688054197554955, "first_commit": "2024-01-08 14:44:52", "latest_commit": "2024-06-18 00:54:03", "languages": [ @@ -17408,7 +18227,7 @@ "project_name": "mecab-text-cleaner", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2023-09-01 16:18:34", "latest_commit": "2024-03-29 00:06:20", "languages": [ @@ -17423,7 +18242,7 @@ "project_name": "python-habachen", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2023-10-04 07:40:00", "latest_commit": "2024-01-21 10:29:31", "languages": [ @@ -17440,7 +18259,7 @@ "project_name": "kuzukiri", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-11-20 00:05:51", "latest_commit": "2024-06-11 16:43:31", "languages": [ @@ -17456,7 +18275,7 @@ "project_name": "jrte-corpus_example", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-08-23 09:46:30", "latest_commit": "2021-11-19 13:11:47", "languages": [ @@ -17470,7 +18289,7 @@ "project_name": "mbart-finetuning", 
"stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-10-14 00:05:39", "latest_commit": "2021-10-14 00:16:10", "languages": [ @@ -17484,7 +18303,7 @@ "project_name": "tweet_extructor", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2018-06-20 16:23:11", "latest_commit": "2022-08-28 13:30:18", "languages": [ @@ -17498,7 +18317,7 @@ "project_name": "japanese_chatbot", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": null, "latest_commit": null, "languages": [], @@ -17510,7 +18329,7 @@ "project_name": "tokenizer-speed-bench", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-10-28 21:38:49", "latest_commit": "2023-03-01 14:07:29", "languages": [ @@ -17527,7 +18346,7 @@ "project_name": "listup_precedent", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2023-01-15 08:01:44", "latest_commit": "2024-05-11 22:30:27", "languages": [ @@ -17541,7 +18360,7 @@ "project_name": "juman-bin", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2017-05-11 13:18:41", "latest_commit": "2017-05-11 13:53:25", "languages": [ @@ -17555,7 +18374,7 @@ "project_name": "Japanese-Word-Of-The-Day", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-07-10 00:03:05", "latest_commit": "2021-08-11 03:03:38", "languages": [ @@ -17569,7 +18388,7 @@ "project_name": "jawikivec", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2018-06-04 01:22:54", "latest_commit": "2018-11-24 17:10:01", "languages": [], @@ -17581,7 +18400,7 @@ "project_name": "mh-dict-jp", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2023-04-27 20:01:36", "latest_commit": "2023-11-27 13:46:48", "languages": [ @@ -17595,7 +18414,7 @@ "project_name": "jesc_small", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2019-07-06 12:36:45", "latest_commit": "2019-07-06 12:49:24", "languages": [], @@ -17607,7 +18426,7 @@ "project_name": "japanese-corpus", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2018-08-19 04:18:55", "latest_commit": "2018-10-09 19:08:10", "languages": [], @@ -17619,7 +18438,7 @@ "project_name": "speechBSD", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2022-05-30 11:17:29", "latest_commit": "2024-02-07 22:15:37", "languages": [], @@ -17631,7 +18450,7 @@ "project_name": "anlp-jp-history", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2015-08-12 17:40:32", "latest_commit": "2024-04-05 19:33:15", "languages": [ @@ -17645,7 +18464,7 @@ "project_name": "handson-language-models", "stargazers_count": 3, "source": "GitHub", - "score": -0.3293800064959372, + "score": -0.32980358247327185, "first_commit": "2021-03-13 15:29:37", "latest_commit": "2021-03-18 19:33:34", 
"languages": [ @@ -17659,7 +18478,7 @@ "project_name": "jdepp-python", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2024-01-01 23:34:44", "latest_commit": "2024-02-14 22:09:31", "languages": [ @@ -17673,7 +18492,7 @@ "project_name": "wiredify", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-09-03 09:17:14", "latest_commit": "2023-12-19 01:01:46", "languages": [ @@ -17687,7 +18506,7 @@ "project_name": "utsuho", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-03-27 11:07:55", "latest_commit": "2023-11-20 08:53:43", "languages": [ @@ -17701,7 +18520,7 @@ "project_name": "Japanese-BERT-Sentiment-Analyzer", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2021-03-17 22:22:31", "latest_commit": "2021-04-20 00:41:29", "languages": [ @@ -17715,7 +18534,7 @@ "project_name": "ishi", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2019-12-21 17:24:25", "latest_commit": "2020-05-15 22:18:26", "languages": [ @@ -17729,7 +18548,7 @@ "project_name": "unihan-lm", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2020-09-14 16:41:10", "latest_commit": "2020-11-06 10:12:50", "languages": [ @@ -17743,7 +18562,7 @@ "project_name": "japanese_qa_demo_with_haystack_and_es", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-12-18 19:21:06", "latest_commit": "2022-12-19 03:57:34", "languages": [ @@ -17757,7 +18576,7 @@ "project_name": "trimatch", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2021-08-30 17:26:15", "latest_commit": "2024-02-08 10:06:06", "languages": [ @@ -17771,7 +18590,7 @@ "project_name": "tantivy-vibrato", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-08-25 13:31:30", "latest_commit": "2023-01-19 10:12:17", "languages": [ @@ -17785,7 +18604,7 @@ "project_name": "find-simdoc", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-08-31 12:56:10", "latest_commit": "2022-09-27 11:39:27", "languages": [ @@ -17800,7 +18619,7 @@ "project_name": "stringmatch-bench", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-09-29 12:24:29", "latest_commit": "2022-09-30 11:36:55", "languages": [ @@ -17815,7 +18634,7 @@ "project_name": "japanki", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-10-04 17:43:27", "latest_commit": "2023-10-17 01:00:00", "languages": [ @@ -17829,7 +18648,7 @@ "project_name": "pitch-accent", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-08-09 13:36:52", "latest_commit": "2023-09-08 08:19:43", "languages": [ @@ -17845,7 +18664,7 @@ "project_name": "gpt-ja", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, 
"first_commit": "2021-06-13 13:14:18", "latest_commit": "2021-09-27 21:08:39", "languages": [ @@ -17859,7 +18678,7 @@ "project_name": "AcademicBART", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-01-11 16:19:21", "latest_commit": "2024-07-11 22:09:11", "languages": [ @@ -17873,7 +18692,7 @@ "project_name": "denonbu-ime-dic", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2019-12-30 01:37:16", "latest_commit": "2022-11-13 23:09:25", "languages": [], @@ -17885,7 +18704,7 @@ "project_name": "anthy", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2013-06-30 11:09:24", "latest_commit": "2013-07-27 22:45:26", "languages": [ @@ -17899,7 +18718,7 @@ "project_name": "Data-on-Japanese-Diet-Members", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-07-02 22:58:11", "latest_commit": "2022-09-29 13:37:11", "languages": [], @@ -17911,7 +18730,7 @@ "project_name": "ndlngramviewer_v2", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2022-12-26 13:29:58", "latest_commit": "2023-07-20 11:05:53", "languages": [ @@ -17929,7 +18748,7 @@ "project_name": "huggingface-datasets_livedoor-news-corpus", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-01-17 23:16:18", "latest_commit": "2023-10-28 14:40:17", "languages": [ @@ -17943,7 +18762,7 @@ "project_name": "jpn_explainable_qa_dataset", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": null, "latest_commit": null, "languages": [], @@ -17955,7 +18774,7 @@ "project_name": "jemhopqa", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2023-08-01 02:07:45", "latest_commit": "2024-06-05 14:54:08", "languages": [ @@ -17969,7 +18788,7 @@ "project_name": "bert-classification-tutorial-2024", "stargazers_count": 2, "source": "GitHub", - "score": -0.3322981124568234, + "score": -0.33272662297099415, "first_commit": "2024-05-28 10:50:41", "latest_commit": "2024-07-08 17:10:46", "languages": [ @@ -17977,13 +18796,25 @@ ], "model_or_dataset": null }, + { + "description": "Fine-Tuning Google Gemma for Japanese Instructions", + "url": "https://github.com/qianniu95/gemma2_2b_finetune_jp_tutorial/blob/main/Gemma2_2b_Japanese_finetuning_colab.ipynb", + "project_name": "Gemma2_2b_Japanese_finetuning_colab.ipynb", + "stargazers_count": 2, + "source": "GitHub", + "score": -0.33272662297099415, + "first_commit": null, + "latest_commit": null, + "languages": [], + "model_or_dataset": null + }, { "description": "詳細なアノテーション基準に基づく症例報告コーパスからの固有表現及び関係の抽出精度の推論を行うコード", "url": "https://github.com/aih-uth/joint-information-extraction-hs", "project_name": "joint-information-extraction-hs", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2021-11-05 12:32:31", "latest_commit": "2021-11-17 12:29:39", "languages": [ @@ -17998,7 +18829,7 @@ "project_name": "t5_japanese_dialogue_generation", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, 
"first_commit": "2021-11-19 18:32:46", "latest_commit": "2021-11-28 10:48:04", "languages": [ @@ -18013,7 +18844,7 @@ "project_name": "lm-evaluation-harness-jp-stable", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2020-08-27 18:08:04", "latest_commit": "2023-06-19 10:48:40", "languages": [ @@ -18028,7 +18859,7 @@ "project_name": "japanese2phoneme", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2021-02-09 16:27:24", "latest_commit": "2022-02-24 16:11:04", "languages": [ @@ -18042,7 +18873,7 @@ "project_name": "anlp_nlp2021_d3-1", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2021-03-12 02:34:47", "latest_commit": "2022-03-08 13:40:28", "languages": [ @@ -18056,7 +18887,7 @@ "project_name": "JDT-with-KenLM-scoring", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2022-06-28 09:22:47", "latest_commit": "2022-07-01 21:29:00", "languages": [ @@ -18070,7 +18901,7 @@ "project_name": "japanese-word-aggregation", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2018-08-08 13:39:47", "latest_commit": "2018-08-20 07:51:01", "languages": [ @@ -18084,7 +18915,7 @@ "project_name": "SAT-for-Japanese", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2022-10-04 23:48:47", "latest_commit": "2022-10-05 00:40:09", "languages": [ @@ -18098,7 +18929,7 @@ "project_name": "japanese_llm_eval", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2024-03-28 14:56:01", "latest_commit": "2024-04-22 07:39:03", "languages": [ @@ -18113,7 +18944,7 @@ "project_name": "qlora_ja", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-09-10 15:04:15", "latest_commit": "2024-07-13 10:01:12", "languages": [ @@ -18127,7 +18958,7 @@ "project_name": "japanese-address-parser", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-11-17 23:16:38", "latest_commit": "2024-08-12 18:23:09", "languages": [ @@ -18142,7 +18973,7 @@ "project_name": "friendly_JA-Model", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2022-01-10 12:03:59", "latest_commit": "2022-05-22 14:42:46", "languages": [], @@ -18154,7 +18985,7 @@ "project_name": "ChuanhuChatGPTJapanese", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-03-02 21:37:13", "latest_commit": "2023-03-07 14:10:10", "languages": [ @@ -18168,7 +18999,7 @@ "project_name": "chrome-ext-translate-to-hiragana-with-chatgpt", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-03-25 20:09:31", "latest_commit": "2023-04-01 16:05:53", "languages": [ @@ -18182,7 +19013,7 @@ "project_name": "chatvrm", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-04-28 17:25:29", "latest_commit": "2024-07-18 13:49:25", 
"languages": [ @@ -18197,7 +19028,7 @@ "project_name": "sftly-replace", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2023-05-04 20:51:22", "latest_commit": "2023-05-24 02:03:16", "languages": [ @@ -18211,7 +19042,7 @@ "project_name": "JumanDIC", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2015-12-07 17:42:10", "latest_commit": "2022-08-18 19:01:36", "languages": [ @@ -18227,7 +19058,7 @@ "project_name": "uchinaaguchi_dict", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2024-03-12 06:53:05", "latest_commit": "2024-08-12 17:05:46", "languages": [ @@ -18242,7 +19073,7 @@ "project_name": "skk-jisyo.emoji-ja", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2018-03-13 01:04:56", "latest_commit": "2018-03-13 02:01:32", "languages": [ @@ -18256,7 +19087,7 @@ "project_name": "kokkosho_data", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2017-04-16 15:04:26", "latest_commit": "2019-07-09 23:36:27", "languages": [], @@ -18268,7 +19099,7 @@ "project_name": "isbn4groups", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2022-08-03 16:31:28", "latest_commit": "2024-06-25 14:11:40", "languages": [], @@ -18280,7 +19111,7 @@ "project_name": "jacred", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2024-01-11 16:26:25", "latest_commit": "2024-03-08 17:58:20", "languages": [], @@ -18292,7 +19123,7 @@ "project_name": "jades", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2022-10-15 09:32:35", "latest_commit": "2022-12-13 15:57:29", "languages": [], @@ -18304,7 +19135,7 @@ "project_name": "japanese-ir-tutorial", "stargazers_count": 1, "source": "GitHub", - "score": -0.3352162184177096, + "score": -0.33564966346871644, "first_commit": "2024-04-29 10:52:11", "latest_commit": "2024-06-05 18:56:44", "languages": [ @@ -18318,7 +19149,7 @@ "project_name": "pynormalizenumexp", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2021-10-11 21:02:26", "latest_commit": "2024-04-28 19:55:49", "languages": [ @@ -18326,27 +19157,13 @@ ], "model_or_dataset": null }, - { - "description": "Easy wrapper for the postal code data of Japan", - "url": "https://github.com/nagataaaas/Jusho", - "project_name": "Jusho", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3381343243785958, - "first_commit": "2020-11-12 03:39:08", - "latest_commit": "2024-06-04 16:03:08", - "languages": [ - "Python" - ], - "model_or_dataset": null - }, { "description": "OCR system for recognizing modern Japanese magazines", "url": "https://github.com/ducanh841988/Kindai-OCR", "project_name": "Kindai-OCR", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2020-07-08 10:12:27", "latest_commit": "2023-07-12 12:14:52", "languages": [ @@ -18360,7 +19177,7 @@ "project_name": "JGLUE-benchmark", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": 
-0.3385727039664387, "first_commit": "2023-03-18 01:19:37", "latest_commit": "2024-08-09 18:31:33", "languages": [ @@ -18374,7 +19191,7 @@ "project_name": "yubin", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2019-10-28 07:11:54", "latest_commit": "2019-10-28 07:20:26", "languages": [ @@ -18388,7 +19205,7 @@ "project_name": "ASRDeepSpeech", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2020-03-03 15:08:25", "latest_commit": "2022-09-26 00:11:29", "languages": [ @@ -18403,7 +19220,7 @@ "project_name": "radicalchar", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2022-11-29 19:17:38", "latest_commit": "2022-12-30 01:40:44", "languages": [ @@ -18419,7 +19236,7 @@ "project_name": "natsume", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": null, "latest_commit": null, "languages": [], @@ -18431,7 +19248,7 @@ "project_name": "RAG-Japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-11-14 14:06:31", "latest_commit": "2023-11-29 19:47:20", "languages": [ @@ -18445,7 +19262,7 @@ "project_name": "jglue-evaluation-scripts", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-03-18 01:19:37", "latest_commit": "2024-08-09 18:31:33", "languages": [ @@ -18459,7 +19276,7 @@ "project_name": "oskim", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-02-24 15:08:36", "latest_commit": "2023-02-24 15:43:20", "languages": [ @@ -18474,7 +19291,7 @@ "project_name": "aovec", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2021-06-22 16:51:15", "latest_commit": "2023-02-01 02:27:34", "languages": [ @@ -18488,7 +19305,7 @@ "project_name": "BERT-Japan-vaccination", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2022-04-05 16:46:38", "latest_commit": "2022-05-23 00:47:45", "languages": [ @@ -18502,7 +19319,7 @@ "project_name": "VRChatGPT", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-03-21 19:53:54", "latest_commit": "2023-03-22 21:04:37", "languages": [ @@ -18516,7 +19333,7 @@ "project_name": "chatgpt-prompt-sample-japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-04-13 14:22:50", "latest_commit": "2024-08-09 15:15:17", "languages": [], @@ -18528,7 +19345,7 @@ "project_name": "pokemon-ime-dic", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2020-01-10 23:13:19", "latest_commit": "2020-01-10 23:25:48", "languages": [], @@ -18540,7 +19357,7 @@ "project_name": "jitenbot", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": null, "latest_commit": null, "languages": [], @@ -18552,7 +19369,7 @@ "project_name": "azookey-desktop", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": 
-0.3385727039664387, "first_commit": "2021-09-07 21:50:12", "latest_commit": "2024-08-13 22:08:17", "languages": [ @@ -18560,42 +19377,13 @@ ], "model_or_dataset": "dataset" }, - { - "description": "Japanese input method for fcitx5, powered by azooKey engine", - "url": "https://github.com/7ka-hiira/fcitx5-hazkey", - "project_name": "fcitx5-hazkey", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3381343243785958, - "first_commit": "2024-05-21 22:15:24", - "latest_commit": "2024-08-01 12:39:04", - "languages": [ - "C++", - "Swift" - ], - "model_or_dataset": "dataset" - }, - { - "description": "Mozc UT Place Name Dictionary is a dictionary converted from the Japan Post's ZIP code data for Mozc.", - "url": "https://github.com/utuhiro78/mozcdic-ut-place-names", - "project_name": "mozcdic-ut-place-names", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3381343243785958, - "first_commit": "2023-01-15 18:01:48", - "latest_commit": "2024-08-07 03:05:50", - "languages": [ - "Python" - ], - "model_or_dataset": "dataset" - }, { "description": "Kana-Kanji Conversion Module written in Swift", "url": "https://github.com/ensan-hcl/azookeykanakanjiconverter", "project_name": "azookeykanakanjiconverter", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-05-28 11:51:25", "latest_commit": "2024-08-11 00:53:16", "languages": [ @@ -18609,7 +19397,7 @@ "project_name": "UD_Japanese-PUD", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2017-05-23 10:31:45", "latest_commit": "2020-05-16 10:57:47", "languages": [], @@ -18621,7 +19409,7 @@ "project_name": "graded-enja-corpus", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2021-06-05 11:55:23", "latest_commit": "2023-03-14 00:24:51", "languages": [ @@ -18635,7 +19423,7 @@ "project_name": "WikipediaWordFrequencyList", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2022-04-17 16:35:32", "latest_commit": "2022-04-17 16:44:19", "languages": [ @@ -18649,7 +19437,7 @@ "project_name": "friendly_JA-Corpus", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": null, "latest_commit": null, "languages": [], @@ -18661,7 +19449,7 @@ "project_name": "copa-japanese", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-01-13 09:04:08", "latest_commit": "2023-02-24 11:28:31", "languages": [], @@ -18673,7 +19461,7 @@ "project_name": "huggingface-datasets_CAMERA", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-03-17 23:02:32", "latest_commit": "2023-03-17 23:49:35", "languages": [ @@ -18687,7 +19475,7 @@ "project_name": "FactCheckSentenceNLI-FCSNLI-", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2021-02-26 14:08:54", "latest_commit": "2021-03-03 11:15:47", "languages": [ @@ -18701,7 +19489,7 @@ "project_name": "EaST-MELD", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2023-04-12 00:16:46", "latest_commit": "2023-06-23 11:09:20", "languages": [], @@ -18713,7 +19501,7 @@ 
"project_name": "reazonspeech", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2022-10-19 10:08:01", "latest_commit": "2024-08-01 17:38:15", "languages": [ @@ -18728,7 +19516,7 @@ "project_name": "j-unimorph", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2024-01-10 20:05:15", "latest_commit": "2024-05-12 20:42:38", "languages": [ @@ -18742,7 +19530,7 @@ "project_name": "jmed-llm", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2024-07-11 20:01:36", "latest_commit": "2024-08-10 08:49:25", "languages": [ @@ -18756,7 +19544,7 @@ "project_name": "lawtext", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2017-12-06 23:09:02", "latest_commit": "2024-08-10 23:37:12", "languages": [ @@ -18771,22 +19559,10 @@ "project_name": "nlp2024-tutorial-3", "stargazers_count": 0, "source": "GitHub", - "score": -0.3381343243785958, + "score": -0.3385727039664387, "first_commit": "2024-03-05 09:03:21", "latest_commit": "2024-04-02 14:38:06", "languages": [], "model_or_dataset": null - }, - { - "description": "Fine-Tuning Google Gemma for Japanese Instructions", - "url": "https://github.com/qianniu95/gemma2_2b_finetune_jp_tutorial/blob/main/Gemma2_2b_Japanese_finetuning_colab.ipynb", - "project_name": "Gemma2_2b_Japanese_finetuning_colab.ipynb", - "stargazers_count": 0, - "source": "GitHub", - "score": -0.3381343243785958, - "first_commit": null, - "latest_commit": null, - "languages": [], - "model_or_dataset": null } ] \ No newline at end of file