# Datasets: Depression: Reddit Dataset (Cleaned)
# File size: 2,684 Bytes
# (The lines above are page-scrape metadata residue, not part of the loading script.)
# Licensed under the Creative Commons License, Version CC By 4.0;
# You may obtain a copy of the License at
#
# https://creativecommons.org/licenses/by/4.0/legalcode
"""Depression: Reddit Dataset (Cleaned)"""
import csv
import json
import os
import datasets
from datasets.tasks import TextClassification
_DESCRIPTION = """\
The dataset provided is a Depression: Reddit Dataset (Cleaned)containing approximately
7,000 labeled instances. It consists of two main features: 'text' and 'label'.
The 'text' feature contains the text data from Reddit posts related to depression, while
the 'label' feature indicates whether a post is classified as depression or not.
The raw data for this dataset was collected by web scraping Subreddits. To ensure the data's
quality and usefulness, multiple natural language processing (NLP) techniques were applied
to clean the data. The dataset exclusively consists of English-language posts, and its
primary purpose is to facilitate mental health classification tasks.
This dataset can be employed in various natural language processing tasks related to
depression,such as sentiment analysis, topic modeling, text classification, or any other NLP
task that requires labeled data pertaining to depression from Reddit.
"""
_TRAIN_URL = "depression_reddit_cleaned_ds.csv"
class DepressionRedditCleaned(datasets.GeneratorBasedBuilder):
    """
    ~7000 Cleaned Reddit Labelled Dataset on Depression
    The raw data is collected through web-scrapping Subreddits and is cleaned using multiple NLP techniques.
    The data is only in English. It mainly targets mental health classification.
    """

    VERSION = datasets.Version("1.1.0")

    def _info(self):
        """Return the dataset metadata: feature schema and task template."""
        return datasets.DatasetInfo(
            description=_DESCRIPTION,
            features=datasets.Features(
                {
                    "text": datasets.Value("string"),
                    # Binary target: index 0 = not_depression, index 1 = depression.
                    "label": datasets.features.ClassLabel(
                        num_classes=2,
                        names=["not_depression", "depression"],
                    ),
                }
            ),
            task_templates=[TextClassification(text_column="text", label_column="label")],
        )

    def _split_generators(self, dl_manager):
        """Resolve the single CSV file and expose it as the train split."""
        train_path = dl_manager.download_and_extract(_TRAIN_URL)
        return [
            datasets.SplitGenerator(
                name=datasets.Split.TRAIN,
                gen_kwargs={"filepath": train_path},
            )
        ]

    def _generate_examples(self, filepath):
        """Yields examples as (key, example) tuples.

        Rows that do not have exactly two fields (e.g. a blank trailing line)
        are skipped instead of raising ValueError on unpacking, and the label
        is cast to int explicitly rather than relying on ClassLabel's
        string-to-int fallback during encoding.
        """
        with open(filepath, encoding="utf-8") as f:
            csv_reader = csv.reader(
                f, quotechar='"', delimiter=",", quoting=csv.QUOTE_ALL, skipinitialspace=True
            )
            next(csv_reader)  # skip the header row
            id_ = 0
            for row in csv_reader:
                if len(row) != 2:
                    # Tolerate blank lines / malformed rows in the CSV.
                    continue
                text, label = row
                yield id_, {"text": text, "label": int(label)}
                id_ += 1
|