Datasets:
Sub-tasks:
multi-class-classification
Languages:
English
Size:
1K<n<10K
Tags:
natural-language-understanding
ideology classification
text classification
natural language processing
License:
EricR401S
committed on
Commit
·
ec42311
1
Parent(s):
8f25a0c
data
Browse files- Pill_Ideologies-Post_Titles.py +51 -17
- reddit_posts_fm.csv +2 -2
- redditscraper_fm.py +1 -26
Pill_Ideologies-Post_Titles.py
CHANGED
@@ -164,24 +164,33 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
|
|
164 |
urls = _URLS[self.config.name]
|
165 |
data_dir = dl_manager.download_and_extract(urls)
|
166 |
data = pd.read_csv(data_dir)
|
|
|
|
|
167 |
|
168 |
-
def clean_data_nans(df):
|
169 |
-
|
170 |
-
|
171 |
-
|
172 |
-
|
173 |
-
|
174 |
-
|
175 |
-
|
176 |
-
|
177 |
-
|
178 |
-
|
179 |
-
|
180 |
-
|
181 |
-
|
182 |
-
|
183 |
-
|
184 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
185 |
print("PAssed the cleaning")
|
186 |
# commented out the splits, due to google colab being uncooperative
|
187 |
# raised too many errors that my local machine did not
|
@@ -264,3 +273,28 @@ class SubRedditPosts(datasets.GeneratorBasedBuilder):
|
|
264 |
"upvote_ratio": row.get("upvote_ratio"),
|
265 |
"is_video": row.get("is_video"),
|
266 |
}
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
164 |
urls = _URLS[self.config.name]
|
165 |
data_dir = dl_manager.download_and_extract(urls)
|
166 |
data = pd.read_csv(data_dir)
|
167 |
+
data = self.process_data(data)
|
168 |
+
print("PAssed the processing")
|
169 |
|
170 |
+
# def clean_data_nans(df):
|
171 |
+
# """This function takes a dataframe and fills all NaNs with a value
|
172 |
+
# This is to appease google colab, because my local machine did not raise errors
|
173 |
+
# ... and it's a windows. That should tell you a lot."""
|
174 |
+
# for col in data.columns:
|
175 |
+
# print(f"Cleaning NaNs in {col}")
|
176 |
+
# if df[col].dtype == "object":
|
177 |
+
# df[col].fillna("No-NAN-Nothing found", inplace=True)
|
178 |
+
# elif df[col].dtype in ["int64", "float64", "int32", "float32", "int"]:
|
179 |
+
# df[col].fillna(0, inplace=True)
|
180 |
+
# elif df[col].dtype == "bool":
|
181 |
+
# df[col].fillna(False, inplace=True)
|
182 |
+
# else:
|
183 |
+
# df[col].fillna("NAN - problematic {col} found", inplace=True)
|
184 |
+
# return None
|
185 |
+
|
186 |
+
def show_nans_profile(df):
    """Print, one line per column, the column name and its NaN count."""
    nan_counts = df.isna().sum()
    for column, count in nan_counts.items():
        print(column, count)
|
191 |
+
|
192 |
+
show_nans_profile(data)
|
193 |
+
# clean_data_nans(data)
|
194 |
print("PAssed the cleaning")
|
195 |
# commented out the splits, due to google colab being uncooperative
|
196 |
# raised too many errors that my local machine did not
|
|
|
273 |
"upvote_ratio": row.get("upvote_ratio"),
|
274 |
"is_video": row.get("is_video"),
|
275 |
}
|
276 |
+
|
277 |
+
def process_data(self, df):
    """Drop unusable rows and fill remaining NaNs with typed defaults.

    Rows missing ``subreddit`` or ``title`` cannot be labeled or used as
    input text, so they are dropped.  Every other column is filled with a
    type-appropriate default so the dataset imports cleanly (the NaN fill
    exists to appease Google Colab / huggingface loading, per the original
    comments).

    NOTE(review): the caller invokes this as ``self.process_data(data)``,
    but the original signature was ``def process_data(df)`` — as a method
    that binds the instance to ``df`` and raises TypeError on the extra
    argument.  ``self`` is added here to match the call site; confirm the
    method really lives at class level.

    Args:
        df: raw posts DataFrame as read from the CSV.

    Returns:
        A cleaned DataFrame; the caller's input frame is not mutated.
    """
    # Both columns are required — one dropna call covers both subsets.
    df = df.dropna(subset=["subreddit", "title"])

    # Type-appropriate defaults so downstream consumers never see NaN.
    values = {
        "id": "",
        "text": "",
        "url": "",
        "score": 0,
        "date": 0.0,
        "subreddit_subscribers": 0,
        "num_comments": 0,
        "ups": 0,
        "downs": 0,
        "upvote_ratio": 0.0,
        "is_video": False,
    }

    # Avoid fillna(inplace=True) on the frame returned by dropna: that is
    # chained in-place mutation, which modern pandas deprecates/warns on.
    # Returning a new frame is behaviorally identical for callers.
    return df.fillna(value=values)
|
reddit_posts_fm.csv
CHANGED
@@ -1,3 +1,3 @@
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
-
oid sha256:
|
3 |
-
size
|
|
|
1 |
version https://git-lfs.github.com/spec/v1
|
2 |
+
oid sha256:c1c32caf7adba027c63be0a0bc8263bad86ab30049290b53b458eb235a69d115
|
3 |
+
size 11259261
|
redditscraper_fm.py
CHANGED
@@ -121,7 +121,6 @@ def pull_info_from_reddit_dict(
|
|
121 |
"ups",
|
122 |
"downs",
|
123 |
"upvote_ratio",
|
124 |
-
"num_reports",
|
125 |
"is_video",
|
126 |
],
|
127 |
):
|
@@ -151,39 +150,15 @@ df = pd.DataFrame(
|
|
151 |
"text",
|
152 |
"url",
|
153 |
"score",
|
154 |
-
"author",
|
155 |
"date",
|
156 |
"subreddit_subscribers",
|
157 |
"num_comments",
|
158 |
"ups",
|
159 |
"downs",
|
160 |
"upvote_ratio",
|
161 |
-
"num_reports",
|
162 |
"is_video",
|
163 |
],
|
164 |
)
|
165 |
|
166 |
-
# columns that cannot be empty,so drop rows
|
167 |
-
df = df.dropna(subset=["subreddit"])
|
168 |
-
df = df.dropna(subset=["title"])
|
169 |
-
df = df.drop(columns=["num_reports"]) # drop num_reports, always empty
|
170 |
-
|
171 |
-
# cleaning to make colab importing the dataset through huggingface work
|
172 |
-
values = {
|
173 |
-
"id": "NOTEXT",
|
174 |
-
"text": "NOTEXT",
|
175 |
-
"url": "NOTEXT",
|
176 |
-
"score": 0,
|
177 |
-
"date": 0.0,
|
178 |
-
"subreddit_subscribers": 0,
|
179 |
-
"num_comments": 0,
|
180 |
-
"ups": 0,
|
181 |
-
"downs": 0,
|
182 |
-
"upvote_ratio": 0.0,
|
183 |
-
"is_video": "False",
|
184 |
-
}
|
185 |
-
df.fillna(value=values, inplace=True)
|
186 |
-
|
187 |
-
df = df[df["subreddit"].isin(subreddit_list)]
|
188 |
-
|
189 |
df.to_csv("reddit_posts_fm.csv", index=False)
|
|
|
121 |
"ups",
|
122 |
"downs",
|
123 |
"upvote_ratio",
|
|
|
124 |
"is_video",
|
125 |
],
|
126 |
):
|
|
|
150 |
"text",
|
151 |
"url",
|
152 |
"score",
|
153 |
+
# "author", author is not useful for the analysis
|
154 |
"date",
|
155 |
"subreddit_subscribers",
|
156 |
"num_comments",
|
157 |
"ups",
|
158 |
"downs",
|
159 |
"upvote_ratio",
|
|
|
160 |
"is_video",
|
161 |
],
|
162 |
)
|
163 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
164 |
df.to_csv("reddit_posts_fm.csv", index=False)
|