LMartinezEXEX
committed
Commit 870ce84
1 Parent(s): 0cbc1ea
Text changes
- languages/en.js +1 -1
languages/en.js
CHANGED
@@ -2,7 +2,7 @@ const english_data = {
 "toolName": "EDIA: Stereotypes and Discrimination in Artificial Intelligence",
 "toolSubname": "Stereotypes and Discrimination in Artificial Intelligence",
 "introduction_1": "Language models and word representations obtained with machine learning have been shown to contain discriminatory stereotypes. Here we present a set of inspection tools: EDIA (Stereotypes and Discrimination in Artificial Intelligence). This project aimed to design and evaluate a methodology that allows social scientists and domain experts in Latin America to explore biases and discriminatory stereotypes present in word embeddings (WE) and language models (LM). It also allowed them to define the type of bias to explore and do an intersectional analysis using two binary dimensions (for example, <i>female-male</i> intersected with <i>fat-skinny</i>).",
-"introduction_2": "EDIA contains several
+"introduction_2": "EDIA contains several tools that serve to detect and inspect biases in natural language processing systems based on language models or word embeddings. We have models in Spanish and English to work with and explore biases in different languages at the user's request. Each of the following spaces contains different tools that bring us closer to a particular aspect of the problem of bias and they allow us to understand different but complementary parts of it.",
 "wordBias": {
 "title": "Biases in words",
 "description": "Based on a technique to detect biases in WE, this function allows us to visualize the distribution of words in 2D space and thus observe the distance between them. The more occurrence contexts they share, the closer they will be, and the fewer occurrence contexts they share, the further they will be. This usually makes words with a similar meaning appear close. From the creation of word lists to define semantic fields, we will be able to observe biases and explore neighboring words between those meanings.",
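
For context on the "Biases in words" description in the strings above: the technique it refers to projects word embeddings into 2D so that words sharing more occurrence contexts appear closer together, and word lists defining semantic fields can then be compared visually. The sketch below is not EDIA's code; it only illustrates that idea under stated assumptions (gensim's downloadable "glove-wiki-gigaword-50" vectors, a PCA projection, and made-up example word lists — EDIA ships its own Spanish and English models).

```python
# Illustrative sketch only, not EDIA's implementation: plot two word lists
# (semantic fields) in 2D and inspect neighboring words.
import gensim.downloader as api
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA

# Assumed pretrained word embeddings (WE); downloaded on first use.
we = api.load("glove-wiki-gigaword-50")

# Hypothetical word lists defining the two semantic fields to compare.
list_a = ["she", "woman", "female", "nurse", "teacher"]
list_b = ["he", "man", "male", "engineer", "mechanic"]
words = [w for w in list_a + list_b if w in we]

# Reduce the embedding vectors to 2D; distances roughly preserve similarity.
coords = PCA(n_components=2).fit_transform([we[w] for w in words])

for (x, y), w in zip(coords, words):
    plt.scatter(x, y, c="tab:blue" if w in list_a else "tab:orange")
    plt.annotate(w, (x, y))
plt.title("2D projection of two semantic fields")
plt.show()

# Neighbor exploration around a word of interest.
print(we.most_similar("nurse", topn=5))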