import gradio as gr
from css import custom_css
import pandas as pd
from gradio_modal import Modal
import os
import yaml
import itertools

folder_path = 'configs'

# List to store data from YAML files
data_list = []
metadata_dict = {}


def expand_string_list(string_list):
    expanded_list = []
    # Add individual strings to the expanded list
    expanded_list.extend(string_list)
    # Generate combinations of different lengths from the input list
    for r in range(2, len(string_list) + 1):
        combinations = itertools.combinations(string_list, r)
        for combination in combinations:
            # Generate permutations of each combination
            permutations = itertools.permutations(combination)
            for permutation in permutations:
                expanded_list.append(' + '.join(permutation))
    return expanded_list
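
# Example of what the expansion produces (verifiable from the function above):
#   expand_string_list(["Text", "Image"])
#   -> ["Text", "Image", "Text + Image", "Image + Text"]
# i.e. every ordered combination joined with " + ", so that combined
# Modality/Level labels can be given a sort order further down.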


# Iterate over each file in the folder
for filename in os.listdir(folder_path):
    if filename.endswith('.yaml'):
        # Read YAML file
        file_path = os.path.join(folder_path, filename)
        with open(file_path, 'r') as yamlfile:
            yaml_data = yaml.safe_load(yamlfile)
        # Append YAML data to list
        data_list.append(yaml_data)
        metadata_dict['<u>' + yaml_data['Link'] + '</u>'] = yaml_data
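
# Each loaded config is expected to carry the fields referenced elsewhere in
# this script: Modality, Level, Group, Suggested Evaluation, What it is
# evaluating, Link, URL, Authors, Affiliations, Abstract, Hashtags,
# Applicable Models, Methodology, Considerations, Datasets, Metrics, and
# Screenshots.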

globaldf = pd.DataFrame(data_list)
globaldf['Link'] = '<u>' + globaldf['Link'] + '</u>'

# Define the desired order of categories
modality_order = ["Text", "Image", "Audio", "Video"]
level_order = ["Model", "Dataset", "Output", "Taxonomy"]
modality_order = expand_string_list(modality_order)
level_order = expand_string_list(level_order)

# Convert Modality and Level columns to categorical with specified order
globaldf['Modality'] = pd.Categorical(globaldf['Modality'], categories=modality_order, ordered=True)
globaldf['Level'] = pd.Categorical(globaldf['Level'], categories=level_order, ordered=True)

# Sort DataFrame by Modality and Level
globaldf.sort_values(by=['Modality', 'Level'], inplace=True)
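
# Note: Modality/Level values that are not in the expanded category lists
# become NaN in the categorical columns and are placed last by sort_values.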


def filter_modality_level(fulltable, modality_filter, level_filter):
    # Keep rows whose Modality and Level contain any of the ticked filter values
    filteredtable = fulltable[
        fulltable['Modality'].str.contains('|'.join(modality_filter))
        & fulltable['Level'].str.contains('|'.join(level_filter))
    ]
    return filteredtable
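
# Note: if a checkbox group is emptied, '|'.join([]) yields an empty pattern,
# which str.contains matches against every row, so the full table is shown.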


def showmodal(evt: gr.SelectData):
    print(evt.value, evt.index, evt.target)
    # Default: keep the modal hidden and all of its fields empty and invisible
    modal = Modal(visible=False)
    titlemd = gr.Markdown("", visible=False)
    authormd = gr.Markdown("", visible=False)
    affiliationmd = gr.Markdown("", visible=False)
    tagsmd = gr.Markdown("", visible=False)
    abstractmd = gr.Markdown("", visible=False)
    whatisbeingmd = gr.Markdown("", visible=False)
    methodmd = gr.Markdown("", visible=False)
    considerationsmd = gr.Markdown("", visible=False)
    modelsmd = gr.Markdown("", visible=False)
    datasetmd = gr.Markdown("", visible=False)
    metricsmd = gr.Markdown("", visible=False)
    gallery = gr.Gallery([], visible=False)
    # Only open the modal when the 'Link' column (column index 4) is clicked
    if evt.index[1] == 4:
        modal = Modal(visible=True)
        itemdic = metadata_dict[evt.value]
        tags = itemdic['Hashtags']
        if isinstance(tags, list) and len(tags) > 0:
            tagstr = ''.join(['<span class="tag">#' + tag + '</span> ' for tag in tags])
            tagsmd = gr.Markdown(tagstr, visible=True)
        models = itemdic['Applicable Models']
        if isinstance(models, list) and len(models) > 0:
            modelstr = '### Applicable Models: ' + ''.join(['<span class="tag">' + model + '</span> ' for model in models])
            modelsmd = gr.Markdown(modelstr, visible=True)
        titlemd = gr.Markdown('# [' + itemdic['Link'] + '](' + itemdic['URL'] + ')', visible=True)
        if pd.notnull(itemdic['Authors']):
            authormd = gr.Markdown('## ' + itemdic['Authors'], visible=True)
        if pd.notnull(itemdic['Affiliations']):
            affiliationmd = gr.Markdown('<strong>Affiliations: </strong>' + itemdic['Affiliations'], visible=True)
        if pd.notnull(itemdic['Abstract']):
            abstractmd = gr.Markdown(itemdic['Abstract'], visible=True)
        if pd.notnull(itemdic['What it is evaluating']):
            whatisbeingmd = gr.Markdown('<strong>Concept being evaluated: </strong>' + itemdic['What it is evaluating'], visible=True)
        if pd.notnull(itemdic['Methodology']):
            methodmd = gr.Markdown('<strong>Method of Evaluation: </strong>' + itemdic['Methodology'], visible=True)
        if pd.notnull(itemdic['Considerations']):
            considerationsmd = gr.Markdown('<strong>Considerations: </strong>' + itemdic['Considerations'], visible=True)
        if pd.notnull(itemdic['Datasets']):
            datasetmd = gr.Markdown('#### [Dataset](' + itemdic['Datasets'] + ')', visible=True)
        metrics = itemdic['Metrics']
        if isinstance(metrics, list) and len(metrics) > 0:
            metricstr = '### Metrics: ' + ''.join(['<span class="tag">' + metric + '</span> ' for metric in metrics])
            metricsmd = gr.Markdown(metricstr, visible=True)
        screenshots = itemdic['Screenshots']
        if isinstance(screenshots, list) and len(screenshots) > 0:
            gallery = gr.Gallery(screenshots, visible=True, height=450, object_fit="scale-down", interactive=False, show_share_button=False)
    return [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery]
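
# The components returned by showmodal are matched positionally to the
# `outputs` list passed to table_filtered.select(...) further down.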


# Create the Gradio page: a tab per impact category, each with filterable
# tables and a detail modal, plus a citation accordion at the bottom.
with gr.Blocks(title="Social Impact Measurement V2", css=custom_css, theme=gr.themes.Base()) as demo:  # theme=gr.themes.Soft(),
    with gr.Row():
        gr.Markdown("""
            # Social Impact Measurement
            ## A taxonomy of the social impacts of AI models and measurement techniques.
            """)
    with gr.Row():
        gr.Markdown("""
            #### Technical Base System Evaluations:
            Below we list the aspects that can be evaluated in a generative system. Context-absent evaluations provide only narrow insights into these aspects of a generative AI system. The depth of literature and research on evaluations differs by modality, with some modalities having sparse or no relevant literature, but the themes for evaluations can be applied to most systems.
            The following categories are high-level, non-exhaustive, and present a synthesis of the findings across different modalities. They refer solely to what can be evaluated in a base technical system:
            """)
    with gr.Tabs(elem_classes="tab-buttons") as tabs1:
        with gr.TabItem("Bias/Stereotypes"):
            fulltable = globaldf[globaldf['Group'] == 'BiasEvals']
            fulltable = fulltable[['Modality', 'Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']]
            gr.Markdown("""
                Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereotypes related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks.
                """)
            with gr.Row():
                modality_filter = gr.CheckboxGroup(
                    ["Text", "Image", "Audio", "Video"],
                    value=["Text", "Image", "Audio", "Video"],
                    label="Modality",
                    show_label=True,
                    # info="Which modality to show."
                )
                level_filter = gr.CheckboxGroup(
                    ["Model", "Dataset", "Output", "Taxonomy"],
                    value=["Model", "Dataset", "Output", "Taxonomy"],
                    label="Level",
                    show_label=True,
                    # info="Which modality to show."
                )
            with gr.Row():
                # The hidden table holds the unfiltered data; the visible one shows the filtered view
                table_full = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=False, interactive=False)
                table_filtered = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=True, interactive=False)
            modality_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered)
            level_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered)
            with Modal(visible=False) as modal:
                titlemd = gr.Markdown(visible=False)
                authormd = gr.Markdown(visible=False)
                affiliationmd = gr.Markdown(visible=False)
                tagsmd = gr.Markdown(visible=False)
                abstractmd = gr.Markdown(visible=False)
                gr.Markdown("""## Construct Validity<br>
                    ##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True)
                whatisbeingmd = gr.Markdown(visible=False)
                methodmd = gr.Markdown(visible=False)
                considerationsmd = gr.Markdown(visible=False)
                gr.Markdown("""## Resources<br>
                    ##### <em>What you need to do this evaluation</em>""", visible=True)
                modelsmd = gr.Markdown(visible=False)
                datasetmd = gr.Markdown(visible=False)
                gr.Markdown("""## Results<br>
                    ##### <em>Available evaluation results</em>""", visible=True)
                metricsmd = gr.Markdown(visible=False)
                gallery = gr.Gallery(visible=False)
            table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery])
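
        # The same filter controls + detail modal layout is repeated for each category tab below.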
with gr.TabItem("Cultural Values/Sensitive Content"): | |
fulltable = globaldf[globaldf['Group'] == 'CulturalEvals'] | |
fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']] | |
gr.Markdown(""" | |
Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereotypes related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks. | |
""") | |
with gr.Row(): | |
modality_filter = gr.CheckboxGroup(["Text", "Image", "Audio", "Video"], | |
value=["Text", "Image", "Audio", "Video"], | |
label="Modality", | |
show_label=True, | |
# info="Which modality to show." | |
) | |
level_filter = gr.CheckboxGroup(["Model", "Dataset", "Output", "Taxonomy"], | |
value=["Model", "Dataset", "Output", "Taxonomy"], | |
label="Level", | |
show_label=True, | |
# info="Which modality to show." | |
) | |
with gr.Row(): | |
table_full = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=False, interactive=False) | |
table_filtered = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=True, interactive=False) | |
modality_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered) | |
level_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered) | |
with Modal(visible=False) as modal: | |
titlemd = gr.Markdown(visible=False) | |
authormd = gr.Markdown(visible=False) | |
affiliationmd = gr.Markdown(visible=False) | |
tagsmd = gr.Markdown(visible=False) | |
abstractmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Construct Validity<br> | |
##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True) | |
whatisbeingmd = gr.Markdown(visible=False) | |
methodmd = gr.Markdown(visible=False) | |
considerationsmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Resources<br> | |
##### <em>What you need to do this evaluation</em>""", visible=True) | |
modelsmd = gr.Markdown(visible=False) | |
datasetmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Results<br> | |
##### <em>Available evaluation results</em>""", visible=True) | |
metricsmd = gr.Markdown(visible=False) | |
gallery = gr.Gallery(visible=False) | |
table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery]) | |
# with gr.TabItem("Disparate Performance"): | |
# with gr.Row(): | |
# gr.Image() | |
with gr.TabItem("Privacy/Data Protection"): | |
fulltable = globaldf[globaldf['Group'] == 'PrivacyEvals'] | |
fulltable = fulltable[['Modality','Level', 'Suggested Evaluation', 'What it is evaluating', 'Link']] | |
gr.Markdown(""" | |
Generative AI systems can perpetuate harmful biases from various sources, including systemic, human, and statistical biases. These biases, also known as "fairness" considerations, can manifest in the final system due to choices made throughout the development process. They include harmful associations and stereotypes related to protected classes, such as race, gender, and sexuality. Evaluating biases involves assessing correlations, co-occurrences, sentiment, and toxicity across different modalities, both within the model itself and in the outputs of downstream tasks. | |
""") | |
with gr.Row(): | |
modality_filter = gr.CheckboxGroup(["Text", "Image", "Audio", "Video"], | |
value=["Text", "Image", "Audio", "Video"], | |
label="Modality", | |
show_label=True, | |
# info="Which modality to show." | |
) | |
level_filter = gr.CheckboxGroup(["Model", "Dataset", "Output", "Taxonomy"], | |
value=["Model", "Dataset", "Output", "Taxonomy"], | |
label="Level", | |
show_label=True, | |
# info="Which modality to show." | |
) | |
with gr.Row(): | |
table_full = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=False, interactive=False) | |
table_filtered = gr.DataFrame(value=fulltable, wrap=True, datatype="markdown", visible=True, interactive=False) | |
modality_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered) | |
level_filter.change(filter_modality_level, inputs=[table_full, modality_filter, level_filter], outputs=table_filtered) | |
with Modal(visible=False) as modal: | |
titlemd = gr.Markdown(visible=False) | |
authormd = gr.Markdown(visible=False) | |
affiliationmd = gr.Markdown(visible=False) | |
tagsmd = gr.Markdown(visible=False) | |
abstractmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Construct Validity<br> | |
##### <em>How well it measures the concept it was designed to evaluate</em>""", visible=True) | |
whatisbeingmd = gr.Markdown(visible=False) | |
methodmd = gr.Markdown(visible=False) | |
considerationsmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Resources<br> | |
##### <em>What you need to do this evaluation</em>""", visible=True) | |
modelsmd = gr.Markdown(visible=False) | |
datasetmd = gr.Markdown(visible=False) | |
gr.Markdown("""## Results<br> | |
##### <em>Available evaluation results</em>""", visible=True) | |
metricsmd = gr.Markdown(visible=False) | |
gallery = gr.Gallery(visible=False) | |
table_filtered.select(showmodal, None, [modal, titlemd, authormd, affiliationmd, tagsmd, abstractmd, whatisbeingmd, methodmd, considerationsmd, modelsmd, datasetmd, metricsmd, gallery]) | |
# with gr.TabItem("Financial Costs"): | |
# with gr.Row(): | |
# gr.Image() | |
with gr.TabItem("Environmental Costs"): | |
with gr.Row(): | |
gr.Image() | |
# with gr.TabItem("Data and Content Moderation Labor"): | |
# with gr.Row(): | |
# gr.Image() | |
# with gr.Row(): | |
# gr.Markdown(""" | |
# #### B: People and Society Impact Evaluations: | |
# Long-term effects of systems embedded in society, such as economic or labor impact, largely require ideation of generative AI systems’ possible use cases and have fewer available general evaluations. The following categories heavily depend on how generative AI systems are deployed, including sector and application. In the broader ecosystem, methods of deployment affect social impact. | |
# The following categories are high-level, non-exhaustive, and present a synthesis of the findings across different modalities. They refer solely to what can be evaluated in people and society: | |
# """) | |
# with gr.Tabs(elem_classes="tab-buttons") as tabs2: | |
# with gr.TabItem("Trustworthiness and Autonomy"): | |
# with gr.Accordion("Trust in Media and Information", open=False): | |
# gr.Image() | |
# with gr.Accordion("Overreliance on Outputs", open=False): | |
# gr.Image() | |
# with gr.Accordion("Personal Privacy and Sense of Self", open=False): | |
# gr.Image() | |
# with gr.TabItem("Inequality, Marginalization, and Violence"): | |
# with gr.Accordion("Community Erasure", open=False): | |
# gr.Image() | |
# with gr.Accordion("Long-term Amplifying Marginalization by Exclusion (and Inclusion)", open=False): | |
# gr.Image() | |
# with gr.Accordion("Abusive or Violent Content", open=False): | |
# gr.Image() | |
# with gr.TabItem("Concentration of Authority"): | |
# with gr.Accordion("Militarization, Surveillance, and Weaponization", open=False): | |
# gr.Image() | |
# with gr.Accordion("Imposing Norms and Values", open=False): | |
# gr.Image() | |
# with gr.TabItem("Labor and Creativity"): | |
# with gr.Accordion("Intellectual Property and Ownership", open=False): | |
# gr.Image() | |
# with gr.Accordion("Economy and Labor Market", open=False): | |
# gr.Image() | |
# with gr.TabItem("Ecosystem and Environment"): | |
# with gr.Accordion("Widening Resource Gaps", open=False): | |
# gr.Image() | |
# with gr.Accordion("Environmental Impacts", open=False): | |
# gr.Image() | |
with gr.Row(): | |
with gr.Accordion("📚 Citation", open=False): | |
citation_button = gr.Textbox( | |
value=r"""BOOK CHAPTER CITE GOES HERE""", | |
lines=7, | |
label="Copy the following to cite this work.", | |
elem_id="citation-button", | |
show_copy_button=True, | |
) | |
demo.launch(debug=True) |
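
# To run locally (assumed layout): this script expects a css.py module exposing
# custom_css and a configs/ directory of YAML files next to it, plus the gradio,
# pandas, pyyaml, and gradio_modal packages installed.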