xinchen9 committed
Commit ca1772a
1 Parent(s): 23d59a5

[Update]Add new introduction on about

Files changed (1):
  about.py (+6 -8)
about.py CHANGED
@@ -20,21 +20,19 @@ NUM_FEWSHOT = 0 # Change with your few shot
 
 
 
-# Your leaderboard name
-TITLE = """<h1 align="center" id="space-title">UnlearnDiffAtk Benchmark</h1>"""
+TITLE = """<h1 align="center" id="space-title"> Demo of AdvUnlearn</h1>"""
 
 # subtitle
-SUB_TITLE = """<h2 align="center" id="space-title">Effective and efficient adversarial prompt generation approach for diffusion models</h1>"""
+SUB_TITLE = """<h2 align="center" id="space-title">A robust unlearning framework </h1>"""
 
 # What does your leaderboard evaluate?
 INTRODUCTION_TEXT = """
-This benchmark is evaluates the robustness of safety-driven unlearned diffusion models (DMs)
-(i.e., DMs after unlearning undesirable concepts, styles, or objects) across a variety of tasks. For more details, please visit the [project](https://www.optml-group.com/posts/mu_attack),
-check the [code](https://github.com/OPTML-Group/Diffusion-MU-Attack), and read the [paper](https://arxiv.org/abs/2310.11868).\\
-Demo of our offensive method: [UnlearnDiffAtk](https://huggingface.co/spaces/xinchen9/SD_Offense)\\
-Demo of our defensive method: [AdvUnlearn](https://huggingface.co/spaces/xinchen9/SD_Defense)
+AdvUnlearn is a robust unlearning framework. It aims to enhance the robustness of concept erasing by integrating
+the principle of adversarial training (AT) into machine unlearning and also achieves a balanced tradeoff with model utility. For details, please
+read the [paper](https://arxiv.org/abs/2405.15234) and check the [code](https://github.com/OPTML-Group/AdvUnlearn)
 """
 
+
 # Which evaluations are you running? how can people reproduce what you have?
 LLM_BENCHMARKS_TEXT = f"""
 ## How it works
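
For context, the constants touched by this commit are plain Python strings that the Space's Gradio app renders on its landing page. Below is a minimal sketch of how an app script might consume them; the file name, import path, and layout are assumptions for illustration and are not part of this commit.

# sketch of a hypothetical app script; assumes about.py sits alongside it
import gradio as gr

from about import TITLE, SUB_TITLE, INTRODUCTION_TEXT

with gr.Blocks() as demo:
    gr.HTML(TITLE)                  # render the <h1> title as raw HTML
    gr.HTML(SUB_TITLE)              # render the <h2> subtitle as raw HTML
    gr.Markdown(INTRODUCTION_TEXT)  # render the Markdown introduction

if __name__ == "__main__":
    demo.launch()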