Add application file
- .gitignore +4 -0
- app.py +187 -0
- requirements.txt +3 -0
.gitignore
ADDED
@@ -0,0 +1,4 @@
.idea/
__pycache__
projects
.DS_Store
app.py
ADDED
@@ -0,0 +1,187 @@
import gradio as gr
from gradio.components import Textbox, Slider, Plot
import numpy as np
import matplotlib.pyplot as plt
from tqdm.auto import tqdm

plt.style.use('seaborn-v0_8-whitegrid')

columns = ["difficulty", "stability", "retrievability", "delta_t",
           "reps", "lapses", "last_date", "due", "ivl", "cost", "rand"]
col = {key: i for i, key in enumerate(columns)}


def simulate(w, request_retention=0.9, deck_size=10000, learn_span=100, max_cost_perday=200, max_ivl=36500, recall_cost=10, forget_cost=30, learn_cost=10):
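    # card_table: one row per attribute in `columns`, one column per card.
    # "due" is initialized to learn_span, which also serves as the
    # "not yet introduced" sentinel checked by need_learn below.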
    card_table = np.zeros((len(columns), deck_size))
    card_table[col["due"]] = learn_span
    card_table[col["difficulty"]] = 1e-10
    card_table[col["stability"]] = 1e-10

    review_cnt_per_day = np.zeros(learn_span)
    learn_cnt_per_day = np.zeros(learn_span)
    memorized_cnt_per_day = np.zeros(learn_span)

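    # FSRS stability update. On a successful review the increase is larger for
    # easy cards (11 - d), damped for already-stable cards (s^-w[9]), and
    # boosted when retrievability was low; after a lapse the new stability is
    # capped at its previous value.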
    def cal_next_recall_stability(s, r, d, response):
        if response == 1:
            return s * (1 + np.exp(w[8]) * (11 - d) * np.power(s, -w[9]) * (np.exp((1 - r) * w[10]) - 1))
        else:
            return np.minimum(w[11] * np.power(d, -w[12]) * (np.power(s + 1, w[13]) - 1) * np.exp((1 - r) * w[14]), s)

    for today in tqdm(range(learn_span)):
        has_learned = card_table[col["stability"]] > 1e-10
        card_table[col["delta_t"]][has_learned] = today - \
            card_table[col["last_date"]][has_learned]
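        # Forgetting curve: R(t) = (1 + t / (9 * S))^-1, so retrievability
        # drops to 90% when the elapsed time t equals the stability S.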
        card_table[col["retrievability"]][has_learned] = np.power(
            1 + card_table[col["delta_t"]][has_learned] / (9 * card_table[col["stability"]][has_learned]), -1)

        card_table[col["cost"]] = 0
        need_review = card_table[col["due"]] <= today
        card_table[col["rand"]][need_review] = np.random.rand(
            np.sum(need_review))
        forget = card_table[col["rand"]] > card_table[col["retrievability"]]
        card_table[col["cost"]][need_review & forget] = forget_cost
        card_table[col["cost"]][need_review & ~forget] = recall_cost
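        # Enforce the daily time budget: a due card is only truly reviewed
        # while the cumulative cost over the deck stays within max_cost_perday.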
        true_review = need_review & (
            np.cumsum(card_table[col["cost"]]) <= max_cost_perday)
        card_table[col["last_date"]][true_review] = today

        card_table[col["lapses"]][true_review & forget] += 1
        card_table[col["reps"]][true_review & ~forget] += 1

        card_table[col["stability"]][true_review & forget] = cal_next_recall_stability(
            card_table[col["stability"]][true_review & forget], card_table[col["retrievability"]][true_review & forget], card_table[col["difficulty"]][true_review & forget], 0)

        card_table[col["stability"]][true_review & ~forget] = cal_next_recall_stability(
            card_table[col["stability"]][true_review & ~forget], card_table[col["retrievability"]][true_review & ~forget], card_table[col["difficulty"]][true_review & ~forget], 1)

        card_table[col["difficulty"]][true_review & forget] = np.clip(
            card_table[col["difficulty"]][true_review & forget] + 2 * w[6], 1, 10)

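        # Introduce new cards with whatever budget is left. The first rating
        # (0-3) picks the initial stability from w[0:4] and sets difficulty.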
        need_learn = card_table[col["due"]] == learn_span
        card_table[col["cost"]][need_learn] = learn_cost
        true_learn = need_learn & (
            np.cumsum(card_table[col["cost"]]) <= max_cost_perday)
        card_table[col["last_date"]][true_learn] = today
        first_ratings = np.random.randint(0, 4, np.sum(true_learn))
        card_table[col["stability"]][true_learn] = np.choose(
            first_ratings, w[:4])
        card_table[col["difficulty"]][true_learn] = w[4] - \
            w[5] * (first_ratings - 3)

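        # Schedule the next review by inverting the forgetting curve:
        # solving request_retention = (1 + ivl / (9 * S))^-1 for ivl gives
        # ivl = 9 * S * (1 / request_retention - 1).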
        card_table[col["ivl"]][true_review | true_learn] = np.clip(np.round(
            9 * card_table[col["stability"]][true_review | true_learn] * (1 / request_retention - 1), 0), 1, max_ivl)
        card_table[col["due"]][true_review | true_learn] = today + \
            card_table[col["ivl"]][true_review | true_learn]

        review_cnt_per_day[today] = np.sum(true_review)
        learn_cnt_per_day[today] = np.sum(true_learn)
        memorized_cnt_per_day[today] = card_table[col["retrievability"]].sum()
    return card_table, review_cnt_per_day, learn_cnt_per_day, memorized_cnt_per_day


def interface_func(weights: str, learning_time: int, learn_span: int, deck_size: int, max_ivl: int, recall_cost: int, forget_cost: int, learn_cost: int,
                   progress=gr.Progress(track_tqdm=True)):
    plt.close('all')  # clear figures from a previous click so curves don't pile up
    np.random.seed(42)
    w = list(map(lambda x: float(x.strip()), weights.split(',')))
    max_cost_perday = learning_time * 60
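    # Smooth the daily counts with a moving average whose window defaults to
    # ~5% of the learning period; mode='valid' drops the incomplete edges.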
    def moving_average(data, window_size=learn_span//20):
        weights = np.ones(window_size) / window_size
        return np.convolve(data, weights, mode='valid')

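    # Sweep five retention targets and overlay the curves on shared figures,
    # making the trade-off between retention and daily workload visible.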
    for request_retention in [0.95, 0.9, 0.85, 0.8, 0.75]:
        (_,
         review_cnt_per_day,
         learn_cnt_per_day,
         memorized_cnt_per_day) = simulate(w,
                                           request_retention=request_retention,
                                           deck_size=deck_size,
                                           learn_span=learn_span,
                                           max_cost_perday=max_cost_perday,
                                           max_ivl=max_ivl,
                                           recall_cost=recall_cost,
                                           forget_cost=forget_cost,
                                           learn_cost=learn_cost)

        plt.figure(1)
        plt.plot(moving_average(review_cnt_per_day),
                 label=f"R={request_retention*100:.0f}%")
        plt.title("Review Count per Day")
        plt.legend()
        plt.figure(2)
        plt.plot(moving_average(learn_cnt_per_day),
                 label=f"R={request_retention*100:.0f}%")
        plt.title("Learn Count per Day")
        plt.legend()
        plt.figure(3)
        plt.plot(np.cumsum(learn_cnt_per_day),
                 label=f"R={request_retention*100:.0f}%")
        plt.title("Cumulative Learn Count")
        plt.legend()
        plt.figure(4)
        plt.plot(memorized_cnt_per_day,
                 label=f"R={request_retention*100:.0f}%")
        plt.title("Memorized Count per Day")
        plt.legend()

    return plt.figure(1), plt.figure(2), plt.figure(3), plt.figure(4)

+
description = f"""
|
130 |
+
# FSRS4Anki Simulator
|
131 |
+
|
132 |
+
Here is a simulator for FSRS4Anki. It can simulate the learning process of a deck with given weights and parameters.
|
133 |
+
|
134 |
+
It will help you to find the expected requestRetention for FSRS4Anki.
|
135 |
+
|
136 |
+
The simulator assumes that you spend the same amount of time on Anki every day.
|
137 |
+
"""
|
138 |
+
|
139 |
+
with gr.Blocks() as demo:
|
140 |
+
with gr.Box():
|
141 |
+
gr.Markdown(description)
|
142 |
+
with gr.Box():
|
143 |
+
with gr.Row():
|
144 |
+
with gr.Column():
|
145 |
+
weights = Textbox(label="Weights", lines=1,
|
146 |
+
value="0.4, 0.6, 2.4, 5.8, 4.93, 0.94, 0.86, 0.01, 1.49, 0.14, 0.94, 2.18, 0.05, 0.34, 1.26, 0.29, 2.61")
|
147 |
+
learning_time = Slider(label="Learning Time perday (minutes)",
|
148 |
+
minimum=5, maximum=1440, step=5, value=30)
|
149 |
+
learn_span = Slider(label="Learning Period (days)", minimum=30,
|
150 |
+
maximum=3650, step=10, value=365)
|
151 |
+
deck_size = Slider(label="Deck Size (cards)", minimum=100,
|
152 |
+
maximum=100000, step=100, value=10000)
|
153 |
+
with gr.Column():
|
154 |
+
max_ivl = Slider(label="Maximum Interval (days)", minimum=30,
|
155 |
+
maximum=36500, step=10, value=36500)
|
156 |
+
recall_cost = Slider(label="Review Cost (seconds)", minimum=1,
|
157 |
+
maximum=600, step=1, value=10)
|
158 |
+
forget_cost = Slider(label="Relearn Cost (seconds)",
|
159 |
+
minimum=1, maximum=600, step=1, value=30)
|
160 |
+
learn_cost = Slider(label="Learn Cost (seconds)", minimum=1,
|
161 |
+
maximum=600, step=1, value=10)
|
162 |
+
with gr.Row():
|
163 |
+
btn_plot = gr.Button("Simulate")
|
164 |
+
with gr.Row():
|
165 |
+
with gr.Column():
|
166 |
+
review_count = Plot(label="Review Count per Day")
|
167 |
+
learn_count = Plot(label="Learn Count per Day")
|
168 |
+
with gr.Column():
|
169 |
+
cumulative_learn_count = Plot(label="Cumulative Learn Count")
|
170 |
+
memorized_count = Plot(label="Memorized Count per Day")
|
171 |
+
|
172 |
+
btn_plot.click(
|
173 |
+
fn=interface_func,
|
174 |
+
inputs=[weights,
|
175 |
+
learning_time,
|
176 |
+
learn_span,
|
177 |
+
deck_size,
|
178 |
+
max_ivl,
|
179 |
+
recall_cost,
|
180 |
+
forget_cost,
|
181 |
+
learn_cost,
|
182 |
+
],
|
183 |
+
outputs=[review_count, learn_count,
|
184 |
+
cumulative_learn_count, memorized_count],
|
185 |
+
)
|
186 |
+
|
187 |
+
demo.queue().launch(show_error=True)
|
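To sanity-check the simulator without the web UI, simulate can be called directly. A minimal sketch, assuming simulate is importable (for example, with the launch call guarded by if __name__ == "__main__": or the function copied into a standalone script) and using the default weights from the Textbox above:

import numpy as np

w = [0.4, 0.6, 2.4, 5.8, 4.93, 0.94, 0.86, 0.01, 1.49,
     0.14, 0.94, 2.18, 0.05, 0.34, 1.26, 0.29, 2.61]
np.random.seed(42)
# 30 minutes per day for one year at the default 90% retention target
_, review_cnt, learn_cnt, memorized = simulate(
    w, request_retention=0.9, deck_size=10000, learn_span=365,
    max_cost_perday=30 * 60)
print(f"average reviews per day: {review_cnt.mean():.1f}")
print(f"cards in memory on the final day: {memorized[-1]:.0f}")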
requirements.txt
ADDED
@@ -0,0 +1,3 @@
matplotlib>=3.7.0
numpy>=1.22.4
tqdm>=4.64.1