File size: 35,185 Bytes
6eff5e7
 
 
 
4bea31b
05a7bdc
6eff5e7
 
 
 
c2dbf38
 
 
 
7269ffa
580aef7
5ee7598
 
6eff5e7
 
580aef7
6eff5e7
c2dbf38
5ee7598
 
 
6004e76
 
6eff5e7
c2dbf38
 
 
6eff5e7
e7933f3
6eff5e7
 
c2dbf38
 
300debd
c2dbf38
7269ffa
e7933f3
 
05a7bdc
 
6eff5e7
 
81ca652
7269ffa
81ca652
6eff5e7
 
300debd
7269ffa
0532283
fd61399
6eff5e7
 
e7933f3
6eff5e7
 
a3a8d41
6eff5e7
 
b5a0311
648fab4
c2dbf38
6eff5e7
 
81ca652
fd61399
 
 
6eff5e7
 
c2dbf38
 
 
 
 
fd61399
 
6eff5e7
e16ae7e
05a7bdc
649e53c
 
963bf46
7269ffa
05a7bdc
 
6eff5e7
6004e76
091bb76
3448819
0532283
091bb76
0532283
5ee7598
0532283
 
c2dbf38
 
0532283
6004e76
0532283
091bb76
 
6004e76
 
 
 
 
 
 
 
0532283
b5a0311
0532283
 
6004e76
300debd
0532283
091bb76
3448819
fd61399
 
 
0532283
c2dbf38
05a7bdc
649e53c
 
091bb76
b5a0311
c2dbf38
 
 
 
fd61399
c2dbf38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
573dc49
 
 
 
 
c2dbf38
fd61399
c2dbf38
 
573dc49
 
 
c2dbf38
 
fd61399
091bb76
6eff5e7
fd61399
 
c2dbf38
 
 
 
 
 
 
fd61399
 
c2dbf38
 
 
fd61399
c2dbf38
 
 
 
 
 
091bb76
648fab4
c2dbf38
fd61399
c2dbf38
 
648fab4
c2dbf38
fd61399
c2dbf38
 
 
fd61399
 
 
648fab4
c2dbf38
fd61399
c2dbf38
 
648fab4
c2dbf38
fd61399
 
 
c2dbf38
 
 
 
091bb76
648fab4
c2dbf38
 
648fab4
 
 
c2dbf38
 
648fab4
c2dbf38
 
 
 
 
 
 
 
 
 
 
 
 
648fab4
c2dbf38
 
 
fd61399
c2dbf38
 
 
 
 
 
300debd
c2dbf38
 
 
648fab4
fd61399
649e53c
648fab4
649e53c
0532283
961f39c
091bb76
 
 
961f39c
fd61399
961f39c
6004e76
573dc49
0532283
6eff5e7
649e53c
 
 
 
6eff5e7
 
 
963bf46
6eff5e7
 
6004e76
 
 
 
 
 
6eff5e7
b5a0311
 
 
6004e76
 
6eff5e7
 
 
6004e76
 
 
 
 
 
b5a0311
573dc49
b5a0311
fd61399
 
6004e76
b5a0311
 
 
3448819
fd61399
961f39c
fd61399
6004e76
 
fd61399
6004e76
fd61399
e16ae7e
 
 
6004e76
 
 
 
 
 
 
 
 
 
 
 
 
 
c2dbf38
 
 
 
 
 
 
fd61399
c2dbf38
 
 
 
 
 
 
 
 
 
 
 
4068146
b5a0311
6eff5e7
a6756ef
2fad322
 
961f39c
 
5ee7598
961f39c
 
e7933f3
2fad322
5ee7598
f582acb
648fab4
fd61399
4068146
fd61399
961f39c
 
cc467f5
4068146
961f39c
 
 
4068146
fd61399
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
6eff5e7
 
e7933f3
 
 
 
 
 
6eff5e7
648fab4
6eff5e7
648fab4
6eff5e7
e7933f3
 
fd61399
 
 
 
 
 
 
e7933f3
 
fd61399
e7933f3
 
6eff5e7
300debd
 
 
649e53c
e7933f3
c2dbf38
 
 
091bb76
fd61399
c2dbf38
 
fd61399
 
 
 
 
091bb76
c2dbf38
 
fd61399
c2dbf38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
fd61399
 
 
c2dbf38
 
 
 
 
 
 
 
 
fd61399
 
c2dbf38
 
 
 
fd61399
 
c2dbf38
 
 
 
 
 
 
091bb76
 
fd61399
963bf46
 
648fab4
961f39c
648fab4
961f39c
 
 
 
6004e76
 
 
 
 
e16ae7e
fd61399
e16ae7e
6eff5e7
961f39c
 
 
 
 
 
 
6eff5e7
 
 
 
 
961f39c
 
 
6004e76
 
 
 
 
 
 
 
 
 
 
961f39c
 
6004e76
961f39c
 
6004e76
961f39c
fd61399
 
 
573dc49
961f39c
 
e7933f3
963bf46
 
c2dbf38
 
 
 
 
 
 
 
 
 
fd61399
c2dbf38
 
 
 
 
 
 
 
 
 
 
649e53c
 
 
 
 
 
6eff5e7
 
 
e7933f3
6eff5e7
b5a0311
c2dbf38
 
b5a0311
6eff5e7
c2dbf38
7269ffa
 
091bb76
 
 
 
 
648fab4
6eff5e7
648fab4
e16ae7e
6eff5e7
e16ae7e
961f39c
fd61399
961f39c
6004e76
 
6eff5e7
091bb76
6eff5e7
963bf46
6eff5e7
573dc49
0532283
 
b5a0311
6004e76
b5a0311
0532283
6eff5e7
 
e16ae7e
963bf46
e16ae7e
 
b5a0311
 
573dc49
6004e76
b5a0311
 
e16ae7e
 
 
0532283
fd61399
6004e76
 
 
 
 
c2dbf38
6004e76
 
 
 
 
 
 
 
 
0532283
e16ae7e
 
4f8ef7b
c2dbf38
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
573dc49
c2dbf38
860bafc
d3d8e12
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
473
474
475
476
477
478
479
480
481
482
483
484
485
486
487
488
489
490
491
492
493
494
495
496
497
498
499
500
501
502
503
504
505
506
507
508
509
510
511
512
513
514
515
516
517
518
519
520
521
522
523
524
525
526
527
528
529
530
531
532
533
534
535
536
537
538
539
540
541
542
543
544
545
546
547
548
549
550
551
552
553
554
555
556
557
558
559
560
561
562
563
564
565
566
567
568
569
570
571
572
573
574
575
576
577
578
579
580
581
582
583
584
585
586
587
588
589
590
591
592
593
594
595
596
597
598
599
600
601
602
603
604
605
606
607
608
609
610
611
612
613
614
615
616
617
618
619
620
621
622
623
624
625
626
627
628
629
630
631
632
633
634
635
636
637
638
639
640
641
642
643
644
645
646
647
648
649
650
651
652
653
654
655
656
657
658
659
660
661
662
663
664
665
666
667
668
669
670
671
672
673
674
675
676
677
678
679
680
681
682
683
684
685
686
687
688
689
690
691
692
693
694
695
696
697
698
699
700
701
702
703
704
705
706
707
708
import gradio as gr
from transformers import AutoTokenizer, AutoModel
from sentence_transformers import SentenceTransformer
import pickle
import nltk
import time

from input_format import *
from score import *

# One-time NLTK resources used by sent_tokenize / POS tagging in score.py helpers.
nltk.download('punkt') # tokenizer
nltk.download('averaged_perceptron_tagger') # postagger

## load document scoring model
#torch.cuda.is_available = lambda : False  # uncomment to test with CPU only
# All models below are moved to this device; falls back to CPU when no GPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
#pretrained_model = 'allenai/specter'
# SPECTER2 scientific-paper embedding model: used for document-level affinity scoring.
pretrained_model = 'allenai/specter2'
tokenizer = AutoTokenizer.from_pretrained(pretrained_model)
doc_model = AutoModel.from_pretrained(pretrained_model) 
doc_model.to(device)

## load sentence model 
# NOTE: sentence-level highlights reuse the same SPECTER2 model by default.
sent_model = doc_model # have the same model for document and sentence level

# OR specify different model for sentence level
#sent_model = SentenceTransformer('sentence-transformers/gtr-t5-base')
#sent_model.to(device)

# Upper bounds on UI components created at build time; sliders select how many
# of these pre-allocated components are made visible.
NUM_PAPERS_SHOW = 5 # max number of top papers to show from the reviewer upfront
NUM_PAIRS_SHOW = 5 # max number of top sentence pairs to show

def get_similar_paper(
    title_input,
    abstract_text_input, 
    author_id_input, 
    top_paper_slider,
    top_pair_slider,
    results={}, # this state variable will be updated and returned
):
    """Retrieve and rank the reviewer's papers most relevant to a submission.

    Pipeline: (1) fetch the reviewer's papers from the author id, (2) score
    them against the submission at the document level, (3) compute sentence-
    and phrase-level highlight info for the top 10 papers, and (4) build all
    Gradio component updates for the results UI.

    Parameters mirror the Gradio inputs. ``results`` is the shared State dict;
    its incoming value is ignored and it is rebuilt from scratch each call.

    Returns a flat list of Gradio updates followed by the refreshed ``results``
    state dict (matching the app's declared outputs).
    """
    progress = gr.Progress()
    # Identity comparison with None (was `== None`).
    if title_input is None:
        title_input = '' # if no title is given, just focus on abstract.
    print('retrieving similar papers...')
    start = time.time()
    # Tokenize the submission abstract into sentences once and reuse below
    # (previously recomputed a second time before the highlight loop).
    input_sentences = sent_tokenize(abstract_text_input)
    num_input_sents = len(input_sentences)

    # Get author papers from id
    #progress(0.1, desc="Retrieving reviewer papers ...")
    name, papers = get_text_from_author_id(author_id_input)

    # Compute Doc-level affinity scores for the Papers
    #progress(0.5, desc="Computing document scores...")
    # TODO detect duplicate papers?
    titles, abstracts, paper_urls, doc_scores, paper_years, paper_citations = compute_document_score(
        doc_model, 
        tokenizer,
        title_input,
        abstract_text_input, 
        papers,
        batch=10
    )

    # Cache everything the downstream callbacks need in the State dict.
    results = {
        'name': name,
        'author_url': author_id_input,
        'titles': titles,
        'abstracts': abstracts,
        'urls': paper_urls,
        'doc_scores': doc_scores,
        'years': paper_years,
        'citations': paper_citations,
    }

    # Select top 10 papers to show
    titles = titles[:10]
    abstracts = abstracts[:10]
    doc_scores = doc_scores[:10]
    paper_urls = paper_urls[:10]
    paper_years = paper_years[:10]
    paper_citations = paper_citations[:10]

    display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(titles, doc_scores)]
    end = time.time()
    retrieval_time = end - start
    print('paper retrieval complete in [%0.2f] seconds'%(retrieval_time))

    progress(0.9, desc="Obtaining relevant information from the papers...")
    print('obtaining highlights..')
    start = time.time()

    for aa, (tt, ab, ds, url) in enumerate(zip(titles, abstracts, doc_scores, paper_urls)):
        # Compute sent-level and phrase-level affinity scores for each paper
        sent_ids, sent_scores, info, top_pairs_info = get_highlight_info(
            sent_model, 
            tokenizer,
            abstract_text_input, 
            ab,
            K=None,  
            top_pair_num=10, # top ten sentence pairs at max to show upfront 
        )
        num_cand_sents = sent_ids.shape[1]

        # get scores for each word in the format for Gradio Interpretation component
        word_scores = dict()
        for i in range(num_input_sents):
            word_scores[str(i)] = dict()
            for j in range(1, num_cand_sents+1):
                ww, ss = remove_spaces(info['all_words'], info[i][j]['scores'])
                word_scores[str(i)][str(j)] = {
                    "original": ab,
                    "interpretation": list(zip(ww, ss))
                } 

        results[display_title[aa]] = {
            'title': tt,
            'abstract': ab,
            'num_cand_sents': num_cand_sents,
            'doc_score': '%0.3f'%ds,
            'source_sentences': input_sentences,
            'highlight': word_scores,
            'top_pairs': top_pairs_info,
            'url': url,
            'year': paper_years[aa],
            'citations': paper_citations[aa],
        }

    end = time.time()
    highlight_time = end - start
    print('done in [%0.2f] seconds'%(highlight_time)) 

    ## Set up output elements
    ## Components for Initial Part
    result1_desc_value = """
        <h3>Top %d relevant papers by the reviewer <a href="%s" target="_blank">%s</a></h3>
        
        For each paper, top %d sentence pairs (one from the submission on the left, one from the paper on the right) with the highest relevance scores are shown.
        
        **<span style="color:black;background-color:#65B5E3;">Blue highlights</span>**: phrases that appear in both sentences.
        """%(int(top_paper_slider), author_id_input, results['name'], int(top_pair_slider))

    out1 = [
        gr.update(visible=True), # Explore more button
        gr.update(value=result1_desc_value, visible=True), # result 1 description
        gr.update(value='Done (in %0.1f seconds)'%(retrieval_time+highlight_time), visible=True), # search status
        gr.update(visible=True),  # top paper slider
        gr.update(visible=True) # top pair slider
    ]

    ### Components for Results in Initial Part
    top_papers_show = int(top_paper_slider) # number of top papers to show upfront
    top_num_info_show = int(top_pair_slider) # number of sentence pairs from each paper to show upfront
    output = setup_outputs(results, top_papers_show, top_num_info_show)
    out2 = []
    for x in output:
        out2 += x 

    ### Components for Explore More Section
    # list of top papers, sentences to select from, paper_title, affinity
    title = results[display_title[0]]['title'] # set default title as the top paper
    url = results[display_title[0]]['url']
    aff_score = results[display_title[0]]['doc_score']
    title_out = """<a href="%s" target="_blank"><h5>%s</h5></a>"""%(url, title)
    aff_score_out = '##### Affinity Score: %s'%aff_score
    result2_desc_value = """
        ##### Click a paper by %s (left, sorted by affinity scores), and a sentence from the submission abstract (center), to see which parts of the paper's abstract are relevant (right).
        """%results['name']
    out3 = [
        gr.update(choices=display_title, value=display_title[0],  interactive=True), # set of papers (radio)
        gr.update(choices=input_sentences, value=input_sentences[0], interactive=True), # submission sentences 
        gr.update(value=title_out), # paper_title
        gr.update(value=aff_score_out),  # affinity
        gr.update(value=result2_desc_value), # result 2 description (show more section)
        gr.update(value=2, maximum=len(sent_tokenize(abstracts[0]))), # highlight slider to control
    ]

    # Release cached GPU memory before handing control back to the UI.
    torch.cuda.empty_cache()

    ## Return by adding the State variable info
    return out1 + out2 + out3 + [results]

def setup_outputs(info, top_papers_show, top_num_info_show):
    """Build Gradio component updates for the top-papers results section.

    Args:
        info: cached results dict produced by ``get_similar_paper`` (keys:
            'titles', 'doc_scores', 'urls', 'years', 'citations', plus one
            entry per display title holding 'top_pairs').
        top_papers_show: number of papers to make visible (<= NUM_PAPERS_SHOW).
        top_num_info_show: sentence pairs per paper to show (<= NUM_PAIRS_SHOW).

    Returns a 9-tuple of update lists, each padded with hidden/None entries to
    the fixed component counts the UI was built with.
    """
    titles = info['titles']
    doc_scores = info['doc_scores']
    paper_urls = info['urls']
    paper_years = info['years']
    paper_citations = info['citations']
    display_title = ['[ %0.3f ] %s'%(s, t) for t, s in zip(info['titles'], info['doc_scores'])]
    title = []
    affinity = []
    citation_count = []
    sent_pair_score = []
    sent_text_query = []
    sent_text_candidate = []
    sent_hl_query = []
    sent_hl_candidate = []
    demarc_lines = []
    for i in range(top_papers_show):
        # Title and citation count render identically for every paper; only the
        # first paper's affinity score carries the help tip (dedup of the old
        # copy-pasted if/else branches).
        title.append(
            gr.update(value="""<a href="%s" target="_blank"><h4>%s (%s)</h4></a>"""%(paper_urls[i], titles[i], str(paper_years[i])), visible=True)
        )
        if i == 0:
            affinity.append(
                gr.update(value="""#### Affinity Score: %0.3f 
                        <div class="help-tip">
                            <p>Measures how similar the paper's abstract is to the submission abstract.</p>
                        </div>
                        """%doc_scores[i], visible=True) # document affinity
            )
        else:
            affinity.append(
                gr.update(value='#### Affinity Score: %0.3f'%doc_scores[i], visible=True) # document affinity
            )
        citation_count.append(
             gr.update(value="""#### Citation Count: %d"""%paper_citations[i], visible=True) # document affinity
        )
        demarc_lines.append(gr.Markdown.update(visible=True))

        # fill in the rest as 
        tp = info[display_title[i]]['top_pairs']
        for j in range(top_num_info_show):
            if i == 0 and j == 0:
                # for the first entry add help tip
                sent_pair_score.append(
                    gr.update(value="""Sentence Relevance:\n%0.3f
                            <div class="help-tip">
                                <p>Measures how similar the sentence pairs are.</p>
                            </div>"""%tp[j]['score'], visible=True)
                ) 
            else:
                sent_pair_score.append(
                    gr.Textbox.update(value='Sentence Relevance:\n%0.3f'%tp[j]['score'], visible=True)
                ) 
            sent_text_query.append(gr.Textbox.update(tp[j]['query']['original']))
            sent_text_candidate.append(gr.Textbox.update(tp[j]['candidate']['original']))
            sent_hl_query.append(tp[j]['query'])
            sent_hl_candidate.append(tp[j]['candidate'])
        # Hide the unused per-paper pair slots.
        sent_pair_score += [gr.Markdown.update(visible=False)] * (NUM_PAIRS_SHOW - top_num_info_show)
        sent_text_query += [gr.Textbox.update(value='', visible=False)] * (NUM_PAIRS_SHOW - top_num_info_show)
        sent_text_candidate += [gr.Textbox.update(value='', visible=False)] * (NUM_PAIRS_SHOW - top_num_info_show)
        sent_hl_query += [None] * (NUM_PAIRS_SHOW - top_num_info_show)
        sent_hl_candidate += [None] * (NUM_PAIRS_SHOW - top_num_info_show)

    # mark others not visible
    title += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
    affinity += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
    citation_count += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
    demarc_lines += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show)
    sent_pair_score += [gr.Markdown.update(visible=False)] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
    sent_text_query += [gr.Textbox.update(value='', visible=False)] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
    sent_text_candidate += [gr.Textbox.update(value='', visible=False)]  * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
    sent_hl_query += [None] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW
    sent_hl_candidate += [None] * (NUM_PAPERS_SHOW - top_papers_show) * NUM_PAIRS_SHOW

    assert(len(title) == NUM_PAPERS_SHOW)
    assert(len(affinity) == NUM_PAPERS_SHOW)
    assert(len(sent_pair_score) == NUM_PAIRS_SHOW * NUM_PAPERS_SHOW)

    return title, affinity, citation_count, demarc_lines, sent_pair_score, sent_text_query, sent_text_candidate, sent_hl_query, sent_hl_candidate

def show_more(info):
    """Reveal the interactive 'Explore More' section of the app.

    Makes nine components visible: the description, set of papers, submission
    sentences, title/affinity/citation rows, highlight legend, highlight
    slider, and highlight abstract. ``info`` is unused here.
    """
    reveal = [gr.update(visible=True) for _ in range(9)]
    return tuple(reveal)

def show_status():
    """Reveal the search-status field when the search button is clicked."""
    status_update = gr.update(visible=True)
    return status_update

def update_name(author_id_input):
    """Refresh the displayed author name from the given author-id input."""
    # Only the name is needed here; the paper list is discarded.
    author_name, _papers = get_text_from_author_id(author_id_input)
    return gr.update(value=author_name)

def change_sentence(
    selected_papers_radio, 
    source_sent_choice, 
    highlight_slider, 
    info={}
):
    """Return the highlight payload for a newly selected submission sentence.

    Args:
        selected_papers_radio: display-title key of the currently chosen paper.
        source_sent_choice: the submission sentence picked by the user.
        highlight_slider: number of candidate sentences to highlight (>= 1).
        info: cached results State dict; read-only here.

    Returns the Interpretation dict for (sentence, slider) or None when no
    search results are cached yet.
    """
    # Guard clause with truthiness instead of `len(info.keys()) != 0`.
    if not info:
        return None
    source_sents = info[selected_papers_radio]['source_sentences']
    highlights = info[selected_papers_radio]['highlight']
    # Map the chosen sentence text back to its index in the abstract.
    idx = source_sents.index(source_sent_choice)
    return highlights[str(idx)][str(highlight_slider)]

def change_paper(
    selected_papers_radio, 
    source_sent_choice, 
    highlight_slider,
    info={}
): 
    """Refresh the Explore More panel when a different paper is selected.

    Returns (title markup, abstract, affinity markup, citation markup,
    highlight payload, slider update) or None when no results are cached.
    """
    # Guard clause with truthiness instead of `len(info.keys()) != 0`.
    if not info:
        return None
    entry = info[selected_papers_radio]
    source_sents = entry['source_sentences']
    title = entry['title']
    year = entry['year']
    citation_count = entry['citations']
    num_sents = entry['num_cand_sents']
    abstract = entry['abstract']
    aff_score = entry['doc_score']
    highlights = entry['highlight']
    url = entry['url']
    title_out = """<a href="%s" target="_blank"><h5>%s (%s)</h5></a>"""%(url, title, str(year))
    aff_score_out = '##### Affinity Score: %s'%aff_score
    citation_count_out = '##### Citation Count: %s'%citation_count
    idx = source_sents.index(source_sent_choice)
    # Clamp the slider to this paper's candidate-sentence count (dedupes the
    # previous two near-identical return branches).
    shown = highlight_slider if highlight_slider <= num_sents else num_sents
    return title_out, abstract, aff_score_out, citation_count_out, highlights[str(idx)][str(shown)], gr.update(value=shown, maximum=num_sents)

def change_num_highlight(
    selected_papers_radio, 
    source_sent_choice,
    highlight_slider, 
    info={}
):
    """Re-fetch the highlight payload when the highlight-count slider moves.

    Looks up the cached Interpretation dict for the selected paper, the chosen
    submission sentence, and the new slider value. Returns None when no search
    results have been cached yet.
    """
    if not info:
        # nothing cached yet — no highlights to show
        return None
    paper_entry = info[selected_papers_radio]
    sent_idx = paper_entry['source_sentences'].index(source_sent_choice)
    return paper_entry['highlight'][str(sent_idx)][str(highlight_slider)]

def change_top_output(top_paper_slider, top_pair_slider, info=None):
    """Re-render the overview results when either top-k slider changes.

    Args:
        top_paper_slider: number of top papers to display.
        top_pair_slider: number of top sentence pairs per paper to display.
        info: cached search results (the shared ``gr.State`` dict). ``None`` or
            empty means no search has completed yet.

    Returns:
        Flat list of component updates from ``setup_outputs`` plus a final
        update for the results description — or ``None`` when ``info`` is
        empty so Gradio leaves the outputs untouched.
    """
    # BUG FIX: the original built result1_desc_value (reading
    # info['author_url']) *before* checking that info was non-empty, so an
    # empty cache raised KeyError instead of returning None. Guard first.
    # Also fixed the mutable default argument (`info={}` -> None).
    if not info:
        return
    top_papers_show = int(top_paper_slider)
    top_num_info_show = int(top_pair_slider)
    
    result1_desc_value = """
    <h3>Top %d relevant papers by the reviewer <a href="%s" target="_blank">%s</a></h3>
    
    For each paper, top %d sentence pairs (one from the submission on the left, one from the paper on the right) with the highest relevance scores are shown.
    
    **<span style="color:black;background-color:#65B5E3;">Blue highlights</span>**: phrases that appear in both sentences.
    """%(top_papers_show, info['author_url'], info['name'], top_num_info_show)
    # Flatten the per-paper output groups into the single flat list of
    # component updates that Gradio expects for init_result_components.
    flattened = []
    for group in setup_outputs(info, top_papers_show, top_num_info_show):
        flattened += group
    return flattened + [gr.update(value=result1_desc_value)]
    
with gr.Blocks(css='style.css') as demo:
    info = gr.State({})  # cached search results as a State variable shared throughout

    # Text description about the app and disclaimer
    ### TEXT Description

    # General instruction
    general_instruction = """
# R2P2: An Assistance Tool for Reviewer-Paper Matching in Peer Review 

#### Who is it for?
It is for meta-reviewers, area chairs, program chairs, or anyone who oversees the submission-reviewer matching process in peer review for academic conferences, journals, and grants. 

<center><img src="file/tool-img.jpeg" width="70%" alt="general workflow"></center>

#### How does it help?
A typical meta-reviewer workflow lacks supportive information on **what makes the pre-selected candidate reviewers a good fit** for the submission. Only affinity scores between the reviewer and the paper are shown, without additional details on what makes them similar/different. 

R2P2 provides more information about each reviewer. Given a paper and a reviewer, it searches for the **most relevant papers** among the reviewer's previous publications and **highlights relevant parts** within them. 
    """
    # More details (video, addendum)
    more_details_instruction = """Check out <a href="https://drive.google.com/file/d/1Ex_-cOplBitO7riNGliecFc8H3chXUN-/view?usp=share_link", target="_blank">this video</a> for an overview of what R2P2 is and <a href="https://drive.google.com/file/d/1C_fadFNdJkbrgeaoeoqeJdVCvg5iJi5V/view?usp=share_link", target="_blank">this video</a> for how to use it. You can find more details <a href="file/details.html", target="_blank">here</a>, along with our privacy policy and disclaimer."""

    gr.Markdown(general_instruction)
    gr.HTML(more_details_instruction)
    gr.Markdown("""---""")

    # Add main example
    example_title = "The Toronto Paper Matching System: An automated paper-reviewer assignment system"
    example_submission = """One of the most important tasks of conference organizers is the assignment of papers to reviewers. Reviewers' assessments of papers is a crucial step in determining the conference program, and in a certain sense to shape the direction of a field. However this is not a simple task: large conferences typically have to assign hundreds of papers to hundreds of reviewers, and time constraints make the task impossible for one person to accomplish. Furthermore other constraints, such as reviewer load have to be taken into account, preventing the process from being completely distributed. We built the first version of a system to suggest reviewer assignments for the NIPS 2010 conference, followed, in 2012, by a release that better integrated our system with Microsoft's popular Conference Management Toolkit (CMT). Since then our system has been widely adopted by the leading conferences in both the machine learning and computer vision communities. This paper provides an overview of the system, a summary of learning models and methods of evaluation that we have been using, as well as some of the recent progress and open issues."""
    example_reviewer = "https://www.semanticscholar.org/author/Nihar-B.-Shah/1737249"

    ## Add other examples for the task

    # match 1
    # example1_title = "VoroCNN: Deep convolutional neural network built on 3D Voronoi tessellation of protein structures"
    # example1_submission = """Effective use of evolutionary information has recently led to tremendous progress in computational prediction of three-dimensional (3D) structures of proteins and their complexes. Despite the progress, the accuracy of predicted structures tends to vary considerably from case to case. Since the utility of computational models depends on their accuracy, reliable estimates of deviation between predicted and native structures are of utmost importance. Results For the first time we present a deep convolutional neural network (CNN) constructed on a Voronoi tessellation of 3D molecular structures. Despite the irregular data domain, our data representation allows to efficiently introduce both convolution and pooling operations of the network. We trained our model, called VoroCNN, to predict local qualities of 3D protein folds. The prediction results are competitive to the state of the art and superior to the previous 3D CNN architectures built for the same task. We also discuss practical applications of VoroCNN, for example, in the recognition of protein binding interfaces. Availability The model, data, and evaluation tests are available at https://team.inria.fr/nano-d/software/vorocnn/. Contact [email protected], [email protected]"""
    # example1_reviewer = "https://www.semanticscholar.org/author/2025052385"

    # # match 2
    # example2_title = "Model-based Policy Optimization with Unsupervised Model Adaptation"
    # example2_submission = """Model-based reinforcement learning methods learn a dynamics model with real data sampled from the environment and leverage it to generate simulated data to derive an agent. However, due to the potential distribution mismatch between simulated data and real data, this could lead to degraded performance. Despite much effort being devoted to reducing this distribution mismatch, existing methods fail to solve it explicitly. In this paper, we investigate how to bridge the gap between real and simulated data due to inaccurate model estimation for better policy optimization. To begin with, we first derive a lower bound of the expected return, which naturally inspires a bound maximization algorithm by aligning the simulated and real data distributions. To this end, we propose a novel model-based reinforcement learning framework AMPO, which introduces unsupervised model adaptation to minimize the integral probability metric (IPM) between feature distributions from real and simulated data. Instantiating our framework with Wasserstein-1 distance gives a practical model-based approach. Empirically, our approach achieves state-of-the-art performance in terms of sample efficiency on a range of continuous control benchmark tasks."""
    # example2_reviewer = "https://www.semanticscholar.org/author/144974941"

    # # match 3
    # example3_title = "Sharp asymptotic and finite-sample rates of convergence of empirical measures in Wasserstein distance"
    # example3_submission = """The Wasserstein distance between two probability measures on a metric space is a measure of closeness with applications in statistics, probability, and machine learning. In this work, we consider the fundamental question of how quickly the empirical measure obtained from $n$ independent samples from $\mu$ approaches $\mu$ in the Wasserstein distance of any order. We prove sharp asymptotic and finite-sample results for this rate of convergence for general measures on general compact metric spaces. Our finite-sample results show the existence of multi-scale behavior, where measures can exhibit radically different rates of convergence as $n$ grows."""
    # example3_reviewer = "https://www.semanticscholar.org/author/27911143"

    # # match 4
    # example4_title = "Deep Neural Networks for Estimation and Inference: Application to Causal Effects and Other Semiparametric Estimands"
    # example4_submission = """We study deep neural networks and their use in semiparametric inference. We prove valid inference after first-step estimation with deep learning, a result new to the literature. We provide new rates of convergence for deep feedforward neural nets and, because our rates are sufficiently fast (in some cases minimax optimal), obtain valid semiparametric inference. Our estimation rates and semiparametric inference results handle the current standard architecture: fully connected feedforward neural networks (multi-layer perceptrons), with the now-common rectified linear unit activation function and a depth explicitly diverging with the sample size. We discuss other architectures as well, including fixed-width, very deep networks. We establish nonasymptotic bounds for these deep nets for nonparametric regression, covering the standard least squares and logistic losses in particular. We then apply our theory to develop semiparametric inference, focusing on treatment effects, expected welfare, and decomposition effects for concreteness. Inference in many other semiparametric contexts can be readily obtained. We demonstrate the effectiveness of deep learning with a Monte Carlo analysis and an empirical application to direct mail marketing."""
    # example4_reviewer = "https://www.semanticscholar.org/author/3364789"

    ### INPUT
    with gr.Row() as input_row:
        with gr.Column(scale=3):
            with gr.Row():
                title_input = gr.Textbox(label='Submission Title', info='Paste in the title of the submission.')
            with gr.Row():
                abstract_text_input = gr.Textbox(label='Submission Abstract', info='Paste in the abstract of the submission.')
        with gr.Column(scale=2):
            with gr.Row():
                author_id_input = gr.Textbox(label='Reviewer Profile Link (Semantic Scholar)', info="Paste in the reviewer's Semantic Scholar link")
            with gr.Row():
                name = gr.Textbox(label='Confirm Reviewer Name', info='This will be automatically updated based on the reviewer profile link above', interactive=False)
                author_id_input.change(fn=update_name, inputs=author_id_input, outputs=name)

    gr.Examples(
        examples=[
            [example_title, example_submission, example_reviewer],
            # [example1_title, example1_submission, example1_reviewer],
            # [example2_title, example2_submission, example2_reviewer],
            # [example3_title, example3_submission, example3_reviewer],
            # [example4_title, example4_submission, example4_reviewer],
        ],
        inputs=[title_input, abstract_text_input, author_id_input],
        cache_examples=False,
        label="Try out the following example input. Click on a row to fill in the input fields accordingly."
    )

    with gr.Row():
        compute_btn = gr.Button('What Makes This a Good Match?') 

    with gr.Row():
        search_status = gr.Textbox(label='Search Status', interactive=False, visible=False)

    ### OVERVIEW RESULTS
    # Paper title, score, and top-ranking sentence pairs 
    # a knob for controlling the number of output displayed
    with gr.Row():
        with gr.Column(scale=3):
            result1_desc = gr.Markdown(value='', visible=False)
        with gr.Column(scale=2):
            top_paper_slider = gr.Slider(label='Number of papers to show', value=3, minimum=3, step=1, maximum=NUM_PAPERS_SHOW, visible=False)
        with gr.Column(scale=2):
            top_pair_slider = gr.Slider(label='Number of sentence pairs to show', value=2, minimum=2, step=1, maximum=NUM_PAIRS_SHOW, visible=False)

    # Per-paper / per-pair output components, collected into flat lists so the
    # event handlers below can address them all at once.
    paper_title_up = []
    paper_affinity_up = []
    citation_count = []
    sent_pair_score = []
    sent_text_query = []
    sent_text_candidate = []
    sent_hl_query = []
    sent_hl_candidate = []
    demarc_lines = []

    row_elems1 = []
    row_elems2 = []

    # Pre-create the maximum number of (initially hidden) result rows; handlers
    # toggle visibility/content instead of creating components dynamically.
    for i in range(NUM_PAPERS_SHOW):
        with gr.Row():
            with gr.Column(scale=3):
                tt = gr.Markdown(value='', visible=False)
                paper_title_up.append(tt)
            with gr.Column(scale=1):
                cc = gr.Markdown(value='', visible=False)
                citation_count.append(cc)
            with gr.Column(scale=1):
                aff = gr.Markdown(value='', visible=False)
                paper_affinity_up.append(aff)
        for j in range(NUM_PAIRS_SHOW):
            with gr.Row():
                with gr.Column(scale=1):
                    sps = gr.Markdown(value='', visible=False)
                    sent_pair_score.append(sps)
                with gr.Column(scale=5):
                    #stq = gr.Textbox(label='Sentence from Submission', visible=False)
                    stq = gr.Textbox(label='', visible=False)
                    shq = gr.components.Interpretation(stq, visible=False)
                    sent_text_query.append(stq)
                    sent_hl_query.append(shq)
                with gr.Column(scale=5):
                    #stc = gr.Textbox(label="Sentence from Reviewer's Paper", visible=False)
                    stc = gr.Textbox(label="", visible=False)
                    shc = gr.components.Interpretation(stc, visible=False)
                    sent_text_candidate.append(stc)
                    sent_hl_candidate.append(shc)
        with gr.Row():
            dml = gr.Markdown("""---""", visible=False)
            demarc_lines.append(dml)

    ## Show more button
    with gr.Row():
        see_more_rel_btn = gr.Button('Not Enough Information? Explore More', visible=False)

    ### PAPER INFORMATION

    # Description for Explore More Section
    with gr.Row():
        result2_desc = gr.Markdown(value='', visible=False)

    # Highlight description 
    hl_desc = """
    <font size="2">**<span style="color:black;background-color:#DB7262;">Red</span>**: sentences similar to the selected sentence from submission. Darker = more similar.</font>
    
    <font size="2">**<span style="color:black;background-color:#65B5E3;">Blue</span>**: phrases that appear in both sentences.</font>
    """ 
    #---"""
    # show multiple papers in radio check box to select from
    paper_abstract = gr.Textbox(label='', interactive=False, visible=False)
    with gr.Row():
        with gr.Column(scale=1):
            selected_papers_radio = gr.Radio(
                choices=[], # will be updated with the button click
                visible=False, # also will be updated with the button click
                label='Top Relevant Papers from the Reviewer'
            )
        with gr.Column(scale=2):
            # sentences from submission
            source_sentences = gr.Radio(
                choices=[], 
                visible=False, 
                label='Sentences from Submission Abstract',
            )
        with gr.Column(scale=3):
            # selected paper and highlight
            with gr.Row():
                # slider for highlight amount
                highlight_slider = gr.Slider(
                    label='Number of Highlighted Sentences', 
                    minimum=1, 
                    maximum=15, 
                    step=1, 
                    value=2, 
                    visible=False
                )
            with gr.Row():
                # highlight legend
                highlight_legend = gr.Markdown(value=hl_desc, visible=False)
            with gr.Row(visible=False) as title_row:
                # selected paper title
                paper_title = gr.Markdown(value='')
            with gr.Row(visible=False) as aff_row:
                # selected paper's affinity score
                affinity = gr.Markdown(value='')
            with gr.Row(visible=False) as cite_row:
                # selected paper's citation count 
                citation = gr.Markdown(value='')
            with gr.Row(visible=False) as hl_row: 
                # highlighted text from paper
                highlight = gr.components.Interpretation(paper_abstract) 

    ### EVENT LISTENERS

    # components to work with
    init_components = [
        see_more_rel_btn, # explore more button
        result1_desc, # description for first results
        search_status, # search status
        top_paper_slider,
        top_pair_slider
    ]

    # NOTE(review): the ordering of this flat list must match the flattened
    # output order produced by setup_outputs (see change_top_output) — keep
    # them in sync when editing either one.
    init_result_components = \
        paper_title_up + paper_affinity_up + citation_count + demarc_lines + sent_pair_score + \
            sent_text_query + sent_text_candidate + sent_hl_query + sent_hl_candidate 

    explore_more_components = [
        selected_papers_radio, # list of papers for show more section
        source_sentences, # list of sentences for show more section
        paper_title, # paper title for show more section
        affinity, # affinity for show more section
        result2_desc, # description for explore more
        highlight_slider, # highlight slider
    ]

    # Both click handlers below fire on the same button press: the first shows
    # the status box immediately, the second runs the (slow) search.
    compute_btn.click(
        fn=show_status,
        inputs=[],
        outputs=search_status
    )

    compute_btn.click(
        fn=get_similar_paper,
        inputs=[
            title_input,
            abstract_text_input, 
            author_id_input,
            top_paper_slider,
            top_pair_slider,
            info
        ],
        outputs=init_components + init_result_components + explore_more_components + [info],
        show_progress=True,
        scroll_to_output=True
    )

    # Get more info (move to more interactive portion)
    see_more_rel_btn.click(
        fn=show_more,
        inputs=info,
        outputs=[
            result2_desc,
            selected_papers_radio,
            source_sentences,
            title_row,
            aff_row,
            cite_row,
            highlight_legend,
            highlight_slider,
            hl_row,
        ]
    )

    # change highlight based on selected sentences from submission
    source_sentences.change(
        fn=change_sentence,
        inputs=[
            selected_papers_radio,
            source_sentences,
            highlight_slider,
            info
        ],
        outputs=highlight
    )

    # change paper to show based on selected papers
    selected_papers_radio.change(
        fn=change_paper,
        inputs=[
            selected_papers_radio,
            source_sentences,
            highlight_slider,
            info,
        ],
        outputs= [
            paper_title,
            paper_abstract,
            affinity,
            citation,
            highlight, 
            highlight_slider
        ]
    )

    # change number of highlights to show
    highlight_slider.change(
        fn=change_num_highlight,
        inputs=[
            selected_papers_radio,
            source_sentences,
            highlight_slider,
            info
        ],
        outputs=[
            highlight
        ]
    )

    # change number of top papers to show initially
    top_paper_slider.change(
        fn=change_top_output,
        inputs=[
            top_paper_slider,
            top_pair_slider,
            info   
        ],
        outputs=init_result_components+[result1_desc]
    ) 

    # change number of top sentence pairs to show initially
    top_pair_slider.change(
        fn=change_top_output,
        inputs=[
            top_paper_slider,
            top_pair_slider,
            info
        ],
        outputs=init_result_components+[result1_desc]
    )
    
if __name__ == "__main__":
    # Enable request queueing before launching so long-running searches are
    # processed in order rather than timing out.
    queued_app = demo.queue()
    queued_app.launch()  # append ?__theme=light to the URL to force light mode
    # queued_app.launch(share=True) would expose a public share link instead.