gopiashokan committed
Commit 1bca89e
Parent: d393c3a

Upload app.py

Files changed (1):
  app.py +637 -624

app.py CHANGED
@@ -1,624 +1,637 @@
- import time
- import numpy as np
- import pandas as pd
- import streamlit as st
- from streamlit_option_menu import option_menu
- from streamlit_extras.add_vertical_space import add_vertical_space
- from PyPDF2 import PdfReader
- from langchain.text_splitter import RecursiveCharacterTextSplitter
- from langchain.embeddings.openai import OpenAIEmbeddings
- from langchain.vectorstores import FAISS
- from langchain.chat_models import ChatOpenAI
- from langchain.chains.question_answering import load_qa_chain
- from selenium import webdriver
- from selenium.webdriver.common.by import By
- from selenium.webdriver.common.keys import Keys
- from selenium.common.exceptions import NoSuchElementException
- import warnings
- warnings.filterwarnings('ignore')
-
-
- def streamlit_config():
-
-     # page configuration
-     st.set_page_config(page_title='Resume Analyzer AI', layout="wide")
-
-     # page header transparent color
-     page_background_color = """
-     <style>
-
-     [data-testid="stHeader"]
-     {
-     background: rgba(0,0,0,0);
-     }
-
-     </style>
-     """
-     st.markdown(page_background_color, unsafe_allow_html=True)
-
-     # title and position
-     st.markdown(f'<h1 style="text-align: center;">AI-Powered Resume Analyzer and <br> LinkedIn Scraper with Selenium</h1>',
-                 unsafe_allow_html=True)
-
-
- class resume_analyzer:
-
-     def pdf_to_chunks(pdf):
-         # read the PDF and create a reader object
-         pdf_reader = PdfReader(pdf)
-
-         # extract text from each page separately
-         text = ""
-         for page in pdf_reader.pages:
-             text += page.extract_text()
-
-         # Split the long text into small chunks.
-         text_splitter = RecursiveCharacterTextSplitter(
-             chunk_size=700,
-             chunk_overlap=200,
-             length_function=len)
-
-         chunks = text_splitter.split_text(text=text)
-         return chunks
-
-
-     def openai(openai_api_key, chunks, analyze):
-
-         # Using OpenAI service for embedding
-         embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
-
-         # Facebook AI Similarity Search (FAISS) library converts the text chunks into numerical vectors
-         vectorstores = FAISS.from_texts(chunks, embedding=embeddings)
-
-         # compares the query and chunks, enabling the selection of the top 'K' most similar chunks based on their similarity scores.
-         docs = vectorstores.similarity_search(query=analyze, k=3)
-
-         # creates a ChatOpenAI object, using the GPT-3.5 Turbo model
-         llm = ChatOpenAI(model='gpt-3.5-turbo', api_key=openai_api_key)
-
-         # question-answering (QA) pipeline, making use of the load_qa_chain function
-         chain = load_qa_chain(llm=llm, chain_type='stuff')
-
-         response = chain.run(input_documents=docs, question=analyze)
-         return response
-
-
-     def summary_prompt(query_with_chunks):
-
-         query = f'''Provide a detailed summary of the resume below and finish with a conclusion.
-
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         {query_with_chunks}
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         '''
-         return query
-
-
-     def resume_summary():
-
-         with st.form(key='Summary'):
-
-             # User Upload the Resume
-             add_vertical_space(1)
-             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
-             add_vertical_space(1)
-
-             # Enter OpenAI API Key
-             col1, col2 = st.columns([0.6, 0.4])
-             with col1:
-                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
-             add_vertical_space(2)
-
-             # Click on Submit Button
-             submit = st.form_submit_button(label='Submit')
-             add_vertical_space(1)
-
-         add_vertical_space(3)
-         if submit:
-             if pdf is not None and openai_api_key != '':
-                 try:
-                     with st.spinner('Processing...'):
-
-                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
-
-                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
-
-                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
-
-                     st.markdown(f'<h4 style="color: orange;">Summary:</h4>', unsafe_allow_html=True)
-                     st.write(summary)
-
-                 except Exception as e:
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
-
-             elif pdf is None:
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
-
-             elif openai_api_key == '':
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
-
-
-     def strength_prompt(query_with_chunks):
-         query = f'''Provide a detailed analysis of the strengths of the resume below and finish with a conclusion.
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         {query_with_chunks}
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         '''
-         return query
-
-
-     def resume_strength():
-
-         with st.form(key='Strength'):
-
-             # User Upload the Resume
-             add_vertical_space(1)
-             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
-             add_vertical_space(1)
-
-             # Enter OpenAI API Key
-             col1, col2 = st.columns([0.6, 0.4])
-             with col1:
-                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
-             add_vertical_space(2)
-
-             # Click on Submit Button
-             submit = st.form_submit_button(label='Submit')
-             add_vertical_space(1)
-
-         add_vertical_space(3)
-         if submit:
-             if pdf is not None and openai_api_key != '':
-                 try:
-                     with st.spinner('Processing...'):
-
-                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
-
-                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
-
-                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
-
-                         strength_prompt = resume_analyzer.strength_prompt(query_with_chunks=summary)
-
-                         strength = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=strength_prompt)
-
-                     st.markdown(f'<h4 style="color: orange;">Strength:</h4>', unsafe_allow_html=True)
-                     st.write(strength)
-
-                 except Exception as e:
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
-
-             elif pdf is None:
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
-
-             elif openai_api_key == '':
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
-
-
-     def weakness_prompt(query_with_chunks):
-         query = f'''Provide a detailed analysis of the weaknesses of the resume below and explain how to improve it.
-
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         {query_with_chunks}
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         '''
-         return query
-
-
-     def resume_weakness():
-
-         with st.form(key='Weakness'):
-
-             # User Upload the Resume
-             add_vertical_space(1)
-             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
-             add_vertical_space(1)
-
-             # Enter OpenAI API Key
-             col1, col2 = st.columns([0.6, 0.4])
-             with col1:
-                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
-             add_vertical_space(2)
-
-             # Click on Submit Button
-             submit = st.form_submit_button(label='Submit')
-             add_vertical_space(1)
-
-         add_vertical_space(3)
-         if submit:
-             if pdf is not None and openai_api_key != '':
-                 try:
-                     with st.spinner('Processing...'):
-
-                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
-
-                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
-
-                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
-
-                         weakness_prompt = resume_analyzer.weakness_prompt(query_with_chunks=summary)
-
-                         weakness = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=weakness_prompt)
-
-                     st.markdown(f'<h4 style="color: orange;">Weakness and Suggestions:</h4>', unsafe_allow_html=True)
-                     st.write(weakness)
-
-                 except Exception as e:
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
-
-             elif pdf is None:
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
-
-             elif openai_api_key == '':
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
-
-
-     def job_title_prompt(query_with_chunks):
-
-         query = f'''What job roles should I apply for on LinkedIn based on the resume below?
-
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         {query_with_chunks}
-         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
-         '''
-         return query
-
-
-     def job_title_suggestion():
-
-         with st.form(key='Job Titles'):
-
-             # User Upload the Resume
-             add_vertical_space(1)
-             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
-             add_vertical_space(1)
-
-             # Enter OpenAI API Key
-             col1, col2 = st.columns([0.6, 0.4])
-             with col1:
-                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
-             add_vertical_space(2)
-
-             # Click on Submit Button
-             submit = st.form_submit_button(label='Submit')
-             add_vertical_space(1)
-
-         add_vertical_space(3)
-         if submit:
-             if pdf is not None and openai_api_key != '':
-                 try:
-                     with st.spinner('Processing...'):
-
-                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
-
-                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
-
-                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
-
-                         job_title_prompt = resume_analyzer.job_title_prompt(query_with_chunks=summary)
-
-                         job_title = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=job_title_prompt)
-
-                     st.markdown(f'<h4 style="color: orange;">Job Titles:</h4>', unsafe_allow_html=True)
-                     st.write(job_title)
-
-                 except Exception as e:
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
-
-             elif pdf is None:
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
-
-             elif openai_api_key == '':
-                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
-
-
-
- class linkedin_scraper:
-
-     def webdriver_setup():
-
-         options = webdriver.ChromeOptions()
-         options.add_argument('--headless')
-         options.add_argument('--no-sandbox')
-         options.add_argument('--disable-dev-shm-usage')
-
-         driver = webdriver.Chrome(options=options)
-         driver.maximize_window()
-         return driver
-
-
-     def get_userinput():
-
-         add_vertical_space(2)
-         with st.form(key='linkedin_scrape'):
-
-             add_vertical_space(1)
-             col1, col2, col3 = st.columns([0.5, 0.3, 0.2], gap='medium')
-             with col1:
-                 job_title_input = st.text_input(label='Job Title')
-                 job_title_input = job_title_input.split(',')
-             with col2:
-                 job_location = st.text_input(label='Job Location', value='India')
-             with col3:
-                 job_count = st.number_input(label='Job Count', min_value=1, value=1, step=1)
-
-             # Submit Button
-             add_vertical_space(1)
-             submit = st.form_submit_button(label='Submit')
-             add_vertical_space(1)
-
-         return job_title_input, job_location, job_count, submit
-
-
-     def build_url(job_title, job_location):
-
-         b = []
-         for i in job_title:
-             x = i.split()
-             y = '%20'.join(x)
-             b.append(y)
-
-         job_title = '%2C%20'.join(b)
-         link = f"https://in.linkedin.com/jobs/search?keywords={job_title}&location={job_location}&locationId=&geoId=102713980&f_TPR=r604800&position=1&pageNum=0"
-
-         return link
-
-
-     def open_link(driver, link):
-
-         while True:
-             # Break the Loop if the Element is Found, Indicating the Page Loaded Correctly
-             try:
-                 driver.get(link)
-                 driver.implicitly_wait(5)
-                 time.sleep(3)
-                 driver.find_element(by=By.CSS_SELECTOR, value='span.switcher-tabs__placeholder-text.m-auto')
-                 return
-
-             # Retry Loading the Page
-             except NoSuchElementException:
-                 continue
-
-
-     def link_open_scrolldown(driver, link, job_count):
-
-         # Open the Link in LinkedIn
-         linkedin_scraper.open_link(driver, link)
-
-         # Scroll Down the Page
-         for i in range(0, job_count):
-             # Simulate pressing the Page Up key
-             body = driver.find_element(by=By.TAG_NAME, value='body')
-             body.send_keys(Keys.PAGE_UP)
-             # Scroll down to the end of the page
-             driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
-             driver.implicitly_wait(2)
-             # Click on the See More Jobs Button if Present
-             try:
-                 driver.find_element(by=By.CSS_SELECTOR, value="button[aria-label='See more jobs']").click()
-                 driver.implicitly_wait(5)
-             except:
-                 pass
-
-
-     def job_title_filter(scrap_job_title, user_job_title_input):
-
-         # User Job Title Convert into Lower Case
-         user_input = [i.lower().strip() for i in user_job_title_input]
-
-         # Scraped Job Title Convert into Lower Case
-         scrap_title = [i.lower().strip() for i in [scrap_job_title]]
-
-         # Verify Any User Job Title in the Scraped Job Title
-         confirmation_count = 0
-         for i in user_input:
-             if all(j in scrap_title[0] for j in i.split()):
-                 confirmation_count += 1
-
-         # Return Job Title if confirmation_count greater than 0 else return NaN
-         if confirmation_count > 0:
-             return scrap_job_title
-         else:
-             return np.nan
-
-
-     def scrap_company_data(driver, job_title_input, job_location):
-
-         # scraping the Company Data
-         company = driver.find_elements(by=By.CSS_SELECTOR, value='h4[class="base-search-card__subtitle"]')
-         company_name = [i.text for i in company]
-
-         location = driver.find_elements(by=By.CSS_SELECTOR, value='span[class="job-search-card__location"]')
-         company_location = [i.text for i in location]
-
-         title = driver.find_elements(by=By.CSS_SELECTOR, value='h3[class="base-search-card__title"]')
-         job_title = [i.text for i in title]
-
-         url = driver.find_elements(by=By.XPATH, value='//a[contains(@href, "/jobs/")]')
-         website_url = [i.get_attribute('href') for i in url]
-
-         # combine all the data into a single dataframe
-         df = pd.DataFrame(company_name, columns=['Company Name'])
-         df['Job Title'] = pd.DataFrame(job_title)
-         df['Location'] = pd.DataFrame(company_location)
-         df['Website URL'] = pd.DataFrame(website_url)
-
-         # Keep the Job Title only if it matches the User Input, else return NaN
-         df['Job Title'] = df['Job Title'].apply(lambda x: linkedin_scraper.job_title_filter(x, job_title_input))
-
-         # Keep the Location only if the User Job Location appears in the Scraped Location, else return NaN
-         df['Location'] = df['Location'].apply(lambda x: x if job_location.lower() in x.lower() else np.nan)
-
-         # Drop Null Values and Reset Index
-         df = df.dropna()
-         df.reset_index(drop=True, inplace=True)
-
-         return df
-
-
-     def scrap_job_description(driver, df, job_count):
-
-         # Get URL into List
-         website_url = df['Website URL'].tolist()
-
-         # Scrape the Job Description
-         job_description, description_count = [], 0
-         for i in range(0, len(website_url)):
-             try:
-                 # Open the Link in LinkedIn
-                 linkedin_scraper.open_link(driver, website_url[i])
-
-                 # Click on the Show More Button
-                 driver.find_element(by=By.CSS_SELECTOR, value='button[data-tracking-control-name="public_jobs_show-more-html-btn"]').click()
-                 driver.implicitly_wait(5)
-                 time.sleep(1)
-
-                 # Get Job Description
-                 description = driver.find_elements(by=By.CSS_SELECTOR, value='div[class="show-more-less-html__markup relative overflow-hidden"]')
-                 data = [i.text for i in description][0]
-
-                 if len(data.strip()) > 0:
-                     job_description.append(data)
-                     description_count += 1
-                 else:
-                     job_description.append('Description Not Available')
-
-             # If the URL Fails to Load Properly
-             except:
-                 job_description.append('Description Not Available')
-
-             # Stop once the Description Count Meets the User Job Count
-             if description_count == job_count:
-                 break
-
-         # Filter the Job Description
-         df = df.iloc[:len(job_description), :]
-
-         # Add Job Description in Dataframe
-         df['Job Description'] = pd.DataFrame(job_description, columns=['Description'])
-         df['Job Description'] = df['Job Description'].apply(lambda x: np.nan if x=='Description Not Available' else x)
-         df = df.dropna()
-         df.reset_index(drop=True, inplace=True)
-         return df
-
-
-     def display_data_userinterface(df_final):
-
-         # Display the Data in User Interface
-         add_vertical_space(1)
-         if len(df_final) > 0:
-             for i in range(0, len(df_final)):
-
-                 st.markdown(f'<h3 style="color: orange;">Job Posting Details : {i+1}</h3>', unsafe_allow_html=True)
-                 st.write(f"Company Name : {df_final.iloc[i,0]}")
-                 st.write(f"Job Title : {df_final.iloc[i,1]}")
-                 st.write(f"Location : {df_final.iloc[i,2]}")
-                 st.write(f"Website URL : {df_final.iloc[i,3]}")
-
-                 with st.expander(label='Job Description'):
-                     st.write(df_final.iloc[i, 4])
-                 add_vertical_space(3)
-
-         else:
-             st.markdown(f'<h5 style="text-align: center;color: orange;">No Matching Jobs Found</h5>',
-                         unsafe_allow_html=True)
-
-
-     def main():
-
-         # Initially set driver to None
-         driver = None
-
-         try:
-             job_title_input, job_location, job_count, submit = linkedin_scraper.get_userinput()
-             add_vertical_space(2)
-
-             if submit:
-                 if job_title_input != [] and job_location != '':
-
-                     with st.spinner('Chrome Webdriver Setup Initializing...'):
-                         driver = linkedin_scraper.webdriver_setup()
-
-                     with st.spinner('Loading More Job Listings...'):
-
-                         # Build the URL based on the User Job Title Input
-                         link = linkedin_scraper.build_url(job_title_input, job_location)
-
-                         # Open the Link in LinkedIn and Scroll Down the Page
-                         linkedin_scraper.link_open_scrolldown(driver, link, job_count)
-
-                     with st.spinner('Scraping Job Details...'):
-
-                         # Scraping the Company Name, Location, Job Title and URL Data
-                         df = linkedin_scraper.scrap_company_data(driver, job_title_input, job_location)
-
-                         # Scraping the Job Description Data
-                         df_final = linkedin_scraper.scrap_job_description(driver, df, job_count)
-
-                     # Display the Data in User Interface
-                     linkedin_scraper.display_data_userinterface(df_final)
-
-
-                 # If the User Clicks the Submit Button and the Job Title is Empty
-                 elif job_title_input == []:
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">Job Title is Empty</h5>',
-                                 unsafe_allow_html=True)
-
-                 elif job_location == '':
-                     st.markdown(f'<h5 style="text-align: center;color: orange;">Job Location is Empty</h5>',
-                                 unsafe_allow_html=True)
-
-         except Exception as e:
-             add_vertical_space(2)
-             st.markdown(f'<h5 style="text-align: center;color: orange;">Check Connection! Refresh the Page and Try Again</h5>', unsafe_allow_html=True)
-
-         finally:
-             if driver:
-                 driver.quit()
-
-
-
- # Streamlit Configuration Setup
- streamlit_config()
- add_vertical_space(5)
-
-
-
- with st.sidebar:
-
-     add_vertical_space(4)
-
-     option = option_menu(menu_title='', options=['Summary', 'Strength', 'Weakness', 'Job Titles', 'Linkedin Jobs'],
-                          icons=['house-fill', 'database-fill', 'pass-fill', 'list-ul', 'linkedin'])
-
-
-
- if option == 'Summary':
-
-     resume_analyzer.resume_summary()
-
-
-
- elif option == 'Strength':
-
-     resume_analyzer.resume_strength()
-
-
-
- elif option == 'Weakness':
-
-     resume_analyzer.resume_weakness()
-
-
-
- elif option == 'Job Titles':
-
-     resume_analyzer.job_title_suggestion()
-
-
-
- elif option == 'Linkedin Jobs':
-
-     linkedin_scraper.main()
-
-
+ import time
+ import numpy as np
+ import pandas as pd
+ import streamlit as st
+ from streamlit_option_menu import option_menu
+ from streamlit_extras.add_vertical_space import add_vertical_space
+ from PyPDF2 import PdfReader
+ from langchain.text_splitter import RecursiveCharacterTextSplitter
+ from langchain.embeddings.openai import OpenAIEmbeddings
+ from langchain.vectorstores import FAISS
+ from langchain.chat_models import ChatOpenAI
+ from langchain.chains.question_answering import load_qa_chain
+ from selenium import webdriver
+ from selenium.webdriver.common.by import By
+ from selenium.webdriver.common.keys import Keys
+ from selenium.common.exceptions import NoSuchElementException
+ import warnings
+ warnings.filterwarnings('ignore')
+
+
+ def streamlit_config():
+
+     # page configuration
+     st.set_page_config(page_title='Resume Analyzer AI', layout="wide")
+
+     # page header transparent color
+     page_background_color = """
+     <style>
+
+     [data-testid="stHeader"]
+     {
+     background: rgba(0,0,0,0);
+     }
+
+     </style>
+     """
+     st.markdown(page_background_color, unsafe_allow_html=True)
+
+     # title and position
+     st.markdown(f'<h1 style="text-align: center;">Resume Analyzer AI</h1>',
+                 unsafe_allow_html=True)
+
+
+ class resume_analyzer:
+
+     def pdf_to_chunks(pdf):
+         # read the PDF and create a reader object
+         pdf_reader = PdfReader(pdf)
+
+         # extract text from each page separately
+         text = ""
+         for page in pdf_reader.pages:
+             text += page.extract_text()
+
+         # Split the long text into small chunks.
+         text_splitter = RecursiveCharacterTextSplitter(
+             chunk_size=700,
+             chunk_overlap=200,
+             length_function=len)
+
+         chunks = text_splitter.split_text(text=text)
+         return chunks
+
+
+     def openai(openai_api_key, chunks, analyze):
+
+         # Using OpenAI service for embedding
+         embeddings = OpenAIEmbeddings(openai_api_key=openai_api_key)
+
+         # Facebook AI Similarity Search (FAISS) library converts the text chunks into numerical vectors
+         vectorstores = FAISS.from_texts(chunks, embedding=embeddings)
+
+         # compares the query and chunks, enabling the selection of the top 'K' most similar chunks based on their similarity scores.
+         docs = vectorstores.similarity_search(query=analyze, k=3)
+
+         # creates a ChatOpenAI object, using the GPT-3.5 Turbo model
+         llm = ChatOpenAI(model='gpt-3.5-turbo', api_key=openai_api_key)
+
+         # question-answering (QA) pipeline, making use of the load_qa_chain function
+         chain = load_qa_chain(llm=llm, chain_type='stuff')
+
+         response = chain.run(input_documents=docs, question=analyze)
+         return response
+
+
+     def summary_prompt(query_with_chunks):
+
+         query = f'''Provide a detailed summary of the resume below and finish with a conclusion.
+
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         {query_with_chunks}
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         '''
+         return query
+
+
+     def resume_summary():
+
+         with st.form(key='Summary'):
+
+             # User Upload the Resume
+             add_vertical_space(1)
+             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
+             add_vertical_space(1)
+
+             # Enter OpenAI API Key
+             col1, col2 = st.columns([0.6, 0.4])
+             with col1:
+                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
+             add_vertical_space(2)
+
+             # Click on Submit Button
+             submit = st.form_submit_button(label='Submit')
+             add_vertical_space(1)
+
+         add_vertical_space(3)
+         if submit:
+             if pdf is not None and openai_api_key != '':
+                 try:
+                     with st.spinner('Processing...'):
+
+                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
+
+                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
+
+                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
+
+                     st.markdown(f'<h4 style="color: orange;">Summary:</h4>', unsafe_allow_html=True)
+                     st.write(summary)
+
+                 except Exception as e:
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
+
+             elif pdf is None:
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
+
+             elif openai_api_key == '':
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
+
+
+     def strength_prompt(query_with_chunks):
+         query = f'''Provide a detailed analysis of the strengths of the resume below and finish with a conclusion.
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         {query_with_chunks}
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         '''
+         return query
+
+
+     def resume_strength():
+
+         with st.form(key='Strength'):
+
+             # User Upload the Resume
+             add_vertical_space(1)
+             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
+             add_vertical_space(1)
+
+             # Enter OpenAI API Key
+             col1, col2 = st.columns([0.6, 0.4])
+             with col1:
+                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
+             add_vertical_space(2)
+
+             # Click on Submit Button
+             submit = st.form_submit_button(label='Submit')
+             add_vertical_space(1)
+
+         add_vertical_space(3)
+         if submit:
+             if pdf is not None and openai_api_key != '':
+                 try:
+                     with st.spinner('Processing...'):
+
+                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
+
+                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
+
+                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
+
+                         strength_prompt = resume_analyzer.strength_prompt(query_with_chunks=summary)
+
+                         strength = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=strength_prompt)
+
+                     st.markdown(f'<h4 style="color: orange;">Strength:</h4>', unsafe_allow_html=True)
+                     st.write(strength)
+
+                 except Exception as e:
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
+
+             elif pdf is None:
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
+
+             elif openai_api_key == '':
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
+
+
+     def weakness_prompt(query_with_chunks):
+         query = f'''Provide a detailed analysis of the weaknesses of the resume below and explain how to improve it.
+
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         {query_with_chunks}
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         '''
+         return query
+
+
+     def resume_weakness():
+
+         with st.form(key='Weakness'):
+
+             # User Upload the Resume
+             add_vertical_space(1)
+             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
+             add_vertical_space(1)
+
+             # Enter OpenAI API Key
+             col1, col2 = st.columns([0.6, 0.4])
+             with col1:
+                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
+             add_vertical_space(2)
+
+             # Click on Submit Button
+             submit = st.form_submit_button(label='Submit')
+             add_vertical_space(1)
+
+         add_vertical_space(3)
+         if submit:
+             if pdf is not None and openai_api_key != '':
+                 try:
+                     with st.spinner('Processing...'):
+
+                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
+
+                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
+
+                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
+
+                         weakness_prompt = resume_analyzer.weakness_prompt(query_with_chunks=summary)
+
+                         weakness = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=weakness_prompt)
+
+                     st.markdown(f'<h4 style="color: orange;">Weakness and Suggestions:</h4>', unsafe_allow_html=True)
+                     st.write(weakness)
+
+                 except Exception as e:
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
+
+             elif pdf is None:
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
+
+             elif openai_api_key == '':
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
+
+
+     def job_title_prompt(query_with_chunks):
+
+         query = f'''What job roles should I apply for on LinkedIn based on the resume below?
+
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         {query_with_chunks}
+         """""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""""
+         '''
+         return query
+
+
+     def job_title_suggestion():
+
+         with st.form(key='Job Titles'):
+
+             # User Upload the Resume
+             add_vertical_space(1)
+             pdf = st.file_uploader(label='Upload Your Resume', type='pdf')
+             add_vertical_space(1)
+
+             # Enter OpenAI API Key
+             col1, col2 = st.columns([0.6, 0.4])
+             with col1:
+                 openai_api_key = st.text_input(label='Enter OpenAI API Key', type='password')
+             add_vertical_space(2)
+
+             # Click on Submit Button
+             submit = st.form_submit_button(label='Submit')
+             add_vertical_space(1)
+
+         add_vertical_space(3)
+         if submit:
+             if pdf is not None and openai_api_key != '':
+                 try:
+                     with st.spinner('Processing...'):
+
+                         pdf_chunks = resume_analyzer.pdf_to_chunks(pdf)
+
+                         summary_prompt = resume_analyzer.summary_prompt(query_with_chunks=pdf_chunks)
+
+                         summary = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=summary_prompt)
+
+                         job_title_prompt = resume_analyzer.job_title_prompt(query_with_chunks=summary)
+
+                         job_title = resume_analyzer.openai(openai_api_key=openai_api_key, chunks=pdf_chunks, analyze=job_title_prompt)
+
+                     st.markdown(f'<h4 style="color: orange;">Job Titles:</h4>', unsafe_allow_html=True)
+                     st.write(job_title)
+
+                 except Exception as e:
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
+
+             elif pdf is None:
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Upload Your Resume</h5>', unsafe_allow_html=True)
+
+             elif openai_api_key == '':
+                 st.markdown(f'<h5 style="text-align: center;color: orange;">Please Enter OpenAI API Key</h5>', unsafe_allow_html=True)
+
+
+
+ class linkedin_scraper:
+
+     def webdriver_setup():
+
+         options = webdriver.ChromeOptions()
+         options.add_argument('--headless')
+         options.add_argument('--no-sandbox')
+         options.add_argument('--disable-dev-shm-usage')
+
+         driver = webdriver.Chrome(options=options)
+         driver.maximize_window()
+         return driver
+
+
+     def get_userinput():
+
+         add_vertical_space(2)
+         with st.form(key='linkedin_scrape'):
+
+             add_vertical_space(1)
+             col1, col2, col3 = st.columns([0.5, 0.3, 0.2], gap='medium')
+             with col1:
+                 job_title_input = st.text_input(label='Job Title')
+                 job_title_input = job_title_input.split(',')
+             with col2:
+                 job_location = st.text_input(label='Job Location', value='India')
+             with col3:
+                 job_count = st.number_input(label='Job Count', min_value=1, value=1, step=1)
+
+             # Submit Button
+             add_vertical_space(1)
+             submit = st.form_submit_button(label='Submit')
+             add_vertical_space(1)
+
+         return job_title_input, job_location, job_count, submit
+
+
+     def build_url(job_title, job_location):
+
+         b = []
+         for i in job_title:
+             x = i.split()
+             y = '%20'.join(x)
+             b.append(y)
+
+         job_title = '%2C%20'.join(b)
+         link = f"https://in.linkedin.com/jobs/search?keywords={job_title}&location={job_location}&locationId=&geoId=102713980&f_TPR=r604800&position=1&pageNum=0"
+
+         return link
+
+
+     def open_link(driver, link):
+
+         while True:
+             # Break the Loop if the Element is Found, Indicating the Page Loaded Correctly
+             try:
+                 driver.get(link)
+                 driver.implicitly_wait(5)
+                 time.sleep(3)
+                 driver.find_element(by=By.CSS_SELECTOR, value='span.switcher-tabs__placeholder-text.m-auto')
+                 return
+
+             # Retry Loading the Page
+             except NoSuchElementException:
+                 continue
+
+
+     def link_open_scrolldown(driver, link, job_count):
+
+         # Open the Link in LinkedIn
+         linkedin_scraper.open_link(driver, link)
+
+         # Scroll Down the Page
+         for i in range(0, job_count):
+
+             # Simulate pressing the Page Up key
+             body = driver.find_element(by=By.TAG_NAME, value='body')
+             body.send_keys(Keys.PAGE_UP)
+
+             # Dismiss the sign-in modal dialog if it appears
+             try:
+                 driver.find_element(by=By.CSS_SELECTOR,
+                                     value="button[data-tracking-control-name='public_jobs_contextual-sign-in-modal_modal_dismiss']>icon>svg").click()
+             except:
+                 pass
+
+             # Scroll down to the end of the page
+             driver.execute_script("window.scrollTo(0, document.body.scrollHeight);")
+             driver.implicitly_wait(2)
+
+             # Click on the See More Jobs Button if Present
+             try:
+                 driver.find_element(by=By.CSS_SELECTOR, value="button[aria-label='See more jobs']").click()
+                 driver.implicitly_wait(5)
+             except:
+                 pass
+
+
+     def job_title_filter(scrap_job_title, user_job_title_input):
+
+         # User Job Title Convert into Lower Case
+         user_input = [i.lower().strip() for i in user_job_title_input]
+
+         # Scraped Job Title Convert into Lower Case
+         scrap_title = [i.lower().strip() for i in [scrap_job_title]]
+
+         # Verify Any User Job Title in the Scraped Job Title
+         confirmation_count = 0
+         for i in user_input:
+             if all(j in scrap_title[0] for j in i.split()):
+                 confirmation_count += 1
+
+         # Return Job Title if confirmation_count greater than 0 else return NaN
+         if confirmation_count > 0:
+             return scrap_job_title
+         else:
+             return np.nan
+
+
+     def scrap_company_data(driver, job_title_input, job_location):
+
+         # scraping the Company Data
+         company = driver.find_elements(by=By.CSS_SELECTOR, value='h4[class="base-search-card__subtitle"]')
+         company_name = [i.text for i in company]
+
+         location = driver.find_elements(by=By.CSS_SELECTOR, value='span[class="job-search-card__location"]')
+         company_location = [i.text for i in location]
+
+         title = driver.find_elements(by=By.CSS_SELECTOR, value='h3[class="base-search-card__title"]')
+         job_title = [i.text for i in title]
+
+         url = driver.find_elements(by=By.XPATH, value='//a[contains(@href, "/jobs/")]')
+         website_url = [i.get_attribute('href') for i in url]
+
+         # combine all the data into a single dataframe
+         df = pd.DataFrame(company_name, columns=['Company Name'])
+         df['Job Title'] = pd.DataFrame(job_title)
+         df['Location'] = pd.DataFrame(company_location)
+         df['Website URL'] = pd.DataFrame(website_url)
+
+         # Keep the Job Title only if it matches the User Input, else return NaN
+         df['Job Title'] = df['Job Title'].apply(lambda x: linkedin_scraper.job_title_filter(x, job_title_input))
+
+         # Keep the Location only if the User Job Location appears in the Scraped Location, else return NaN
+         df['Location'] = df['Location'].apply(lambda x: x if job_location.lower() in x.lower() else np.nan)
+
+         # Drop Null Values and Reset Index
+         df = df.dropna()
+         df.reset_index(drop=True, inplace=True)
+
+         return df
+
+
+     def scrap_job_description(driver, df, job_count):
+
+         # Get URL into List
+         website_url = df['Website URL'].tolist()
+
+         # Scrape the Job Description
+         job_description = []
+         description_count = 0
+
+         for i in range(0, len(website_url)):
+             try:
+                 # Open the Link in LinkedIn
+                 linkedin_scraper.open_link(driver, website_url[i])
+
+                 # Click on the Show More Button
+                 driver.find_element(by=By.CSS_SELECTOR, value='button[data-tracking-control-name="public_jobs_show-more-html-btn"]').click()
+                 driver.implicitly_wait(5)
+                 time.sleep(1)
+
+                 # Get Job Description
+                 description = driver.find_elements(by=By.CSS_SELECTOR, value='div[class="show-more-less-html__markup relative overflow-hidden"]')
+                 data = [i.text for i in description][0]
+
+                 # Check the Description Length and Skip Duplicates
+                 if len(data.strip()) > 0 and data not in job_description:
+                     job_description.append(data)
+                     description_count += 1
+                 else:
+                     job_description.append('Description Not Available')
+
+             # If any Unexpected Issue Occurs
+             except:
+                 job_description.append('Description Not Available')
+
+             # Stop once the Description Count Reaches the User Job Count
+             if description_count == job_count:
+                 break
+
+         # Filter the Job Description
+         df = df.iloc[:len(job_description), :]
+
+         # Add Job Description in Dataframe
+         df['Job Description'] = pd.DataFrame(job_description, columns=['Description'])
+         df['Job Description'] = df['Job Description'].apply(lambda x: np.nan if x=='Description Not Available' else x)
+         df = df.dropna()
+         df.reset_index(drop=True, inplace=True)
+         return df
+
+
+     def display_data_userinterface(df_final):
+
+         # Display the Data in User Interface
+         add_vertical_space(1)
+         if len(df_final) > 0:
+             for i in range(0, len(df_final)):
+
+                 st.markdown(f'<h3 style="color: orange;">Job Posting Details : {i+1}</h3>', unsafe_allow_html=True)
+                 st.write(f"Company Name : {df_final.iloc[i,0]}")
+                 st.write(f"Job Title : {df_final.iloc[i,1]}")
+                 st.write(f"Location : {df_final.iloc[i,2]}")
+                 st.write(f"Website URL : {df_final.iloc[i,3]}")
+
+                 with st.expander(label='Job Description'):
+                     st.write(df_final.iloc[i, 4])
+                 add_vertical_space(3)
+
+         else:
+             st.markdown(f'<h5 style="text-align: center;color: orange;">No Matching Jobs Found</h5>',
+                         unsafe_allow_html=True)
+
+
+     def main():
+
+         # Initially set driver to None
+         driver = None
+
+         try:
+             job_title_input, job_location, job_count, submit = linkedin_scraper.get_userinput()
+             add_vertical_space(2)
+
+             if submit:
+                 if job_title_input != [] and job_location != '':
+
+                     with st.spinner('Chrome Webdriver Setup Initializing...'):
+                         driver = linkedin_scraper.webdriver_setup()
+
+                     with st.spinner('Loading More Job Listings...'):
+
+                         # Build the URL based on the User Job Title Input
+                         link = linkedin_scraper.build_url(job_title_input, job_location)
+
+                         # Open the Link in LinkedIn and Scroll Down the Page
+                         linkedin_scraper.link_open_scrolldown(driver, link, job_count)
+
+                     with st.spinner('Scraping Job Details...'):
+
+                         # Scraping the Company Name, Location, Job Title and URL Data
+                         df = linkedin_scraper.scrap_company_data(driver, job_title_input, job_location)
+
+                         # Scraping the Job Description Data
+                         df_final = linkedin_scraper.scrap_job_description(driver, df, job_count)
+
+                     # Display the Data in User Interface
+                     linkedin_scraper.display_data_userinterface(df_final)
+
+
+                 # If the User Clicks the Submit Button and the Job Title is Empty
+                 elif job_title_input == []:
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">Job Title is Empty</h5>',
+                                 unsafe_allow_html=True)
+
+                 elif job_location == '':
+                     st.markdown(f'<h5 style="text-align: center;color: orange;">Job Location is Empty</h5>',
+                                 unsafe_allow_html=True)
+
+         except Exception as e:
+             add_vertical_space(2)
+             st.markdown(f'<h5 style="text-align: center;color: orange;">{e}</h5>', unsafe_allow_html=True)
+
+         finally:
+             if driver:
+                 driver.quit()
+
+
+
+ # Streamlit Configuration Setup
+ streamlit_config()
+ add_vertical_space(2)
+
+
+
+ with st.sidebar:
+
+     add_vertical_space(4)
+
+     option = option_menu(menu_title='', options=['Summary', 'Strength', 'Weakness', 'Job Titles', 'Linkedin Jobs'],
+                          icons=['house-fill', 'database-fill', 'pass-fill', 'list-ul', 'linkedin'])
+
+
+
+ if option == 'Summary':
+
+     resume_analyzer.resume_summary()
+
+
+
+ elif option == 'Strength':
+
+     resume_analyzer.resume_strength()
+
+
+
+ elif option == 'Weakness':
+
+     resume_analyzer.resume_weakness()
+
+
+
+ elif option == 'Job Titles':
+
+     resume_analyzer.job_title_suggestion()
+
+
+
+ elif option == 'Linkedin Jobs':
+
+     linkedin_scraper.main()
+
+