Create app.py
app.py
ADDED
@@ -0,0 +1,73 @@
import random
import uuid
import json
from transformers import AutoTokenizer, AutoModelForCausalLM


# Load the tokenizer and model without FlashAttention2
tokenizer = AutoTokenizer.from_pretrained("microsoft/Phi-3.5-vision-instruct", trust_remote_code=True)

# Disable FlashAttention2 by forcing the standard "eager" attention implementation
config = {
    'attn_implementation': 'eager'
}

model = AutoModelForCausalLM.from_pretrained(
    "microsoft/Phi-3.5-vision-instruct",
    trust_remote_code=True,
    **config
)




# Function to generate text using the model
def generate_text(prompt):
    inputs = tokenizer(prompt, return_tensors="pt")
    outputs = model.generate(**inputs, max_length=150)
    return tokenizer.decode(outputs[0], skip_special_tokens=True)

# Lists for random selection
interest_list = ["networking", "dealMaking"]
personal_interest_list = [
    "Drawing", "Cooking", "Foodie", "Board Gaming", "Card games", "Checkers", "Chess",
    "PC gaming", "Puzzle solving", "Video gaming", "Reading", "Science experiments",
    "Magic and illusion", "Stand-up comedy", "Walking", "Beer brewing", "Gourmet food exploration",
    "Mixology", "Wine tasting"
]
professional_interest_list = [
    "Building a customer-centric culture", "Community Building", "Community Management",
    "Delivering exceptional customer service", "Ensuring product/service quality",
    "Handling customer complaints and feedback"
]

# Function to generate a random profile with intelligent education/professional details
def generate_intelligent_profile():
    # Randomly select interests and location
    profile = {
        "_id": str(uuid.uuid4()),  # Generate random ID
        "latitude": random.uniform(-90, 90),  # Random latitude
        "longitude": random.uniform(-180, 180),  # Random longitude
        "interest": random.sample(interest_list, random.randint(1, len(interest_list))),  # Random interests
        "personalInterest": random.sample(personal_interest_list, random.randint(3, 7)),  # Random personal interests
        "professionalInterest": random.sample(professional_interest_list, random.randint(3, 5))  # Random professional interests
    }

    # Generate intelligent educational details using the model
    education_prompt = "Generate an educational background for a professional profile with relevant qualifications."
    profile["educationalDetails"] = [generate_text(education_prompt)]

    # Generate intelligent professional details using the model
    professional_prompt = "Generate a professional career summary for a profile with achievements and roles."
    profile["professionalDetails"] = [generate_text(professional_prompt)]

    return profile

# Function to generate multiple profiles
def generate_profiles(num_profiles=5):
    profiles = [generate_intelligent_profile() for _ in range(num_profiles)]
    return {"data": profiles}

# Generate and print random intelligent profiles
generated_profiles = generate_profiles(3)
print(json.dumps(generated_profiles, indent=4))
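
A minimal usage sketch, assuming the definitions above have already been executed in the same Python session; the profiles.json filename and the fixed seed are illustrative assumptions, not part of the committed file:

import json
import random

random.seed(42)  # optional: make the randomly sampled profile fields reproducible (illustrative)

profiles = generate_profiles()  # defaults to 5 profiles
with open("profiles.json", "w", encoding="utf-8") as f:
    json.dump(profiles, f, indent=4)  # same JSON structure that the script prints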