import gradio as gr
# from langchain.vectorstores import Chroma
'''
https://huggingface.co./spaces/kevinhug/clientX
https://hits.seeyoufarm.com/
'''

'''
PORTFOLIO OPTIMIZATION
'''
from aiOpt import Asset
import numpy as np

def optimize(cost, prob, its):
    # Parse the space-separated cost and probability inputs into float arrays.
    s = Asset(np.asarray(cost.split(), dtype=float),
              np.asarray(prob.split(), dtype=float))
    return s.random_restart(int(its))

'''
TIME SERIES ANALYTICS
'''
import pandas as pd
import plotly.express as px

def trend(t):
    '''
    Offline pipeline that produced ./ts/data.pkl (kept for reference):

    import yfinance as yf
    from sklearn.preprocessing import StandardScaler

    data = yf.download(t, period="3mo")
    for c in t.split(' '):
        q = data.loc[:, ('Close', c)]
        data.loc[:, ('Close_MA', c)] = q.rolling(9).mean() - q.rolling(42).mean()
        q = data.loc[:, ('Volume', c)]
        data.loc[:, ('Volume_MA', c)] = q.rolling(9).mean() - q.rolling(42).mean()

    ma = data.loc[:, ["Volume_MA", "Close_MA"]].tail(15)
    std = StandardScaler()
    result = std.fit_transform(ma)
    df = pd.DataFrame(result, columns=ma.columns)
    d = df.tail(1).stack(level=-1).droplevel(0, axis=0)

    Plot reference: https://www.gradio.app/docs/plot
    '''
    d = pd.read_pickle("./ts/data.pkl")
    fig = px.scatter(d, x="Close_MA", y="Volume_MA", color='ticker')
    fig.update_layout(
        title="Top Right is the Growth Industry",
        xaxis_title="Trend in Price",
        yaxis_title="Trend in Volume",
    )
    return fig
    # return gr.ScatterPlot(d, x="Close_MA", y="Volume_MA", color='ticker')

'''
SIMILAR VECTOR DB SEARCH
'''
import chromadb

client = chromadb.PersistentClient(path="chroma.db")
db = client.get_collection(name="banks")

def similar(issue):
    # Nearest-neighbour search over the pre-built "banks" collection.
    docs = db.query(query_texts=issue, n_results=5)
    return docs

'''
FINE TUNE LLM LIKE SCORE
'''
from fastai.text.all import *
import pathlib

p = pathlib.Path('./banks_txt_like.pkl').resolve()
'''
Work around "NotImplementedError: cannot instantiate 'WindowsPath' on your system"
when a learner pickled on one OS is loaded on another.
'''
import platform

if platform.system() == 'Windows':
    pathlib.PosixPath = pathlib.WindowsPath
else:
    pathlib.WindowsPath = pathlib.PosixPath

learn = load_learner(p)

def like(issue):
    pred, idx, probs = learn.predict(issue)
    return pred
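
# --- Illustrative only ----------------------------------------------------
# A minimal, hypothetical sketch of how a like-score learner such as
# banks_txt_like.pkl could be fine-tuned with fastai. The CSV path and the
# column names ("text", "like") are assumptions for illustration, not the
# actual training pipeline behind this demo. The function is defined here for
# reference and is never called by the app.
def finetune_like_learner(csv_path="banks_sample.csv"):
    df_like = pd.read_csv(csv_path)
    dls = TextDataLoaders.from_df(df_like, text_col="text", label_col="like",
                                  valid_pct=0.2)
    learner = text_classifier_learner(dls, AWD_LSTM, drop_mult=0.5,
                                      metrics=accuracy)
    learner.fine_tune(4, 1e-2)
    learner.export("banks_txt_like.pkl")
    return learner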

'''
EXPLAINABLE AI
'''
'''
https://www.gradio.app/docs/interface
'''
with gr.Blocks() as demo:
    '''
    https://hits.seeyoufarm.com/
    https://dash.elfsight.com
    '''
    counter = """
![Visitor Count](https://profile-counter.glitch.me/{YOUR USER}/count.svg)
"""
    # gr.HTML(counter)
    gr.Markdown("""
Enhancing Customer Engagement and Operational Efficiency with NLP
=========
1) Semantic Similarity Document Search (SSDS)
2) Fine Tune LLM
3) Trading Analytics: Using Time Series Data to Identify Growth
4) Portfolio Optimization with costs and probabilities
5) Explainable AI

#### Data Scientist: Kevin Wong, objectdeveloper@gmail.com, 416-903-7937
##### Open-source ML bank dataset, __I'm using only a small sample of this data set for the demo__
https://www.kaggle.com/datasets/trainingdatapro/20000-customers-reviews-on-banks/?select=Banks.csv

[![Hits](https://hits.seeyoufarm.com/api/count/incr/badge.svg?url=https%3A%2F%2Fhuggingface.co%2Fspaces%2Fkevinhug%2FclientX&count_bg=%2379C83D&title_bg=%23555555&icon=&icon_color=%23E7E7E7&title=hits&edge_flat=false)](https://hits.seeyoufarm.com)
""")
    with gr.Tab("Semantic Similarity Document Search (SSDS)"):
        in_similar = gr.Textbox(placeholder="having credit card problem",
                                label="Issue",
                                info="issue you want to explore")
        out_similar = gr.JSON(label="Similar Verbatim")
        btn_similar = gr.Button("Find Similar Verbatim")
        btn_similar.click(fn=similar, inputs=in_similar, outputs=out_similar)
        gr.Examples(
            [
                ["having credit card problem"],
                ["low interest credit card"],
                ["loan"],
                ["upset customer"],
                ["what is the password"],
            ],
            [in_similar]
        )
        gr.Markdown("""
Description:
=======
In today's dynamic financial landscape, Semantic Similarity Document Search (SSDS) is a practical innovation for improving client experience, marketing leads, and sentiment analysis. As a Data Scientist with a decade of experience in the financial industry, I see the value of SSDS in action.

Investment Portfolio Construction/Marketing Leads:
------
To enhance marketing strategies, SSDS identifies market trends and consumer preferences, such as the demand for low-interest credit cards and GICs. It is a treasure trove for refining product offerings to targeted customers according to their credit score, risk appetite, demographics, collateral, capital, and economic conditions, enhancing the lift and efficiency of the recommendation process.

Combine **SingleStore (MemSQL)/Kafka structured streaming** for real-time recommendations that close sales at the right time in the right channel.

Optimize your asset allocation with your own objective function and cost function.

### issue:
- low interest credit card
- GIC

AML/KYC/Compliance/Audit/Cyber Security/Fraud Analytics/Observability:
------
### vite vue chart.js UI demo
https://kevinwkc.github.io/davinci/

### Proactive Detection:
Identify potential fraud threats and vulnerabilities in real time.

### Customer-Centric Approach:
Gain insights into customer concerns, allowing us to address them promptly.

#### issue:
- what is the password

Client Experience:
------
When a client has a bad experience, SSDS helps us swiftly locate relevant documents to understand and address their concerns, be it credit card issues, late payment fees, or credit score drops.

### issue:
- having bad client experience
- having credit card problem
- late payment fee
- credit score dropping

Sentiments:
------
SSDS tracks customer sentiment, empowering us to respond swiftly to upset customers. It ensures we address their issues promptly, enhancing trust and loyalty.

With no need for jargon, SSDS delivers tangible value to our fintech operations. It's about staying agile, informed, and customer-centric in a rapidly changing financial world.

### issue:
- upset customer
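
How the SSDS index could be built (illustrative sketch):
------
The snippet below is a minimal, hypothetical sketch of how a ChromaDB collection like the `banks` collection behind this tab could be populated and queried. The file name `Banks.csv` and the `Review` column are assumptions based on the Kaggle dataset linked above, not the exact ingestion pipeline used for this demo.

```python
import chromadb
import pandas as pd

client = chromadb.PersistentClient(path="chroma.db")
collection = client.get_or_create_collection(name="banks")

# Assumed: a sample of the bank-reviews CSV with a "Review" text column.
reviews = pd.read_csv("Banks.csv")["Review"].dropna().tolist()
collection.add(
    documents=reviews,
    ids=[f"doc-{i}" for i in range(len(reviews))],
)

# Nearest-neighbour search over the embedded verbatims (default embedding function).
print(collection.query(query_texts=["low interest credit card"], n_results=5))
```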
""")
        with gr.Accordion("Future Improvement"):
            gr.Markdown("""
Tuning the distance metric/threshold for each use case.
""")

    with gr.Tab("Explainable AI"):
        df = pd.read_csv("./xgb/re.csv")
        gr.Markdown("""
CREDIT DEFAULT RISK INTERPRETATION
=======================
Explain by Context
----------
- Sometimes, understanding why an individual defaults requires shifting to a credit-healthy background, altering the baseline E[f(x) | credit healthy] using interventional feature perturbation ([source](https://arxiv.org/pdf/2006.16234.pdf)).

[UCI Machine Learning Repository - Credit Default Dataset](https://www.kaggle.com/datasets/uciml/default-of-credit-card-clients-dataset)

![Credit Record Summary](file=./xgb/credit_record.png)

**Observations from a healthy credit background:** f(x) is a probability (logistic objective in XGBoost)
- The baseline of about 0.2 indicates we are explaining, from a healthy-credit perspective, why this individual defaults.
- This individual defaults mainly because of a high **PAY_0**, despite **PAY_AMT5** pushing in the opposite direction.
- PAY_0 represents repayment status in September 2005 (-1 = pay duly, 1 = payment delay for one month, 2 = payment delay for two months, ... 8 = payment delay for eight months, 9 = payment delay for nine months and above).

![Credit Data Summary](file=./xgb/credit_data.png)

**Insights from a healthy credit background:**
- Default patterns relate to high **PAY_0/PAY_2** (payment delay) and low **LIMIT_BAL** (lack of liquidity).
- LIMIT_BAL is the amount of given credit in NT dollars (including individual and family/supplementary credit).
- BILL_AMT1 is the bill statement amount in September 2005 (NT dollars).

HOME PRICE INTERPRETATION
=======================
This analysis is derived from an XGBoost regression model designed to predict house prices. The model uses features such as **dist_subway, age, lat, long,** and **dist_stores**. The full dataset is at the bottom of this tab.

Explain by Dataset
----------
- Below are explanations against the typical background E[f(x)].

![Summary](file=./xgb/data.png)

**Key insights:**
- **dist_subway** has a significant impact on pricing at low values (green).
- **dist_stores** demonstrates minimal impact on price.
- Higher age correlates with lower prices, while lower age raises prices.

Explain by Feature
----------
![Partial Dependence](file=./xgb/feature.png)

**Observations:**
- Prices spike for **distances lower than 900** based on the function f(x).
- Noteworthy **SHAP value at record[20] around 6500**.

Explain by Record
----------
![Force](file=./xgb/record.png)

**Contribution to Price:**
- **dist_subway** makes the largest positive contribution to price.
- **Age** follows as the second most significant contributor.

Explain by Instance
----------
![Dependence](file=./xgb/instance.png)

**Insights:**
- Around **500 dist_subway**, there's potential for both positive and negative impacts on price.
- Overall trend: closer proximity to the subway correlates with higher prices.
- An outlier at **6500 distance** from the subway negatively impacts price, despite proximity to stores (dist_stores).

![1st Decision Tree](file=./xgb/dtree.png)
*Note: first decision tree within XGBoost.*

Explain by Top 5 Error Example
===============
![Top 5 Error Data](file=./xgb/error_data.png)

**Top Features for Errors:**
- **dist_subway** and **age** stand out as the top features impacting the top 5 errors negatively (for young ages).

![Error Record](file=./xgb/error_record.png)

**Top 1 Error:**
- Notably, **lat** has a positive impact.
- Old age has a negative impact on pricing (top 1 error).

![Error Feature](file=./xgb/error_feature.png)

**Insight from Errors:**
- Further distance from the subway might positively impact pricing for the top 5 errors at around 700.

![Error Instance](file=./xgb/error_instance.png)

**Error Instances:**
- Younger age negatively impacts price, while older age positively impacts price for the top 5 errors.
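
Reproducing these views (illustrative sketch)
----------
The dataset/record/instance views above come from SHAP on an XGBoost model. The snippet below is a minimal, hypothetical sketch of that workflow; the file name `house_prices.csv`, the `price` target column, and the model hyperparameters are assumptions for illustration, not the exact pipeline behind these figures.

```python
import pandas as pd
import shap
import xgboost as xgb

# Hypothetical training frame with the features discussed above.
data = pd.read_csv("house_prices.csv")
X = data[["dist_subway", "age", "lat", "long", "dist_stores"]]
y = data["price"]

model = xgb.XGBRegressor(n_estimators=200).fit(X, y)

# Interventional feature perturbation: attributions are computed against an
# explicit background dataset (for the credit model, a credit-healthy subset
# would be passed as `data=` to shift the baseline E[f(x) | credit healthy]).
explainer = shap.TreeExplainer(model, data=X, feature_perturbation="interventional")
shap_values = explainer(X)

shap.plots.beeswarm(shap_values)                    # explain by dataset
shap.plots.waterfall(shap_values[20])               # explain by record
shap.plots.scatter(shap_values[:, "dist_subway"])   # explain by instance
```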

ML Observability
===============
**Visualization with Context:**
[Tableau Visualization](https://public.tableau.com/app/profile/kevin1619/vizzes)

**Data Validation:**
- Led data validation for a new data source using covariate-shift checks, plus a recall methodology for legacy models, with a circuit-breaker pattern and notifications.
- Ensured consistency in feature transformation between dev and prod environments.
- Monitored prediction distributions with precision and recall metrics.

**Unit Testing/Acceptance Testing:**
- Led unit testing for models, identified logical errors, and improved campaign lift by 50% for small businesses.

**A/B Testing for Lift:**
- Used statistical approaches in A/B testing for small-business models, ensuring lift met criteria.
- Set up a baseline model and retained evidence of inputs and outputs.

**File/Log Mining:**
- Led server observability, leveraging event journey maps to understand server downtimes.

**Root Cause Analysis:**
- Proficient in applying Six Sigma methodology to trace root causes with established metrics.
""")
        gr.DataFrame(df)

    with gr.Tab("Fine Tune LLM"):
        in_like = gr.Textbox(placeholder="having credit card problem",
                             label="Issue",
                             info="issue you want to explore")
        out_like = gr.Textbox(placeholder="like score in range [2 to 248] from fine-tuning data",
                              label="like score",
                              info="like score")
        btn_like = gr.Button("Classify Like Score")
        btn_like.click(fn=like, inputs=in_like, outputs=out_like)
        gr.Examples(
            [
                ["having credit card problem"],
                ["low interest credit card"],
                ["loan"],
                ["upset customer"],
                ["what is the password"],
            ],
            [in_like]
        )
        gr.Markdown("""
Smart Insights: Elevating Customer Engagement Through Sentiment Analysis
=========
As a Data Scientist with a decade of financial industry experience, I recognize the paramount importance of staying closely tuned to our customers' needs and opinions. In this app, Fine Tune LLM, we show how fine-tuning a Language Model (LLM) on a custom dataset can provide valuable insights into customer sentiment across crucial areas such as service, sales, points of failure, product, and emerging trends.

Objective:
---------
Our aim is to extract meaningful insights from customer interactions to improve our services, products, and overall customer experience. This analysis helps us understand what our customers are discussing and how they feel about different aspects of our business.

Use Case:
- intervene in attrition through incentives
""")

    with gr.Tab("Trading Analytics"):
        in_ts = gr.Textbox(placeholder="XLE XLV XLY XLK XLF XLP XLI XLRE XLU",
                           label="Ticker",
                           info="Technical limitation: currently this only works with these tickers, due to data-pulling constraints")
        plot = gr.Plot()
        # plot = gr.Plot(label="Identify Trend/Decline Industry")
        btn_ts = gr.Button("Find Trending Industry")
        btn_ts.click(fn=trend, inputs=in_ts, outputs=plot)
        gr.Markdown("""
Maximizing Trading Efficiency: Personalize Your Asset Allocation for Optimal Growth
=========
The industry life cycle is a useful tool for traders to identify growth and decline industries. It describes the evolution of an industry through its stages of growth and decline.

#### There are four phases of the industry life cycle: introduction, growth, maturity, and decline
By identifying growth and decline industries, traders can make informed investment decisions and speed up trading by investing in companies that are likely to experience growth in the future and avoiding companies that are likely to decline.

- Long Trader: buy the growth industry
- Short Trader: sell the decline industry

#### Personalize the objective function and cost function for each trader
- The cost function can prevent selecting a declining industry:
  - filter out blacklisted firms for compliance
  - increase the cost of highly correlated stocks for diversification
- The objective function can identify promising industries.

#### Personalize the UI to fit each trader
Customize the UI for each trader's secret-sauce stock-picking formula:
- metric: moving average of price, moving average of volume, ...etc
- timeframe chain: monthly, weekly, daily, 4h, 15 min

##### tableau portfolio
https://public.tableau.com/app/profile/kevin1619
##### vite vue chart.js UI demo
https://kevinwkc.github.io/davinci/

#### Personalize Alerts with Twilio
Traders can set price alerts to buy at the right price and the right time, without missing the entry in a high-stress environment.

#### Keeping records for compliance
The system can save data for reporting or compliance purposes.
""")

    with gr.Tab("Portfolio Optimization"):
        in_p_cost = gr.Textbox(value="4 30 2 3 5",
                               label="Cost",
                               info="cost for each asset")
        in_p_prob = gr.Textbox(value="0.3 0.4 0.5 0.6 0.7",
                               label="Probabilities",
                               info="P(success) for each asset")
        in_p_its = gr.Textbox(value="10",
                              label="Number of Iterations",
                              info="number of trials for the optimization")
        out_p = gr.Textbox(label="Asset Allocation Approx Optimization using AI")
        btn_p = gr.Button("Optimize Asset Allocation")
        btn_p.click(fn=optimize, inputs=[in_p_cost, in_p_prob, in_p_its], outputs=out_p)
        gr.Markdown("""
Objective: To allocate assets in a way that maximizes expected profit while minimizing costs and risks.

Inputs:
- List of available assets and their associated probabilities and costs.

Outputs:
- Allocation of assets that maximizes expected profit while minimizing costs and risks.

Objective function (volume for each asset is bounded between 0 and 100):

max SUM_s [ vol_s * prob_s - cost_s ]

Constraints:
- The total cost of the allocation must not exceed the available budget.
- The risk level of the allocation must not exceed the maximum allowable risk level.
- The profit margin of the allocation must not fall below the minimum allowable profit margin.

Method: Using a random-restart search algorithm to find an approximately optimal allocation.
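
Below is a minimal, hypothetical sketch of such a random-restart search. The `budget` and `step` parameters, the greedy local search, and the treatment of cost as a fixed offset are illustrative assumptions; this is not the actual `aiOpt.Asset.random_restart` implementation used by the Optimize button above.

```python
import numpy as np

def random_restart_allocation(cost, prob, its, budget=100.0, step=5.0, seed=0):
    # Toy random-restart hill climbing for the objective above, with
    # 0 <= vol_s <= 100 and an assumed total-volume budget. Illustrative only;
    # not the aiOpt.Asset.random_restart implementation used by this app.
    rng = np.random.default_rng(seed)
    cost, prob = np.asarray(cost, float), np.asarray(prob, float)

    def profit(vol):
        # Literal objective: sum(vol_s * prob_s - cost_s); cost is a fixed offset here.
        return float(np.sum(vol * prob - cost))

    best_vol, best_val = None, -np.inf
    for _ in range(int(its)):                       # random restarts
        vol = rng.uniform(0, 100, size=len(prob))
        vol *= min(1.0, budget / max(vol.sum(), 1e-9))
        improved = True
        while improved:                             # greedy local search
            improved = False
            for i in range(len(vol)):
                for delta in (step, -step):
                    trial = vol.copy()
                    trial[i] = np.clip(trial[i] + delta, 0, 100)
                    if trial.sum() <= budget and profit(trial) > profit(vol):
                        vol, improved = trial, True
        if profit(vol) > best_val:
            best_vol, best_val = vol, profit(vol)
    return best_vol, best_val

# Example: random_restart_allocation([4, 30, 2, 3, 5], [0.3, 0.4, 0.5, 0.6, 0.7], its=10)
```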

Assumptions:
- The probabilities and costs of the assets are known with certainty.
- The expected return and risk level of the allocation are calculated using historical data and statistical models.
""")

demo.launch(allowed_paths=["./xgb", "./ts"])