firobeid committed on
Commit 4dc9267 · verified · 1 Parent(s): 091180e

Upload app.py

Files changed (1): app.py (+1141, −147)
app.py CHANGED
@@ -1,147 +1,1141 @@
- import io
- import random
- from typing import List, Tuple
-
- import aiohttp
- import panel as pn
- from PIL import Image
- from transformers import CLIPModel, CLIPProcessor
-
- pn.extension(design="bootstrap", sizing_mode="stretch_width")
-
- ICON_URLS = {
-     "brand-github": "https://github.com/holoviz/panel",
-     "brand-twitter": "https://twitter.com/Panel_Org",
-     "brand-linkedin": "https://www.linkedin.com/company/panel-org",
-     "message-circle": "https://discourse.holoviz.org/",
-     "brand-discord": "https://discord.gg/AXRHnJU6sP",
- }
-
-
- async def random_url(_):
-     pet = random.choice(["cat", "dog"])
-     api_url = f"https://api.the{pet}api.com/v1/images/search"
-     async with aiohttp.ClientSession() as session:
-         async with session.get(api_url) as resp:
-             return (await resp.json())[0]["url"]
-
-
- @pn.cache
- def load_processor_model(
-     processor_name: str, model_name: str
- ) -> Tuple[CLIPProcessor, CLIPModel]:
-     processor = CLIPProcessor.from_pretrained(processor_name)
-     model = CLIPModel.from_pretrained(model_name)
-     return processor, model
-
-
- async def open_image_url(image_url: str) -> Image:
-     async with aiohttp.ClientSession() as session:
-         async with session.get(image_url) as resp:
-             return Image.open(io.BytesIO(await resp.read()))
-
-
- def get_similarity_scores(class_items: List[str], image: Image) -> List[float]:
-     processor, model = load_processor_model(
-         "openai/clip-vit-base-patch32", "openai/clip-vit-base-patch32"
-     )
-     inputs = processor(
-         text=class_items,
-         images=[image],
-         return_tensors="pt",  # pytorch tensors
-     )
-     outputs = model(**inputs)
-     logits_per_image = outputs.logits_per_image
-     class_likelihoods = logits_per_image.softmax(dim=1).detach().numpy()
-     return class_likelihoods[0]
-
-
- async def process_inputs(class_names: List[str], image_url: str):
-     """
-     High level function that takes in the user inputs and returns the
-     classification results as panel objects.
-     """
-     try:
-         main.disabled = True
-         if not image_url:
-             yield "##### ⚠️ Provide an image URL"
-             return
-
-         yield "##### ⚙ Fetching image and running model..."
-         try:
-             pil_img = await open_image_url(image_url)
-             img = pn.pane.Image(pil_img, height=400, align="center")
-         except Exception as e:
-             yield f"##### 😔 Something went wrong, please try a different URL!"
-             return
-
-         class_items = class_names.split(",")
-         class_likelihoods = get_similarity_scores(class_items, pil_img)
-
-         # build the results column
-         results = pn.Column("##### 🎉 Here are the results!", img)
-
-         for class_item, class_likelihood in zip(class_items, class_likelihoods):
-             row_label = pn.widgets.StaticText(
-                 name=class_item.strip(), value=f"{class_likelihood:.2%}", align="center"
-             )
-             row_bar = pn.indicators.Progress(
-                 value=int(class_likelihood * 100),
-                 sizing_mode="stretch_width",
-                 bar_color="secondary",
-                 margin=(0, 10),
-                 design=pn.theme.Material,
-             )
-             results.append(pn.Column(row_label, row_bar))
-         yield results
-     finally:
-         main.disabled = False
-
-
- # create widgets
- randomize_url = pn.widgets.Button(name="Randomize URL", align="end")
-
- image_url = pn.widgets.TextInput(
-     name="Image URL to classify",
-     value=pn.bind(random_url, randomize_url),
- )
- class_names = pn.widgets.TextInput(
-     name="Comma separated class names",
-     placeholder="Enter possible class names, e.g. cat, dog",
-     value="cat, dog, parrot",
- )
-
- input_widgets = pn.Column(
-     "##### 😊 Click randomize or paste a URL to start classifying!",
-     pn.Row(image_url, randomize_url),
-     class_names,
- )
-
- # add interactivity
- interactive_result = pn.panel(
-     pn.bind(process_inputs, image_url=image_url, class_names=class_names),
-     height=600,
- )
-
- # add footer
- footer_row = pn.Row(pn.Spacer(), align="center")
- for icon, url in ICON_URLS.items():
-     href_button = pn.widgets.Button(icon=icon, width=35, height=35)
-     href_button.js_on_click(code=f"window.open('{url}')")
-     footer_row.append(href_button)
- footer_row.append(pn.Spacer())
-
- # create dashboard
- main = pn.WidgetBox(
-     input_widgets,
-     interactive_result,
-     footer_row,
- )
-
- title = "Panel Demo - Image Classification"
- pn.template.BootstrapTemplate(
-     title=title,
-     main=main,
-     main_max_width="min(50%, 698px)",
-     header_background="#F08080",
- ).servable(title=title)
+ import numpy as np
+ import panel as pn
+ from pathlib import Path
+ import pandas as pd
+ import hvplot.pandas
+ from io import BytesIO, StringIO
+ import sys
+ import time
+ from dotenv import load_dotenv
+ import os
+ load_dotenv()
+ '''
+ <meta http-equiv="pragma" content="no-cache" />
+ <meta http-equiv="expires" content="-1" />
+ '''
+
+ hospital_data = pd.read_csv(
+     'https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/hospital_claims.csv'
+ ).dropna()
+
+ # Slice the DataFrame to consist of only "552 - MEDICAL BACK PROBLEMS W/O MCC" information
+ procedure_552_charges = hospital_data[
+     hospital_data["DRG Definition"] == "552 - MEDICAL BACK PROBLEMS W/O MCC"
+ ]
+ # Group data by state and average total payments, and then sum the values
+ payments_by_state = procedure_552_charges[["Average Total Payments", "Provider State"]]
+ # Sum the average total payments by state
+ total_payments_by_state = payments_by_state.groupby("Provider State").sum()
+ plot1 = total_payments_by_state.hvplot.bar(rot=45)
+
+ # Sort the state data values by Average Total Payments
+ sorted_total_payments_by_state = total_payments_by_state.sort_values("Average Total Payments")
+ sorted_total_payments_by_state.index.names = ['Provider State Sorted']
+ # Plot the sorted data
+ plot2 = sorted_total_payments_by_state.hvplot.line(rot=45)
+
+ # Compose two plots side by side using the + operator
+ plot3 = total_payments_by_state.hvplot.bar(rot=45) + sorted_total_payments_by_state.hvplot(rot=45)
+
+ # Group data by state and average medicare payments, and then sum the values
+ medicare_payment_by_state = procedure_552_charges[["Average Medicare Payments", "Provider State"]]
+ total_medicare_by_state = medicare_payment_by_state.groupby("Provider State").sum()
+ # Sort data values
+ sorted_total_medicare_by_state = total_medicare_by_state.sort_values("Average Medicare Payments")
+ plot4 = sorted_total_medicare_by_state.hvplot.bar(rot=45)
+
+ # Overlay a line and a bar plot using the * operator
+ plot5 = sorted_total_payments_by_state.hvplot.line(label="Average Total Payments", rot=45) * sorted_total_medicare_by_state.hvplot.bar(label="Average Medicare Payments", rot=45)
+
+ # Overlay plots of the same type using the * operator
+ plot6 = sorted_total_payments_by_state.hvplot.bar(label="Average Total Payments", rot=45) * sorted_total_medicare_by_state.hvplot.bar(label="Average Medicare Payments", width=1000, rot=45)
+
+ # hvplot_snip = pn.pane.HTML("https://firobeid.github.io/compose-plots/Resources/binning_V1.html")
+ hvplot_snip = pn.pane.Markdown("""[DataViz HTMLS Deployments](https://firobeid.github.io/compose-plots/Resources/binning_V1.html)""")
+ pn.extension(template="fast")
+
+ pn.state.template.param.update(
+     # site_url="",
+     # site="",
+     title="UC Berkeley FinTech Bootcamp Demo",
+     favicon="https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/favicon.ico",
+ )
+ # Create a Title for the Dashboard
+ title = pn.pane.Markdown(
+     """
+ # UC Berkeley FinTech Bootcamp Demo - Firas Obeid
+ """,
+     width=1000,
+ )
+
+ title_0 = pn.pane.Markdown(
+     """
+ # Intro to Python : Text Munging & Cleaning
+ """,
+     width=800,
+ )
+
+ title1 = pn.pane.Markdown(
+     """
+ # Hospital Data Analysis
+ """,
+     width=800,
+ )
+
+ title2 = pn.pane.Markdown(
+     """
+ # Machine Learning Unwinding
+ """,
+     width=800,
+ )
+
+ image = pn.pane.image.PNG(
+     'https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/image.png',
+     alt_text='Meme Logo',
+     link_url='https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/image.png',
+     width=500
+ )
+ welcome = pn.pane.Markdown(
+     """
+ ### This dashboard/WebApp leverages FinTech and Data Science tools for practical, hands-on demos for UC Berkeley FinTech Bootcamp students in [`Firas Ali Obeid's`](https://www.linkedin.com/in/feras-obeid/) classes
+ * The motive is to keep students up to date with the tools that let them take a problem from definition to deployment in a very short amount of time, for efficient deliverables in the workplace or in academia.
+ * The tool/web app is developed completely in Python and deployed serverless on GitHub Pages (not static anymore, right?!)
+
+ * Disclaimer: All data presented are from UC Berkeley resources.
+ * Disclaimer: All references: https://blog.holoviz.org/panel_0.14.html
+
+ ***`Practice what you preach`***
+
+ """
+ )
+
+ ## Python Competition ##
+ python_intro = pn.pane.Markdown(
+     """
+ # Essence of Data Cleaning in Python
+
+ * Write a script/function that ingests the following list and addresses all the unique string-formatting variants:
+ ```
+ names = ['St. Albans',
+          'St. Albans',
+          'St Albans',
+          'St.Ablans',
+          "St.albans",
+          "St. Alans", 'S.Albans',
+          'St..Albans', 'S.Albnas',
+          'St. Albnas', "St.Al bans", 'St.Algans',
+          "Sl.Albans", 'St. Allbans', "St, Albans", 'St. Alban', 'St. Alban']
+ ```
+
+ * The intended output is the following, where you clean and split everything into `Sx Axxxx` form in one shot:
+ ```
+ ['St Albans', 'St Albans', 'St Albans', 'St Ablans',
+  'St Albans', 'St Alans', 'S Albans', 'St Albans', 'S Albnas',
+  'St Albnas', 'St Albans', 'St Algans', 'Sl Albans', 'St Allbans', 'St Albans', 'St Alban', 'St Alban']
+ ```
+
+ ***`Cleaning text without using any package`***
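+
+ A minimal sketch of the expected submission shape (`clean` is a hypothetical placeholder; the actual cleaning logic is yours to write, standard library only):
+ ```
+ def clean(name):
+     ...  # your cleaning logic goes here
+
+ result = [clean(name) for name in names]
+ print(result)  # end your submission with exactly one print() of the results list
+ ```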
+
+ """
+ )
+ code_submission = pn.widgets.TextAreaInput(value="", height=300, name='Paste your code below (remember to print(results))')
+ run_python_comp = pn.widgets.Button(name="Click to Check Code Runtime/Accuracy Results")
+
+ def time_it():
+     return pd.to_datetime(time.time(), unit='s')
+ # def memory()->str:
+ #     psutil
+ #     return print('used: {}% free: {:.2f}GB'.format(psutil.virtual_memory().percent, float(psutil.virtual_memory().free)/1024**3))#@
+
+ def python_competition():
+     names = ['St. Albans',
+              'St. Albans',
+              'St Albans',
+              'St.Ablans',
+              "St.albans",
+              "St. Alans", 'S.Albans',
+              'St..Albans', 'S.Albnas',
+              'St. Albnas', "St.Al bans", 'St.Algans',
+              "Sl.Albans", 'St. Allbans', "St, Albans", 'St. Alban', 'St. Alban']
+     actual_output = ['St Albans', 'St Albans', 'St Albans', 'St Ablans', 'St Albans', 'St Alans', 'S Albans', 'St Albans', 'S Albnas',
+                      'St Albnas', 'St Albans', 'St Algans', 'Sl Albans', 'St Allbans', 'St Albans', 'St Alban', 'St Alban']
+
+     if str(code_submission.value) == "":
+         # return pn.pane.Markdown(f"""""")
+         return pn.pane.Alert("""### Please pass in your code above!""", alert_type="warning",)
+     try:
+         # code = str(code_submission.value.decode("utf-8"))
+         code = code_submission.value
+         # print(code)
+         # create file-like strings to capture output
+         codeOut = StringIO()
+         codeErr = StringIO()
+         # capture output and errors
+         sys.stdout = codeOut
+         sys.stderr = codeErr
+         start = time_it()
+         # start_memory = float(psutil.virtual_memory().free)/1024**3
+         exec(code)
+         end = time_it()
+         # end_memory = float(psutil.virtual_memory().free)/1024**3
+         loop_time = end - start
+         # loop_memory = start_memory - end_memory
+         # restore stdout and stderr
+         sys.stdout = sys.__stdout__
+         sys.stderr = sys.__stderr__
+
+         # s = codeErr.getvalue()
+         # print("error:\n%s\n" % s)
+         s = codeOut.getvalue()
+         s = eval(s)
+
+         codeOut.close()
+         codeErr.close()
+         accuracy = len(set(s).intersection(set(actual_output))) / len(set(actual_output))
+         results = pd.DataFrame({'Results(Time+Space_Complexity)': {'Nanoseconds': loop_time.nanoseconds, 'Microseconds': loop_time.microseconds,
+                                 'Seconds': loop_time.seconds, 'Total_Seconds': loop_time.total_seconds(), 'Accuracy': '%.2f' % (accuracy*100)}})  # 'Memory': '%d MB' % (loop_memory* 1024),
+         return pn.widgets.DataFrame(results.sort_index(), width=600, height=1000, name='Results')
+     except Exception as e:
+         return pn.pane.Markdown(f"""{e}""")
+
+ py_widgets_submission = pn.WidgetBox(
+     pn.panel("""# Check your Code""", margin=(0, 10)),
+     pn.panel('* Paste your code below; no need to add the original list.', margin=(0, 10)),
+     pn.panel('* Please end your code with a single print() of your results list; no other print() should appear.', margin=(0, 10)),
+     pn.panel('* If you get an error, remove blank lines between consecutive code lines', margin=(0, 10)),
+     code_submission,
+     run_python_comp,
+     pn.pane.Alert("""## Your Code Submission Results""", alert_type="success",),
+     width=500
+ )
+
+
+ @pn.depends(run_python_comp.param.clicks)
+ def python_competition_submission(_):
+     return pn.Column(python_competition)
+
+ # ML GENERAL
+ ml_slider = pn.widgets.IntSlider(start=1, end=10)
+ def ml_slideshow(index):
+     url = f"https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/{index}.png"
+     return pn.pane.PNG(url, width=500)  # the slides are PNG files, so use the PNG pane
+
+ ml_output = pn.bind(ml_slideshow, ml_slider)
+ ml_app = pn.Column(ml_slider, ml_output)
+
+ ## DATA
+ data_slider = pn.widgets.IntSlider(start=1, end=8)
+ def data_slideshow(index):
+     url2 = f"https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/Data_Splitting/{index}.png"
+     return pn.pane.PNG(url2, width=800)
+ data_output = pn.bind(data_slideshow, data_slider)
+
+ ## CLUSTERING
+ clustering_slider = pn.widgets.IntSlider(start=1, end=36)
+ def cluster_slideshow(index):
+     url2 = f"https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/Clustering/Clustering-{index}.png"
+     return pn.pane.PNG(url2, width=800)
+ cluster_output = pn.bind(cluster_slideshow, clustering_slider)
+ # cluster_app = pn.Column(clustering_slider, cluster_output)
+ k_means_simple = pn.pane.Markdown("""
+ ### K_means Simple Algo Implementation
+ ```python
+ import numpy as np
+ from sklearn.metrics import pairwise_distances_argmin
+
+ def find_clusters(X, n_clusters, rseed=2):
+     # 1. Randomly choose clusters
+     rng = np.random.RandomState(rseed)
+     i = rng.permutation(X.shape[0])[:n_clusters]
+     centers = X[i]
+
+     while True:
+         # 2a. Assign labels based on closest center
+         labels = pairwise_distances_argmin(X, centers)
+
+         # 2b. Find new centers from means of points
+         new_centers = np.array([X[labels == i].mean(0)
+                                 for i in range(n_clusters)])
+
+         # 2c. Check for convergence
+         if np.all(centers == new_centers):
+             break
+         centers = new_centers
+
+     return centers, labels
+ ```
+ """, width=500)
+
+ ## GENERAL ML
+ general_ml_slider = pn.widgets.IntSlider(start=1, end=40)
+ def general_ml_slideshow(index):
+     url = f"https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Algo_Survey/{index}.png"
+     return pn.pane.PNG(url, width=800)
+ general_ml_output = pn.bind(general_ml_slideshow, general_ml_slider)
+
+ ML_quote = pn.pane.Markdown(
+     """
+ ***`"When you're fundraising, it's AI.
+ When you're hiring, it's ML.
+ When you're implementing, it's linear regression.
+ When you're debugging, it's printf()" - Baron Schwartz`***
+ """
+ )
+
+ ML_algoes = pn.pane.Markdown("""
+ ### Some Behind-the-Scenes Simple Implementations
+ ```python
+ import numpy as np
+
+ def LogisticRegression_predict(features, weights, intercept):
+     dot_product = np.dot(features, weights.T)  # or .reshape(-1) instead of T
+     z = intercept + dot_product
+     sigmoid = 1 / (1 + np.exp(-z))
+     return sigmoid
+
+ import pickle
+ def save_model(model_name, model):
+     '''
+     model_name = name.pkl
+     load it back later with pickle.load(open('name.pkl', 'rb'))
+     and assign a variable to the loaded model
+     '''
+     with open(str(model_name), 'wb') as f:
+         pickle.dump(model, f)
+ ```
+
+ ### Criteria for Splitting in Decision Trees
+ ```python
+ def class_counts(rows):
+     '''Helper assumed by gini(): counts how many rows carry each label
+     (the label is the last column of each row).'''
+     counts = {}
+     for row in rows:
+         label = row[-1]
+         counts[label] = counts.get(label, 0) + 1
+     return counts
+
+ def gini(rows):
+     '''
+     Calculate the Gini Impurity for a list of rows.
+
+     There are a few different ways to do this, I thought this one was
+     the most concise. See:
+     https://en.wikipedia.org/wiki/Decision_tree_learning#Gini_impurity
+     '''
+     counts = class_counts(rows)
+     impurity = 1
+     for lbl in counts:
+         prob_of_lbl = counts[lbl] / float(len(rows))
+         impurity -= prob_of_lbl**2
+     return impurity
+ ```
+ ### Find Best Split Algo (Decision Tree)
+
+ ```python
+ # assumes the Question, partition and info_gain helpers from the same tutorial
+ def find_best_split(rows):
+     '''Find the best question to ask by iterating over every feature / value
+     and calculating the information gain.'''
+     best_gain = 0  # keep track of the best information gain
+     best_question = None  # keep track of the feature / value that produced it
+     current_uncertainty = gini(rows)
+     n_features = len(rows[0]) - 1  # number of columns
+
+     for col in range(n_features):  # for each feature
+
+         values = set([row[col] for row in rows])  # unique values in the column
+
+         for val in values:  # for each value
+
+             question = Question(col, val)
+
+             # try splitting the dataset
+             true_rows, false_rows = partition(rows, question)
+
+             # Skip this split if it doesn't divide the
+             # dataset.
+             if len(true_rows) == 0 or len(false_rows) == 0:
+                 continue
+
+             # Calculate the information gain from this split
+             gain = info_gain(true_rows, false_rows, current_uncertainty)
+
+             # You actually can use '>' instead of '>=' here
+             # but I wanted the tree to look a certain way for our
+             # toy dataset.
+             if gain >= best_gain:
+                 best_gain, best_question = gain, question
+
+     return best_gain, best_question
+ ```
+
+ #### Why are we doing Label Encoding?
+ - We apply One-Hot Encoding when:
+
+     The categorical feature is not ordinal (like country names)
+     The number of categorical features is small, so one-hot encoding can be effectively applied
+
+ - We apply Label Encoding when:
+
+     The categorical feature is ordinal (like Jr. kg, Sr. kg, Primary school, high school)
+     The number of categories is quite large, as one-hot encoding can lead to high memory consumption
+
+ A label-encoding sketch (a one-hot sketch follows it):
+ ```python
+ categorical_vars = list(df.columns[df.dtypes == object].values)
+ obj_df = df.select_dtypes(include=['object']).copy()
+ # keep a reverse map from integer codes back to the original categories
+ map_dict = {col: {n: cat for n, cat in enumerate(obj_df[col].astype('category').cat.categories)} for col in obj_df}
+ obj_df = pd.DataFrame({col: obj_df[col].astype('category').cat.codes for col in obj_df}, index=obj_df.index)
+ ```
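+ And a minimal one-hot sketch with pandas, using the same hypothetical `df` and `categorical_vars` as above:
+ ```python
+ # one dummy column per category; drop_first avoids a redundant column
+ one_hot_df = pd.get_dummies(df, columns=categorical_vars, drop_first=True)
+ ```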
+ """, width=800)
+
+ ML_metrics = pn.pane.Markdown("""
+ ### Binary Classification Metrics Calculation
+
+ ```python
+ __author__ = "Firas Obeid"
+ def metrics(confusion_matrix):
+     '''
+     Each mean is appropriate for different types of data; for example:
+
+     * If values have the same units: Use the arithmetic mean.
+     * If values have differing units: Use the geometric mean.
+     * If values are rates: Use the harmonic mean.
+     confusion_matrix = [[ TN, FP ],
+                         [ FN, TP ]]
+     '''
+     TN = confusion_matrix[0, 0]
+     FP = confusion_matrix[0, 1]
+     FN = confusion_matrix[1, 0]
+     TP = confusion_matrix[1, 1]
+     Specificity = round(TN / (FP + TN), 4)  # True Negative Rate
+     FPR = round(FP / (FP + TN), 4)
+     Confidence = round(1 - FPR, 4)
+     FDR = round(FP / (FP + TP), 4)
+     Precision = 1 - FDR  # TP / (FP + TP)
+     Recall_Power = round(TP / (TP + FN), 4)  # Sensitivity or TPR
+     G_mean = (Specificity * Recall_Power) ** (1/2)
+     Accuracy = round((TP + TN) / (TP + FP + TN + FN), 4)
+     return {'FPR': FPR, 'Confidence': Confidence, 'FDR': FDR, 'Precision':
+             Precision, 'Recall_Power': Recall_Power, 'Accuracy': Accuracy, "G_mean": G_mean}
+ ```
+ """, width=800)
+
+ knn_scratch = pn.pane.Markdown("""
+ ### K-Nearest Neighbor from Scratch
+ ```python
+ __author__ = "Mohammad Obeid"
+ import numpy as np
+ import matplotlib.pyplot as plt
+
+ def knn(X_train, y_train, X_test, y_test, k):
+     '''
+     returns the test error compared to the predicted labels
+     '''
+     # pairwise Euclidean distances between each test and train sample, via broadcasting
+     distances = np.sqrt(np.sum((X_test[:, np.newaxis, :] - X_train) ** 2, axis=2))
+     y_pred = np.zeros(len(X_test))
+
+     for i in range(len(X_test)):
+         # Get the indices of the k-nearest neighbors
+         indices = np.argsort(distances[i])[:k]
+
+         # Get the labels of the k-nearest neighbors
+         k_nearest_labels = y_train[indices].astype(int)
+
+         # Predict the label of test sample i by majority vote
+         y_pred[i] = np.bincount(k_nearest_labels).argmax()
+
+     return sum(y_pred != y_test) / len(y_test)
+
+ ## Optimal K for KNN
+ test_errors = []
+ for k in range(1, 101):
+     test_error = knn(X_train, y_train, X_test, y_test, k)
+     test_errors.append(test_error)
+
+ # Plot the test errors as a function of n_neighbors
+ plt.plot(range(1, 101), test_errors)
+ plt.xlabel('n_neighbors')
+ plt.ylabel('Test error')
+ plt.title('KNN classifier performance')
+ plt.show()
+ ```
+ """, width=880)
+ prec_recall = pn.pane.Markdown("""
+ ### Precision VS Recall Interpretation
+ **Recall**: How many of that class (1 or 0) does the model capture?
+
+ **Precision**: How many of those captured are correct predictions?
+
+ At a high level, recall controls false negatives (more important in the medical field, for instance)
+ and precision controls false positives (more important in credit risk, trading, and finance in general).
+ In marketing, for example, we care about a balance between precision and recall: with high recall but low
+ precision we would end up reaching out to many customers the model flagged incorrectly.
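+
+ A minimal sketch computing both from a 2x2 confusion matrix laid out as in the metrics() snippet above ([[TN, FP], [FN, TP]]):
+ ```python
+ TN, FP, FN, TP = 85, 5, 3, 7                  # toy counts
+ precision = TP / (TP + FP)                    # of those captured, how many are correct?
+ recall = TP / (TP + FN)                       # of that class, how many does the model capture?
+ print(round(precision, 2), round(recall, 2))  # 0.58 0.7
+ ```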
+ """, width=500)
+
+ ## DEEP LEARNING
+ dl_slider = pn.widgets.IntSlider(start=1, end=7)
+ def dl_slideshow(index):
+     url = f"https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/DL/{index}.png"
+     return pn.pane.PNG(url, width=800)
+ dl_output = pn.bind(dl_slideshow, dl_slider)
+ lstm_gif = pn.pane.GIF('https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/DL/LSTM_ANIMATION.gif', alt_text='LSTM Animation')
+ DL_tips = pn.pane.Markdown("""
+ ### Binary Cross-Entropy (Loss/Error)
+ ```python
+ # nn is a trained Keras model; X_test and y_test are held-out data
+ def Z(x):
+     return np.log(x / (1-x))
+ def error(Z, Y):
+     return(max(Z, 0) - Z*Y + np.log(1 + np.exp(-abs(Z))))
+
+ y_pred = nn.predict(X_test)
+ y_pred = np.array(list(map(Z, y_pred)))
+ error = np.vectorize(error)
+
+ all_errors = error(y_pred.ravel(), y_test)
+
+ np.mean(all_errors)
+ ```
+
+ # What I Learned from My Personal Research
+
+ * If faced with [Failed to call ThenRnnBackward]:
+
+     1. Allow GPU memory growth
+     2. Use batch_input_shape instead of input_shape
+     3. Use drop_remainder=True when creating batches
+
+ * If faced with IPython crashing during training:
+
+     1. Simply put verbose=0 in all model.fit(...) instructions
+     2. Install keras-tqdm to manage the progress bar
+     3. Redirect the output to a file
+
+ ### Modelling Tips for Neural Networks
+ We can specify devices for storage and calculation, such as the CPU or GPU. By default, data are created in main memory and the CPU is used for calculations.
+
+ The deep learning framework requires all input data for a calculation to be on the same device, be it the CPU or the same GPU.
+
+ You can lose significant performance by moving data without care. A typical mistake is as follows: computing the loss for every minibatch on the GPU and reporting it back to the user on the command line (or logging it in a NumPy ndarray) will trigger a global interpreter lock which stalls all GPUs. It is much better to allocate memory for logging inside the GPU and only move larger logs.
+
+ - For TensorFlow 2: you can just use LSTM with no activation specified (i.e., the default tanh) and it will automatically use the CuDNN version.
+ - Gradient clipping is a technique to prevent exploding gradients in very deep networks, usually recurrent neural networks. It prevents any gradient from having a norm greater than the threshold; gradients above it are clipped.
+
+ - PCO to initialize weights helps reduce computation time and find global optima
+ - Denoising input data helps predict small price changes
+ - Epoch means one pass over the full training set
+ - Batch means that you use all your data to compute the gradient during one iteration.
+ - Mini-batch means you only take a subset of all your data during one iteration.
+ - In the context of SGD, "minibatch" means that the gradient is calculated across the entire batch before updating weights. If you are not using a "minibatch", every training example in a "batch" updates the learning algorithm's parameters independently.
+
+ - Batch Gradient Descent: batch size is set to the total number of examples in the training dataset. (batch_size = len(train))
+ - Stochastic Gradient Descent: batch size is set to one. (batch_size = 1)
+ - Minibatch Gradient Descent: batch size is set to more than one and less than the total number of examples in the training dataset. (batch_size = 32, 64, ...) See the sketch below.
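+
+ A minimal Keras sketch showing where the batch size enters (X, y and model are assumed to already exist):
+ ```python
+ # batch_size=len(X) -> batch GD; batch_size=1 -> SGD; anything in between -> minibatch GD
+ model.fit(X, y, epochs=10, batch_size=32, verbose=0)
+ ```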
523
+
524
+ ### Tips for Activation Functions:
525
+ - When using the ReLU function for hidden layers, it is a good practice to use a "He Normal" or "He Uniform" weight initialization and scale input data to the range 0-1 (normalize) prior to training.
526
+ - When using the Sigmoid function for hidden layers, it is a good practice to use a "Xavier Normal" or "Xavier Uniform" weight initialization (also referred to Glorot initialization, named for Xavier Glorot) and scale input data to the range 0-1 (e.g. the range of the activation function) prior to training.
527
+ - When using the TanH function for hidden layers, it is a good practice to use a "Xavier Normal" or "Xavier Uniform" weight initialization (also referred to Glorot initialization, named for Xavier Glorot) and scale input data to the range -1 to 1 (e.g. the range of the activation function) prior to training.
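+
+ For example, in Keras (a minimal sketch; the layer sizes are arbitrary):
+ ```python
+ from tensorflow.keras import layers
+
+ # He initialization pairs with ReLU; Glorot (Xavier) pairs with sigmoid/tanh
+ hidden_relu = layers.Dense(64, activation="relu", kernel_initializer="he_normal")
+ hidden_tanh = layers.Dense(64, activation="tanh", kernel_initializer="glorot_uniform")
+ ```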
+
+ #### Tips for LSTM Inputs
+ - The LSTM input layer must be 3D.
+ - The meaning of the 3 input dimensions is: samples, time steps, and features (sequences, sequence_length, characters).
+ - The LSTM input layer is defined by the input_shape argument on the first hidden layer.
+ - The input_shape argument takes a tuple of two values that define the number of time steps and features.
+ - The number of samples is assumed to be 1 or more.
+ - The reshape() function on NumPy arrays can be used to reshape your 1D or 2D data to be 3D (see the sketch below).
+ - The reshape() function takes a tuple as an argument that defines the new shape.
+ - The LSTM returns the entire sequence of outputs for each sample (one vector per time step per sample) if you set return_sequences=True.
+ - A stateful RNN only makes sense if each input sequence in a batch starts exactly where the corresponding sequence in the previous batch left off. Our RNN model is stateless since each sample is different from the others; they don't form one text corpus but are separate headlines.
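+
+ A minimal NumPy sketch of the 2D-to-3D reshape:
+ ```python
+ import numpy as np
+
+ data = np.arange(20).reshape(10, 2)    # 2D: 10 samples with 2 time steps each
+ lstm_input = data.reshape(10, 2, 1)    # 3D: (samples, time steps, features)
+ print(lstm_input.shape)                # (10, 2, 1)
+ ```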
+
+ #### Tips for Embedding Layer
+ - Gives relationships between characters.
+ - Dense vector representation (n-dimensional) of floating point values. Maps a char/byte to a dense vector.
+ - Embeddings are trainable weights/parameters of the model, equivalent to weights learned by a dense layer.
+ - In our case each unique character/byte is represented with an N-dimensional vector of floating point values, where the learned embedding forms a lookup table by "looking up" each character's dense vector to encode it.
+ - A simple integer encoding of our characters is not efficient for the model to interpret, since a linear classifier only learns the weights for a single feature but not the relationship (probability distribution) between features (characters) or their encodings.
+ - A higher-dimensional embedding can capture fine-grained relationships between characters, but takes more data to learn (256 dimensions in our case). A sketch follows.
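+
+ A minimal Keras sketch of such a character-level embedding (the two 256s mirror the byte vocabulary size and embedding dimension above):
+ ```python
+ from tensorflow.keras import layers
+
+ # a trainable lookup table: each of 256 byte/char ids -> a 256-dim dense vector
+ char_embedding = layers.Embedding(input_dim=256, output_dim=256)
+ ```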
+
+ """, width=1000)
+ ## TIMESERIES
+ timeseries_libs = pn.pane.Markdown("""
+ ## 10 Time-Series Python Libraries in 2022:
+
+ ### 📚 Flow forecast
+
+ Flow forecast is a deep learning framework for time-series forecasting. It provides the latest models (transformers, attention models, GRUs) and cutting-edge concepts with interpretability metrics. It is the only true end-to-end deep learning framework for time-series forecasting.
+
+ ### 📚 Auto_TS
+
+ Auto_TS trains multiple time-series models with just one line of code and is part of autoML.
+
+ ### 📚 SKTIME
+
+ Sktime, an extension of scikit-learn, includes machine-learning methods for time-series regression, prediction, and classification. This library has the most features, with interfaces to scikit-learn, statsmodels, TSFresh, and PyOD.
+
+ ### 📚 Darts
+
+ Darts contains a large number of models, ranging from ARIMA to deep neural networks. It also lets users combine predictions from several models and external regressors, which makes it easier to backtest models.
+
+ ### 📚 Pmdarima
+
+ Pmdarima is a wrapper over ARIMA with automatic hyperparameter tuning for analyzing, forecasting, and visualizing time-series data. It includes transformers and featurizers, such as Box-Cox and Fourier transformations, and a seasonal decomposition tool.
+
+ ### 📚 TSFresh
+
+ TSFresh automates feature extraction and selection from time series. It also offers dimensionality reduction, outlier detection, and missing-value handling.
+
+ ### 📚 Pyflux
+
+ Pyflux builds probabilistic models, which is very advantageous for tasks where a more complete picture of uncertainty is needed; the latent variables are treated as random variables through a joint probability distribution.
+
+
+ ### 📚 Prophet
+
+ Facebook's Prophet is a forecasting tool for CSV-formatted data; it is suitable for strongly seasonal data and robust to missing data and outliers.
+ Prophet is a library that makes it easy for you to fit a model that decomposes a time series into trend, season, and holiday components. It's somewhat customizable and has a few nifty tools, like graphing and well-thought-out forecasting. A usage sketch follows the component list below.
+ Prophet does the following linear decomposition:
+
+ * g(t): Logistic or linear growth trend with optional linear splines (linear in the exponent for the logistic growth). The library calls the knots 'change points.'
+ * s(t): Sine and cosine (i.e. Fourier series) for seasonal terms.
+ * h(t): Gaussian functions (bell curves) for holiday effects (instead of dummies, to make the effect smoother).
+
+ [Some thoughts about Prophet](https://www.reddit.com/r/MachineLearning/comments/syx41w/p_beware_of_false_fbprophets_introducing_the/)
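+
+ A minimal usage sketch (Prophet expects a dataframe with a `ds` date column and a `y` value column; `df` is assumed to be in that shape):
+ ```python
+ from prophet import Prophet
+
+ m = Prophet()
+ m.fit(df)                                      # df has columns ds (dates) and y (values)
+ future = m.make_future_dataframe(periods=365)  # extend 365 periods past the training data
+ forecast = m.predict(future)                   # yhat plus trend/seasonality components
+ ```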
+
+ ### 📚 Statsforecast
+ [GitHub Link to Statsforecast](https://github.com/Nixtla/statsforecast)
+
+ Statsforecast offers a collection of univariate time-series models. It includes ADIDA, HistoricAverage, CrostonClassic, CrostonSBA, CrostonOptimized, SeasonalNaive, IMAPA, Naive, RandomWalkWithDrift, TSB, AutoARIMA and ETS.
+ Impressive fact: it is 20x faster than pmdarima, 500x faster than Prophet, 100x faster than NeuralProphet, and 4x faster than statsmodels.
+
+ ### 📚 PyCaret
+
+ PyCaret replaces hundreds of lines of code with only a few. Its time-series forecasting module is in pre-release mode (install with the --pre tag) with 30+ algorithms. It includes automated hyperparameter tuning, experiment logging and deployment on cloud.
+
+ ### 📚 NeuralProphet
+
+ NeuralProphet is a neural-network-based time-series model, inspired by Facebook Prophet and AR-Net, built on PyTorch.
+
+ Source: Maryam Miradi, PhD
+ """, width=800)
+ timeseries_data_split = pn.pane.Markdown("""
+ ### Training and Validating Time Series Forecasting Models
+ ```python
+ from sklearn.model_selection import TimeSeriesSplit
+ N_SPLITS = 4
+
+ X = df['timestamp']
+ y = df['value']
+
+ folds = TimeSeriesSplit(n_splits=N_SPLITS)
+
+ for i, (train_index, valid_index) in enumerate(folds.split(X)):
+     X_train, X_valid = X[train_index], X[valid_index]
+     y_train, y_valid = y[train_index], y[valid_index]
+ ```
+ ### Training and Validating `Financial` Time Series Forecasting Models
+ ```python
+ __author__ = 'Stefan Jansen'
+ class MultipleTimeSeriesCV:
+     '''
+     Generates tuples of train_idx, test_idx pairs
+     Assumes the MultiIndex contains levels 'symbol' and 'date'
+     Purges overlapping outcomes
+     '''
+
+     def __init__(self,
+                  n_splits=3,
+                  train_period_length=126,
+                  test_period_length=21,
+                  lookahead=None,
+                  date_idx='date',
+                  shuffle=False):
+         self.n_splits = n_splits
+         self.lookahead = lookahead
+         self.test_length = test_period_length
+         self.train_length = train_period_length
+         self.shuffle = shuffle
+         self.date_idx = date_idx
+
+     def split(self, X, y=None, groups=None):
+         unique_dates = X.index.get_level_values(self.date_idx).unique()
+         days = sorted(unique_dates, reverse=True)
+         split_idx = []
+         for i in range(self.n_splits):
+             test_end_idx = i * self.test_length
+             test_start_idx = test_end_idx + self.test_length
+             train_end_idx = test_start_idx + self.lookahead - 1
+             train_start_idx = train_end_idx + self.train_length + self.lookahead - 1
+             split_idx.append([train_start_idx, train_end_idx,
+                               test_start_idx, test_end_idx])
+
+         dates = X.reset_index()[[self.date_idx]]
+         for train_start, train_end, test_start, test_end in split_idx:
+             train_idx = dates[(dates[self.date_idx] > days[train_start])
+                               & (dates[self.date_idx] <= days[train_end])].index
+             test_idx = dates[(dates[self.date_idx] > days[test_start])
+                              & (dates[self.date_idx] <= days[test_end])].index
+             if self.shuffle:
+                 np.random.shuffle(list(train_idx))
+             yield train_idx.to_numpy(), test_idx.to_numpy()
+
+     def get_n_splits(self, X, y, groups=None):
+         return self.n_splits
+ ```
+ """, width=800)
+ ts_gif = pn.pane.GIF("https://raw.githubusercontent.com/firobeid/machine-learning-for-trading/main/assets/timeseries_windowing.gif")
+ ts_cv = pn.pane.PNG("https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ts/cv.png", link_url='https://wandb.ai/iamleonie/A-Gentle-Introduction-to-Time-Series-Analysis-Forecasting/reports/A-Gentle-Introduction-to-Time-Series-Analysis-Forecasting--VmlldzoyNjkxOTMz', width=800)
+ # Create a tab layout for the dashboard
+ # https://USERNAME.github.io/REPO_NAME/PATH_TO_FILE.pdf
+ motivational = pn.pane.Alert("## YOUR PROGRESS...\nUpward sloping and incremental. Keep moving forward!", alert_type="success")
+ gif_pane = pn.pane.GIF('https://upload.wikimedia.org/wikipedia/commons/b/b1/Loading_icon.gif')
+ progress_ = pn.pane.PNG('https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/Progress.png')
+
+ ##########################
+ ##TIMESERIES COMPETITION##
+ ##########################
+ reward = pn.pane.PNG("https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/TimeSeriesCompetition/Images/Reward.png")
+ other_metrics = pn.pane.PNG("https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ts/Regression_Loss_functions.png", height=500)
+ def cal_error_metrics():
+     global real_test_data, predictions, rmse_error
+
+     def rmse(preds, target):
+         if len(preds) != len(target):
+             raise AttributeError('list1 and list2 must be of the same length')
+         return round(((sum((preds[i]-target[i])**2 for i in range(len(preds)))/len(preds)) ** 0.5), 2)
+
+     try:
+         assert len(real_test_data) == len(predictions)
+     except Exception as e:  # prediction length must match the test set
+         return pn.pane.Markdown("""ERROR: You didn't upload exactly 17519 prediction rows!""")
+     try:
+         rmse_error = rmse(real_test_data["GHI"].values, predictions[predictions.columns[0]].values)
+         error_df = pd.DataFrame({"RMSE": [rmse_error]}, index=[str(file_input_ts.filename)])
+         error_df.index.name = 'Uploaded_Predictions'
+     except Exception as e:
+         return pn.pane.Markdown(f"""{e}""")
+
+     return pn.widgets.DataFrame(error_df, width=300, height=100, name='Score Board')
+
+
+ def get_real_test_timeseries():
+     global real_test_data, predictions
+     real_test_data = pd.read_csv(
+         'https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/TimeSeriesCompetition/test_data/competition_real_test_data_2018.csv'
+     ).dropna()
+     if file_input_ts.value is None:
+         predictions = pd.DataFrame({'GHI': [real_test_data['GHI'].mean()] * len(real_test_data)})
+     else:
+         predictions = BytesIO()
+         predictions.write(file_input_ts.value)
+         predictions.seek(0)
+         print(file_input_ts.filename)
+         predictions = pd.read_csv(predictions, error_bad_lines=False).dropna()  # .set_index("id")
+         if len(predictions.columns) > 1:
+             predictions = predictions[[predictions.columns[-1]]]
+         predictions = predictions._get_numeric_data()
+         predictions[predictions < 0] = 0  # predictions can't be negative for a solar energy prediction task
+
+ def github_cred():
+     from github import Github
+     repo_name = 'firobeid/TimeSeriesCompetitionTracker'
+     # using an access token
+     g = Github(os.getenv('GITHUB_TOKEN'))
+     return g.get_repo(repo_name)
+
+ def leaderboard_ts():
+     global file_on_github
+     # Create Github linkage instance
+     repo = github_cred()
+     contents = repo.get_contents("")
+     competitior_rank_file = 'leadership_board_ts.csv'
+     if competitior_rank_file not in [i.path for i in contents]:
+         print("Creating leaderboard file...")
+         repo.create_file(competitior_rank_file, "creating timeseries leaderboard", "Competitor_Submission, RMSE", branch="main")
+     file_on_github = pd.read_csv("https://raw.githubusercontent.com/firobeid/TimeSeriesCompetitionTracker/main/leadership_board_ts.csv", delim_whitespace=" ")
+
+ def upload_scores():
+     global rmse_error, sub_name, file_on_github
+     competitior_rank_file = 'leadership_board_ts.csv'
+     repo = github_cred()
+     submission = sub_name
+     score = rmse_error
+     leaderboard_ts()
+     file_on_github.loc[len(file_on_github.index)] = [submission, score]
+
+     target_content = repo.get_contents(competitior_rank_file)
+     repo.update_file(competitior_rank_file, "Uploading scores for %s" % sub_name, file_on_github.to_string(index=False), target_content.sha, branch="main")
+     return pn.pane.Markdown("""Successfully Uploaded to Leaderboard!""")
+
+ def final_github():
+     global sub_name
+     global real_test_data, predictions, rmse_error
+     sub_name = str(prediction_submission_name.value.replace("\n", "").replace(" ", ""))
+     print(sub_name)
+     if 'rmse_error' not in globals():  # don't save an RMSE every time the site is reloaded
+         return pn.widgets.DataFrame(file_on_github.sort_values(by='RMSE', ascending=True).set_index('Competitor_Submission'), width=600, height=1000, name='Leader Board')
+     else:
+         try:
+             if sub_name != 'Firas_Prediction_v1':  # also: don't save an RMSE every time the site is reloaded
+                 upload_scores()
+         except Exception as e:
+             return pn.pane.Markdown(f"""{e}""")
+         file_on_github["Rank"] = file_on_github.rank(method="min")["RMSE"]
+         return pn.widgets.DataFrame(file_on_github.sort_values(by='RMSE', ascending=True).set_index('Rank'), width=600, height=1000, name='Leader Board')
+
+ run_github_upload = pn.widgets.Button(name="Click to Upload Results to the Leaderboard!")
+ prediction_submission_name = pn.widgets.TextAreaInput(value="Firas_Prediction_v1", height=100, name='Change the name of the submission below:')
+ widgets_submission = pn.WidgetBox(
+     pn.panel("""# Submit to LeaderBoard Ranking""", margin=(0, 10)),
+     pn.panel('* Change the submission name below to your own version and team name (no spaces in between)', margin=(0, 10)),
+     prediction_submission_name,
+     run_github_upload,
+     pn.pane.Alert("""## Leader Ranking Board""", alert_type="success",),
+     width=500
+ )
+
+ @pn.depends(run_github_upload.param.clicks)
+ def ts_competition_submission(_):
+     leaderboard_ts()
+     return pn.Column(final_github)
+
+
+ run_button = pn.widgets.Button(name="Click to get model scores!")
+ file_input_ts = pn.widgets.FileInput(align='center')
+ text_ts = """
+ # Prediction Error Scoring
+
+ This section hosts a time-series modelling competition between UC Berkeley student teams. The teams
+ build a univariate or multivariate time-series model; the aim is to forecast the `GHI` column (a solar energy storage metric).
+
+ The train data is 30-minute-frequency solar energy data for the UT Dallas area between 2010-2017. The students then predict the whole of 2018,
+ which is 17519 data points (periods) into the future. The students submit their predictions as a CSV here,
+ get an error score (RMSE; maybe not the best metric, but it serves the learning objective) and submit to the leaderboard to be ranked. Public submissions
+ are welcome! But I can't give you extra points on project 2 ;)
+
+ The data used for the modelling can be found here:
+ [Competition Data](https://github.com/firobeid/Forecasting-techniques/tree/master/train_data)
+
+ ### Instructions
+ 1. Upload a predictions CSV (only numerical data)
+ 2. Make sure your CSV has exactly 17519 prediction rows and only one column
+ 3. Press `Click to get model scores!`
+ 4. Observe your prediction error in the yellow box below
+ 5. If satisfied, move on to the next box to the right to submit your team name and predictions.
+ `My code takes care of pulling your error and storing it on GitHub to be ranked against incoming scores from other teams`
+ """
+ widgets_ts = pn.WidgetBox(
+     pn.panel(text_ts, margin=(0, 10)),
+     pn.panel('Upload Prediction CSV', margin=(0, 10)),
+     file_input_ts,
+     run_button,
+     pn.pane.Alert("### Prediction Results Will Refresh Below After Clicking Above", alert_type="warning"),
+     width=500
+ )
+
+ def update_target(event):
+     get_real_test_timeseries()
+
+ file_input_ts.param.watch(update_target, 'value')
+
+ @pn.depends(run_button.param.clicks)
+ def ts_competition(_):
+     get_real_test_timeseries()
+     return pn.Column(cal_error_metrics)
+ ##########################
+ ##   ML COMPETITION     ##
+ ##########################
+ reward_ml = pn.pane.PNG("https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/TimeSeriesCompetition/Images/Reward.png")
+ # other_metrics = pn.pane.PNG("https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ts/Regression_Loss_functions.png", height = 500)
+ def amex_metric_mod(y_true, y_pred):
+     # Amex-competition-style metric: 0.5 * (normalized Gini + positive-capture rate in the top weighted slice)
+     labels = np.transpose(np.array([y_true, y_pred]))
+     labels = labels[labels[:, 1].argsort()[::-1]]
+     weights = np.where(labels[:, 0] == 0, 10, 1)
+     cut_vals = labels[np.cumsum(weights) <= int(0.15 * np.sum(weights))]
+     top_four = np.sum(cut_vals[:, 0]) / np.sum(labels[:, 0])
+
+     gini = [0, 0]
+     for i in [1, 0]:
+         labels = np.transpose(np.array([y_true, y_pred]))
+         labels = labels[labels[:, i].argsort()[::-1]]
+         weight = np.where(labels[:, 0] == 0, 10, 1)
+         weight_random = np.cumsum(weight / np.sum(weight))
+         total_pos = np.sum(labels[:, 0] * weight)
+         cum_pos_found = np.cumsum(labels[:, 0] * weight)
+         lorentz = cum_pos_found / total_pos
+         gini[i] = np.sum((lorentz - weight_random) * weight)
+
+     return 0.5 * (gini[1] / gini[0] + top_four)
+
+ def ks(y_real, y_proba):
+     from scipy.stats import ks_2samp
+     df = pd.DataFrame()
+     df['real'] = y_real
+     df['proba'] = y_proba
+
+     # Recover each class
+     class0 = df[df['real'] == 0]
+     class1 = df[df['real'] == 1]
+
+     ks_ = ks_2samp(class0['proba'], class1['proba'])
+
+     return ks_[0]
+
+ def expected_calibration_error(y, proba, bins='fd'):
+     bin_count, bin_edges = np.histogram(proba, bins=bins)
+     n_bins = len(bin_count)
+     bin_edges[0] -= 1e-8  # because the left edge is not included
+     bin_id = np.digitize(proba, bin_edges, right=True) - 1
+     bin_ysum = np.bincount(bin_id, weights=y, minlength=n_bins)
+     bin_probasum = np.bincount(bin_id, weights=proba, minlength=n_bins)
+     bin_ymean = np.divide(bin_ysum, bin_count, out=np.zeros(n_bins), where=bin_count > 0)
+     bin_probamean = np.divide(bin_probasum, bin_count, out=np.zeros(n_bins), where=bin_count > 0)
+     ece = np.abs((bin_probamean - bin_ymean) * bin_count).sum() / len(proba)
+     return ece
+
+ def save_csv(type_, name):
+     sio = StringIO()
+     if type_ == 'dev':
+         df = pd.read_csv('https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/train_data/dev_data.csv')
+     elif type_ == 'test':
+         df = pd.read_csv('https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/test_data/test_data.csv')
+     df.to_csv(sio)
+     sio.seek(0)
+     return pn.widgets.FileDownload(sio, embed=True, filename='%s.csv' % name)
+
+ def cal_error_metrics_ml():
+     global real_test_data_ml, predictions_ml, metrics
+
+     def all_metrics(y_true, y_test):
+         from sklearn.metrics import roc_auc_score
+         return {"Amex_Metric": amex_metric_mod(y_true, y_test),
+                 "KS": ks(y_true, y_test),
+                 "Expected Calibration Error": expected_calibration_error(y_true, y_test),
+                 "AUC": roc_auc_score(y_true, y_test)}
+
+     try:
+         assert len(real_test_data_ml) == len(predictions_ml)
+     except Exception as e:  # prediction length must match the test set
+         return pn.pane.Markdown(f"""ERROR: You didn't upload exactly {len(real_test_data_ml)} prediction rows!""")
+     try:
+         metrics = all_metrics(real_test_data_ml["loan_status"].values, predictions_ml[predictions_ml.columns[0]].values)
+         error_df = pd.DataFrame({"Metrics_Value": metrics}).T
+         error_df.index.name = 'Results'
+     except Exception as e:
+         return pn.pane.Markdown(f"""{e}""")
+
+     return pn.widgets.DataFrame(error_df, layout='fit_columns', width=700, height=100, name='Score Board')
+
+
+ def get_real_test_labels():
+     global real_test_data_ml, predictions_ml
+     real_test_data_ml = pd.read_csv(
+         'https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/test_data/test_labels.csv'
+     ).dropna()
+     if file_input_ml.value is None:
+         predictions_ml = pd.DataFrame({'loan_status': np.random.choice([0, 1], size=len(real_test_data_ml), p=[0.87, 0.13])})
+     else:
+         predictions_ml = BytesIO()
+         predictions_ml.write(file_input_ml.value)
+         predictions_ml.seek(0)
+         print(file_input_ml.filename)
+         predictions_ml = pd.read_csv(predictions_ml, error_bad_lines=False).dropna()  # .set_index("id")
+         if len(predictions_ml.columns) > 1:
+             predictions_ml = predictions_ml[[predictions_ml.columns[-1]]]
+         predictions_ml = predictions_ml._get_numeric_data()
+
+
+ run_button_ml = pn.widgets.Button(name="Click to get model scores!")
+ file_input_ml = pn.widgets.FileInput(align='center')
+ text_ml = """
+ # Lending Club Prediction Competition
+
+ This section hosts an ML classification competition between UC Berkeley student teams. The teams/individuals
+ build classification models to predict the `loan_status` column on test data whose true labels I set aside.
+
+ This modelling competition covers full ML model building on a Lending Club dataset. I downsampled the original
+ development sample from 2 million+ rows to 200k+ rows for practicality. The downsampling preserved the target
+ variable distribution, as it was done controlling for the following columns:
+
+ `["addr_state", "issue_d", "zip_code", "grade", "sub_grade", "term"]`
+
+ The test set, which is 20863 rows, is to be passed through the students' champion models for predictions. The students check their predictions via a CSV upload
+ here, to get several metric scores (for all the metrics below, higher is better, except for expected_calibration_error, where lower is better).
+
+ The competition data used for the modelling can be found here (right-click and copy the link):
+
+ ### Download the Train and Test Data
+
+ * [Starter Code](https://github.com/firobeid/firobeid.github.io/blob/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/Start_Code/UCBerkeley_LendingClubData.ipynb)
+ * [Competition Development Data](https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/train_data/dev_data.csv)
+ * [Test Unlabeled Data](https://raw.githubusercontent.com/firobeid/firobeid.github.io/main/docs/compose-plots/Resources/ML_lectures/ML_Competition/test_data/test_data.csv)
+
+ To access the data locally, copy either of the last two hyperlinks and paste it as follows:
+
+ ```
+ import pandas as pd
+ df = pd.read_csv('https://raw.githubusercontent.com/...')
+ ```
+ ### Instructions
+ 1. Upload a predictions CSV (only numerical data)
+ 2. Make sure your CSV has exactly 20863 prediction rows and only one column
+ 3. Press `Click to get model scores!`
+ 4. Observe your prediction scores in the yellow box below
+ 5. If satisfied, send me your predictions.csv privately in Slack!
+ """
+ widgets_ml = pn.WidgetBox(
+     pn.panel(text_ml, margin=(0, 20)),
+     # pn.Row(save_csv('dev', 'development_data'), save_csv('test', 'test_data'), width = 500),
+     pn.panel('Upload Prediction CSV', margin=(0, 10)),
+     file_input_ml,
+     run_button_ml,
+     pn.pane.Alert("### Prediction Results Will Refresh Below After Clicking Above", alert_type="warning"),
+     width=700
+ )
+
+ def update_target_ml(event):
+     get_real_test_labels()
+
+ file_input_ml.param.watch(update_target_ml, 'value')
+
+ @pn.depends(run_button_ml.param.clicks)
+ def ml_competition(_):
+     get_real_test_labels()
+     return pn.Column(cal_error_metrics_ml)
+
+
+ #########
+ ##FINAL##
+ #########
+ tabs = pn.Tabs(
+     ("Welcome", pn.Column(welcome, image)),
+     ("Pythonic Text Munging", pn.Tabs(
+         ("Title", pn.Column(pn.Row(title_0))),
+         ("Coding Competition", pn.Row(python_intro, pn.layout.Spacer(width=20), pn.Column(py_widgets_submission, python_competition_submission)))
+     )),
+     ("DataViz", pn.Tabs(
+         ("Title", pn.Column(pn.Row(title1), hvplot_snip)),
+         ("total_payments_by_state", pn.Row(plot1)),
+         ("sorted_total_payments_by_state", pn.Row(plot2)),
+         ("Tab1 + Tab2", pn.Column(plot3, width=960)),
+         ("sorted_total_medicare_by_state", pn.Row(plot4, plot5, plot6, width=2000))
+     )),
+     ("Zen of ML", pn.Tabs(
+         ("Title", pn.Row(title2, gif_pane, pn.Column(motivational, progress_))),
+         ('Lets Get Things Straight', pn.Column(ml_slider, ml_output)),
+         ('Data Considerations!!', pn.Column(data_slider, data_output)),
+         ('Unsupervised Learning (Clustering)', pn.Row(pn.Column(clustering_slider, cluster_output), k_means_simple)),
+         ("TimeSeries Forecasting", pn.Row(timeseries_libs, pn.Column(ts_gif, ts_cv), timeseries_data_split)),
+         ("General ML Algorithms' Survey", pn.Row(pn.Column(general_ml_slider, general_ml_output), ML_algoes, pn.Column(knn_scratch, ML_metrics, prec_recall))),
+         ('TimeSeries Competition Error Metric', pn.Row(pn.Column(widgets_ts, ts_competition, reward), pn.layout.Spacer(width=20), pn.Column(widgets_submission, ts_competition_submission), pn.layout.Spacer(width=20), pn.Column(pn.pane.Markdown("### Other Metrics Can Be Used:"), other_metrics))),
+         ('ML Classification Competition', pn.Row(pn.Column(widgets_ml, ml_competition, reward), pn.layout.Spacer(width=30), pn.layout.Spacer(width=20), pn.Column(pn.pane.Markdown("### Keep this in mind:"), ML_quote))),
+         ('Neural Networks Visit', pn.Row(pn.Column(dl_slider, dl_output), DL_tips))
+     ))
+ )
+
+
+ audio = pn.pane.Audio('http://ccrma.stanford.edu/~jos/mp3/pno-cs.mp3', name='Audio')
+ pn.Column(pn.Row(title), tabs, pn.Row(pn.pane.Alert("Enjoy some background classical music", alert_type="success"), audio)).servable(target='main')