Skip to content

Commit b76cb5a

Browse files
committed
code refactor
upgrading to python 3.6.6
1 parent c20a59a commit b76cb5a

15 files changed

+1730
-323
lines changed

models/7SEF/7S_p2_clusters_Autumn.csv

Lines changed: 1047 additions & 0 deletions
Large diffs are not rendered by default.

models/7SEF/7S_p2_clusters_Autumn_v99.csv

Lines changed: 262 additions & 262 deletions
Large diffs are not rendered by default.

notebooks/7SEF/1.0-imad-automatic-script.ipynb

Lines changed: 4 additions & 4 deletions
Original file line numberDiff line numberDiff line change
@@ -214,7 +214,7 @@
214214
},
215215
{
216216
"cell_type": "code",
217-
"execution_count": 25,
217+
"execution_count": null,
218218
"metadata": {},
219219
"outputs": [],
220220
"source": [
@@ -224,9 +224,9 @@
224224
],
225225
"metadata": {
226226
"kernelspec": {
227-
"display_name": "Python (dev_py34)",
227+
"display_name": "Python (dev_py36)",
228228
"language": "python",
229-
"name": "dev_py34"
229+
"name": "dev_py36"
230230
},
231231
"language_info": {
232232
"codemirror_mode": {
@@ -238,7 +238,7 @@
238238
"name": "python",
239239
"nbconvert_exporter": "python",
240240
"pygments_lexer": "ipython3",
241-
"version": "3.4.5"
241+
"version": "3.6.6"
242242
}
243243
},
244244
"nbformat": 4,

notebooks/7SEF/Untitled.ipynb

Lines changed: 343 additions & 0 deletions
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,343 @@
1+
{
2+
"cells": [
3+
{
4+
"cell_type": "code",
5+
"execution_count": 52,
6+
"metadata": {},
7+
"outputs": [],
8+
"source": [
9+
"import numpy as np\n",
10+
"import pandas as pd\n",
11+
"import math\n",
12+
"import statsmodels.api as sm\n",
13+
"from scipy.stats import chisquare\n",
14+
"\n",
15+
"\n",
16+
"import settings\n",
17+
"import itertools\n",
18+
"from sklearn.preprocessing import (LabelBinarizer, LabelEncoder, MinMaxScaler,\n",
19+
" OneHotEncoder, StandardScaler, RobustScaler)\n",
20+
"\n",
21+
"\n",
22+
"\n",
23+
"\n",
24+
"def get_encoders(le_name,ohe_name,scaler_name):\n",
25+
" le_encoder = np.load(settings.models_path + le_name + '.npy').item()\n",
26+
" ohe_encoder = np.load(settings.models_path + ohe_name + '.npy').item()\n",
27+
" scaler = np.load(settings.models_path + scaler_name + '.npy').item()\n",
28+
"\n",
29+
" return le_encoder,ohe_encoder,scaler\n",
30+
"\n",
31+
"\n",
32+
"def create_encoder(df, le_name = None, ohe_name = None, scaler_name=None, categorical_features=None, numeric_features=None):\n",
33+
" \"\"\"Creates and stores a categorical encoder of a given dataframe\n",
34+
" \n",
35+
" Arguments:\n",
36+
"        df {DataFrame} -- The pandas DataFrame to encode\n",
37+
" \n",
38+
" Keyword Arguments:\n",
39+
" categorical_features {list} -- The list of categorical features to consider (default: {None})\n",
40+
"        numeric_features {list} -- The list of non-categorical features to ignore (default: {None})\n",
41+
" \n",
42+
" Returns:\n",
43+
"        tuple(dict,dict,OneHotEncoder) -- Return the encoders used in every column as a dictionary\n",
44+
" \"\"\"\n",
45+
"\n",
46+
"\n",
47+
" if (categorical_features is None):\n",
48+
" categorical_features = sorted(df.drop(numeric_features,axis=1).columns)\n",
49+
" le_dict = {}\n",
50+
" ohe_dict = {}\n",
51+
" scalers = {}\n",
52+
" for index, col in df[categorical_features].sort_index(axis=1).iteritems():\n",
53+
" if (numeric_features is not None) and (index in numeric_features):\n",
54+
" continue\n",
55+
" if index not in categorical_features:\n",
56+
" continue\n",
57+
" le = LabelEncoder().fit(col)\n",
58+
" le_dict[index] = le\n",
59+
" ohe = OneHotEncoder(categories=\"auto\").fit(le.transform(col).reshape((-1, 1)))\n",
60+
" ohe_dict[index] = ohe\n",
61+
"\n",
62+
" labeled_df = df[categorical_features].sort_index(axis=1).apply(lambda x: le_dict[x.name].transform(x))\n",
63+
" ohe_encoder = OneHotEncoder(categories=\"auto\").fit(labeled_df)\n",
64+
"\n",
65+
" # add numeric features\n",
66+
" if len(numeric_features)==0:\n",
67+
" numeric_features = (list(df.columns.to_series().groupby(df.dtypes).groups[np.dtype('float64')]))\n",
68+
" for f in numeric_features:\n",
69+
" values = df[[f]].values\n",
70+
" scaler = MinMaxScaler().fit(values)\n",
71+
" scalers[f] = scaler\n",
72+
"\n",
73+
"\n",
74+
" # if le_name is not None:\n",
75+
" # np.save(settings.models_path + le_name + '.npy', le_dict)\n",
76+
" # if ohe_name is not None:\n",
77+
" # np.save(settings.models_path + ohe_name + '.npy', ohe_encoder)\n",
78+
" # if scaler_name is not None:\n",
79+
" # np.save(settings.models_path + scaler_name + '.npy', scalers)\n",
80+
" \n",
81+
" return labeled_df, le_dict, ohe_encoder, scalers, categorical_features, numeric_features\n",
82+
" \n"
83+
]
84+
},
85+
{
86+
"cell_type": "code",
87+
"execution_count": 66,
88+
"metadata": {},
89+
"outputs": [
90+
{
91+
"data": {
92+
"text/html": [
93+
"<div>\n",
94+
"<style scoped>\n",
95+
" .dataframe tbody tr th:only-of-type {\n",
96+
" vertical-align: middle;\n",
97+
" }\n",
98+
"\n",
99+
" .dataframe tbody tr th {\n",
100+
" vertical-align: top;\n",
101+
" }\n",
102+
"\n",
103+
" .dataframe thead th {\n",
104+
" text-align: right;\n",
105+
" }\n",
106+
"</style>\n",
107+
"<table border=\"1\" class=\"dataframe\">\n",
108+
" <thead>\n",
109+
" <tr style=\"text-align: right;\">\n",
110+
" <th></th>\n",
111+
" <th>Color</th>\n",
112+
" <th>Size</th>\n",
113+
" <th>Ldate</th>\n",
114+
" <th>Age Group</th>\n",
115+
" <th>Person</th>\n",
116+
" <th>Pname</th>\n",
117+
" <th>Ptype</th>\n",
118+
" <th>Tprice</th>\n",
119+
" <th>Currency</th>\n",
120+
" <th>Sales Season</th>\n",
121+
" <th>s1</th>\n",
122+
" <th>s2</th>\n",
123+
" <th>s3</th>\n",
124+
" <th>s4</th>\n",
125+
" <th>s5</th>\n",
126+
" </tr>\n",
127+
" <tr>\n",
128+
" <th>Product</th>\n",
129+
" <th></th>\n",
130+
" <th></th>\n",
131+
" <th></th>\n",
132+
" <th></th>\n",
133+
" <th></th>\n",
134+
" <th></th>\n",
135+
" <th></th>\n",
136+
" <th></th>\n",
137+
" <th></th>\n",
138+
" <th></th>\n",
139+
" <th></th>\n",
140+
" <th></th>\n",
141+
" <th></th>\n",
142+
" <th></th>\n",
143+
" <th></th>\n",
144+
" </tr>\n",
145+
" </thead>\n",
146+
" <tbody>\n",
147+
" <tr>\n",
148+
" <th>3E+101_2</th>\n",
149+
" <td>Blue</td>\n",
150+
" <td>Thick</td>\n",
151+
" <td>45</td>\n",
152+
" <td>4-6</td>\n",
153+
" <td>Girls</td>\n",
154+
" <td>One Internal Pants</td>\n",
155+
" <td>Thick</td>\n",
156+
" <td>39.0</td>\n",
157+
" <td>$</td>\n",
158+
" <td>Winter</td>\n",
159+
" <td>101.0</td>\n",
160+
" <td>261.0</td>\n",
161+
" <td>309.0</td>\n",
162+
" <td>297.0</td>\n",
163+
" <td>323.0</td>\n",
164+
" </tr>\n",
165+
" <tr>\n",
166+
" <th>3E+201_2</th>\n",
167+
" <td>Red</td>\n",
168+
" <td>Thick</td>\n",
169+
" <td>45</td>\n",
170+
" <td>4-6</td>\n",
171+
" <td>Girls</td>\n",
172+
" <td>One Internal Pants</td>\n",
173+
" <td>Thick</td>\n",
174+
" <td>39.0</td>\n",
175+
" <td>$</td>\n",
176+
" <td>Winter</td>\n",
177+
" <td>81.0</td>\n",
178+
" <td>266.0</td>\n",
179+
" <td>297.0</td>\n",
180+
" <td>270.0</td>\n",
181+
" <td>257.0</td>\n",
182+
" </tr>\n",
183+
" <tr>\n",
184+
" <th>3E+301_2</th>\n",
185+
" <td>Blue</td>\n",
186+
" <td>Thick</td>\n",
187+
" <td>45</td>\n",
188+
" <td>4-6</td>\n",
189+
" <td>Girls</td>\n",
190+
" <td>One Internal Pants</td>\n",
191+
" <td>Thick</td>\n",
192+
" <td>39.0</td>\n",
193+
" <td>$</td>\n",
194+
" <td>Winter</td>\n",
195+
" <td>49.0</td>\n",
196+
" <td>179.0</td>\n",
197+
" <td>190.0</td>\n",
198+
" <td>192.0</td>\n",
199+
" <td>179.0</td>\n",
200+
" </tr>\n",
201+
" <tr>\n",
202+
" <th>30E000400_2</th>\n",
203+
" <td>Black</td>\n",
204+
" <td>Thick</td>\n",
205+
" <td>45</td>\n",
206+
" <td>4-6</td>\n",
207+
" <td>Girls</td>\n",
208+
" <td>One Internal Pants</td>\n",
209+
" <td>Thick</td>\n",
210+
" <td>39.0</td>\n",
211+
" <td>$</td>\n",
212+
" <td>Winter</td>\n",
213+
" <td>55.0</td>\n",
214+
" <td>222.0</td>\n",
215+
" <td>261.0</td>\n",
216+
" <td>275.0</td>\n",
217+
" <td>279.0</td>\n",
218+
" </tr>\n",
219+
" <tr>\n",
220+
" <th>30E823101_2</th>\n",
221+
" <td>Grey</td>\n",
222+
" <td>No Size</td>\n",
223+
" <td>39</td>\n",
224+
" <td>4-6</td>\n",
225+
" <td>Girls</td>\n",
226+
" <td>One Internal Pants</td>\n",
227+
" <td>Thick</td>\n",
228+
" <td>39.0</td>\n",
229+
" <td>$</td>\n",
230+
" <td>Winter</td>\n",
231+
" <td>3.0</td>\n",
232+
" <td>15.0</td>\n",
233+
" <td>18.0</td>\n",
234+
" <td>30.0</td>\n",
235+
" <td>30.0</td>\n",
236+
" </tr>\n",
237+
" </tbody>\n",
238+
"</table>\n",
239+
"</div>"
240+
],
241+
"text/plain": [
242+
" Color Size Ldate Age Group Person Pname \\\n",
243+
"Product \n",
244+
"3E+101_2 Blue Thick 45 4-6 Girls One Internal Pants \n",
245+
"3E+201_2 Red Thick 45 4-6 Girls One Internal Pants \n",
246+
"3E+301_2 Blue Thick 45 4-6 Girls One Internal Pants \n",
247+
"30E000400_2 Black Thick 45 4-6 Girls One Internal Pants \n",
248+
"30E823101_2 Grey No Size 39 4-6 Girls One Internal Pants \n",
249+
"\n",
250+
" Ptype Tprice Currency Sales Season s1 s2 s3 s4 \\\n",
251+
"Product \n",
252+
"3E+101_2 Thick 39.0 $ Winter 101.0 261.0 309.0 297.0 \n",
253+
"3E+201_2 Thick 39.0 $ Winter 81.0 266.0 297.0 270.0 \n",
254+
"3E+301_2 Thick 39.0 $ Winter 49.0 179.0 190.0 192.0 \n",
255+
"30E000400_2 Thick 39.0 $ Winter 55.0 222.0 261.0 275.0 \n",
256+
"30E823101_2 Thick 39.0 $ Winter 3.0 15.0 18.0 30.0 \n",
257+
"\n",
258+
" s5 \n",
259+
"Product \n",
260+
"3E+101_2 323.0 \n",
261+
"3E+201_2 257.0 \n",
262+
"3E+301_2 179.0 \n",
263+
"30E000400_2 279.0 \n",
264+
"30E823101_2 30.0 "
265+
]
266+
},
267+
"execution_count": 66,
268+
"metadata": {},
269+
"output_type": "execute_result"
270+
}
271+
],
272+
"source": [
273+
"from data.preprocessing import load_file\n",
274+
"\n",
275+
"df = load_file(\"clf_features\", type_=\"P\", index = [\"Product\"])\n",
276+
"\n",
277+
"categorical_features = [\"Color\",\"Size\",\"Age Group\",\"Ldate\",\"Person\",\"Pname\",\"Ptype\",\"Currency\",\"Sales Season\"]\n",
278+
"numeric_features = [\"Tprice\",\"s1\",\"s2\",\"s3\",\"s4\",\"s5\"]\n",
279+
"df.head()"
280+
]
281+
},
282+
{
283+
"cell_type": "code",
284+
"execution_count": 68,
285+
"metadata": {},
286+
"outputs": [
287+
{
288+
"name": "stdout",
289+
"output_type": "stream",
290+
"text": [
291+
"23.3 ms ± 3.11 ms per loop (mean ± std. dev. of 7 runs, 10 loops each)\n"
292+
]
293+
}
294+
],
295+
"source": [
296+
"%timeit labeled_df, le_dict, ohe_encoder, scalers, categorical_features, num_features = create_encoder(df, numeric_features=numeric_features)"
297+
]
298+
},
299+
{
300+
"cell_type": "code",
301+
"execution_count": 76,
302+
"metadata": {},
303+
"outputs": [
304+
{
305+
"data": {
306+
"text/plain": [
307+
"numpy.ndarray"
308+
]
309+
},
310+
"execution_count": 76,
311+
"metadata": {},
312+
"output_type": "execute_result"
313+
}
314+
],
315+
"source": [
316+
"t= np.zeros((1,1))\n",
317+
"\n",
318+
"type(t)"
319+
]
320+
}
321+
],
322+
"metadata": {
323+
"kernelspec": {
324+
"display_name": "Python (dev_py36)",
325+
"language": "python",
326+
"name": "dev_py36"
327+
},
328+
"language_info": {
329+
"codemirror_mode": {
330+
"name": "ipython",
331+
"version": 3
332+
},
333+
"file_extension": ".py",
334+
"mimetype": "text/x-python",
335+
"name": "python",
336+
"nbconvert_exporter": "python",
337+
"pygments_lexer": "ipython3",
338+
"version": "3.6.6"
339+
}
340+
},
341+
"nbformat": 4,
342+
"nbformat_minor": 2
343+
}

src/app.py

Lines changed: 5 additions & 3 deletions
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,5 @@
11
from flask import Flask, jsonify
2+
from flask_api import status
23
import random
34

45
from data import import_data
@@ -7,9 +8,10 @@
78

89
@app.route("/api/train")
910
def train_model():
10-
import_data.import_data()
11-
return jsonify("model training"), 404
12-
11+
if (import_data.import_data()):
12+
return jsonify("model training"), status.HTTP_200_OK
13+
else:
14+
return "The files are not ready to launch the training", status.HTTP_204_NO_CONTENT
1315

1416

1517

0 commit comments

Comments
 (0)