Skip to content

Commit 50ed043

Browse files
committed
boilerplate + scaffolding
0 parents  commit 50ed043

35 files changed

+1014
-0
lines changed

.dvc/.gitignore

+9
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,9 @@
/config.local
/updater
/lock
/updater.lock
/tmp
/state-journal
/state-wal
/state
/cache

.dvc/config

Whitespace-only changes.

.gitignore

+89
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,89 @@
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]

# C extensions
*.so

# Distribution / packaging
.Python
env/
build/
develop-eggs/
dist/
downloads/
eggs/
.eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg

# PyInstaller
# Usually these files are written by a python script from a template
# before PyInstaller builds the exe, so as to inject date/other infos into it.
*.manifest
*.spec

# Installer logs
pip-log.txt
pip-delete-this-directory.txt

# Unit test / coverage reports
htmlcov/
.tox/
.coverage
.coverage.*
.cache
nosetests.xml
coverage.xml
*.cover

# Translations
*.mo
*.pot

# Django stuff:
*.log

# Sphinx documentation
docs/_build/

# PyBuilder
target/

# DotEnv configuration
.env

# Database
*.db
*.rdb

# Pycharm
.idea

# VS Code
.vscode/

# Spyder
.spyproject/

# Jupyter NB Checkpoints
.ipynb_checkpoints/

# exclude data from source control by default
/data/

# Mac OS-specific storage files
.DS_Store

# vim
*.swp
*.swo

# Mypy cache
.mypy_cache/

LICENSE

+10
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,10 @@
The MIT License (MIT)
Copyright (c) 2020, Thomas Tu

Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:

The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.

THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.

Makefile

+144
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,144 @@
# Makefile for the CaReCur project (cookiecutter-data-science layout).
# Provides environment setup, data pipeline, lint/clean, and S3 sync commands.
#
# Fix: the original .PHONY list omitted create_environment and test_environment,
# so a file by either name would silently shadow those commands.
.PHONY: clean data lint requirements sync_data_to_s3 sync_data_from_s3 \
        create_environment test_environment

# Remove a half-written target when its recipe fails, so a failed run never
# leaves a corrupt file that looks "up to date".
.DELETE_ON_ERROR:

#################################################################################
# GLOBALS                                                                       #
#################################################################################

# Absolute path of the directory containing this Makefile (resolved once at
# parse time via := so the shell call runs only once).
PROJECT_DIR := $(shell dirname $(realpath $(lastword $(MAKEFILE_LIST))))
# NOTE(review): BUCKET is a template placeholder — replace it with a real bucket
# name before using the sync targets; the placeholder text itself is not a
# valid bucket name.
BUCKET = [OPTIONAL] your-bucket-for-syncing-data (do not include 's3://')
PROFILE = default
PROJECT_NAME = CaReCur
PYTHON_INTERPRETER = python3

# Detect whether conda is available on PATH (empty `which` output => no conda).
ifeq (,$(shell which conda))
HAS_CONDA=False
else
HAS_CONDA=True
endif

#################################################################################
# COMMANDS                                                                      #
#################################################################################

## Install Python Dependencies
requirements: test_environment
	$(PYTHON_INTERPRETER) -m pip install -U pip setuptools wheel
	$(PYTHON_INTERPRETER) -m pip install -r requirements.txt

## Make Dataset
data: requirements
	$(PYTHON_INTERPRETER) src/data/make_dataset.py data/raw data/processed

## Delete all compiled Python files
clean:
	find . -type f -name "*.py[co]" -delete
	find . -type d -name "__pycache__" -delete

## Lint using flake8
lint:
	flake8 src

## Upload Data to S3
# --profile is only passed for non-default AWS profiles.
sync_data_to_s3:
ifeq (default,$(PROFILE))
	aws s3 sync data/ s3://$(BUCKET)/data/
else
	aws s3 sync data/ s3://$(BUCKET)/data/ --profile $(PROFILE)
endif

## Download Data from S3
sync_data_from_s3:
ifeq (default,$(PROFILE))
	aws s3 sync s3://$(BUCKET)/data/ data/
else
	aws s3 sync s3://$(BUCKET)/data/ data/ --profile $(PROFILE)
endif

## Set up python interpreter environment
# Uses conda when available; otherwise falls back to virtualenvwrapper.
create_environment:
ifeq (True,$(HAS_CONDA))
	@echo ">>> Detected conda, creating conda environment."
# Crude python-version sniff: a "3" anywhere in the interpreter name selects py3.
ifeq (3,$(findstring 3,$(PYTHON_INTERPRETER)))
	conda create --name $(PROJECT_NAME) python=3
else
	conda create --name $(PROJECT_NAME) python=2.7
endif
	@echo ">>> New conda env created. Activate with:\nsource activate $(PROJECT_NAME)"
else
	$(PYTHON_INTERPRETER) -m pip install -q virtualenv virtualenvwrapper
	@echo ">>> Installing virtualenvwrapper if not already installed.\nMake sure the following lines are in shell startup file\n\
	export WORKON_HOME=$$HOME/.virtualenvs\nexport PROJECT_HOME=$$HOME/Devel\nsource /usr/local/bin/virtualenvwrapper.sh\n"
	@bash -c "source `which virtualenvwrapper.sh`;mkvirtualenv $(PROJECT_NAME) --python=$(PYTHON_INTERPRETER)"
	@echo ">>> New virtualenv created. Activate with:\nworkon $(PROJECT_NAME)"
endif

## Test python environment is setup correctly
test_environment:
	$(PYTHON_INTERPRETER) test_environment.py

#################################################################################
# PROJECT RULES                                                                 #
#################################################################################



#################################################################################
# Self Documenting Commands                                                     #
#################################################################################

# Running bare `make` prints the help listing below.
.DEFAULT_GOAL := help

# Inspired by <http://marmelab.com/blog/2016/02/29/auto-documented-makefile.html>
# sed script explained:
# /^##/:
# 	* save line in hold space
# 	* purge line
# 	* Loop:
# 		* append newline + line to hold space
# 		* go to next line
# 		* if line starts with doc comment, strip comment character off and loop
# 	* remove target prerequisites
# 	* append hold space (+ newline) to line
# 	* replace newline plus comments by `---`
# 	* print line
# Separate expressions are necessary because labels cannot be delimited by
# semicolon; see <http://stackoverflow.com/a/11799865/1968>
.PHONY: help
help:
	@echo "$$(tput bold)Available rules:$$(tput sgr0)"
	@echo
	@sed -n -e "/^## / { \
		h; \
		s/.*//; \
		:doc" \
		-e "H; \
		n; \
		s/^## //; \
		t doc" \
		-e "s/:.*//; \
		G; \
		s/\\n## /---/; \
		s/\\n/ /g; \
		p; \
	}" ${MAKEFILE_LIST} \
	| LC_ALL='C' sort --ignore-case \
	| awk -F '---' \
		-v ncol=$$(tput cols) \
		-v indent=19 \
		-v col_on="$$(tput setaf 6)" \
		-v col_off="$$(tput sgr0)" \
	'{ \
		printf "%s%*s%s ", col_on, -indent, $$1, col_off; \
		n = split($$2, words, " "); \
		line_length = ncol - indent; \
		for (i = 1; i <= n; i++) { \
			line_length -= length(words[i]) + 1; \
			if (line_length <= 0) { \
				line_length = ncol - indent - length(words[i]) - 1; \
				printf "\n%*s ", -indent, " "; \
			} \
			printf "%s ", words[i]; \
		} \
		printf "\n"; \
	}' \
	| more $(shell test $(shell uname) = Darwin && echo '--no-init --raw-control-chars')

README.md

+57
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,57 @@
1+
carecur
2+
==============================
3+
4+
California Renewables Curtailment project.
5+
6+
Project Organization
7+
------------
8+
9+
├── LICENSE
10+
├── Makefile <- Makefile with commands like `make data` or `make train`
11+
├── README.md <- The top-level README for developers using this project.
12+
├── data
13+
│   ├── external <- Data from third party sources.
14+
│   ├── interim <- Intermediate data that has been transformed.
15+
│   ├── processed <- The final, canonical data sets for modeling.
16+
│   └── raw <- The original, immutable data dump.
17+
18+
├── docs <- A default Sphinx project; see sphinx-doc.org for details
19+
20+
├── models <- Trained and serialized models, model predictions, or model summaries
21+
22+
├── notebooks <- Jupyter notebooks. Naming convention is a number (for ordering),
23+
│ the creator's initials, and a short `-` delimited description, e.g.
24+
│ `1.0-jqp-initial-data-exploration`.
25+
26+
├── references <- Data dictionaries, manuals, and all other explanatory materials.
27+
28+
├── reports <- Generated analysis as HTML, PDF, LaTeX, etc.
29+
│   └── figures <- Generated graphics and figures to be used in reporting
30+
31+
├── requirements.txt <- The requirements file for reproducing the analysis environment, e.g.
32+
│ generated with `pip freeze > requirements.txt`
33+
34+
├── setup.py <- makes project pip installable (pip install -e .) so src can be imported
35+
├── src <- Source code for use in this project.
36+
│   ├── __init__.py <- Makes src a Python module
37+
│ │
38+
│   ├── data <- Scripts to download or generate data
39+
│   │   └── make_dataset.py
40+
│ │
41+
│   ├── features <- Scripts to turn raw data into features for modeling
42+
│   │   └── build_features.py
43+
│ │
44+
│   ├── models <- Scripts to train models and then use trained models to make
45+
│ │ │ predictions
46+
│   │   ├── predict_model.py
47+
│   │   └── train_model.py
48+
│ │
49+
│   └── visualization <- Scripts to create exploratory and results oriented visualizations
50+
│   └── visualize.py
51+
52+
└── tox.ini <- tox file with settings for running tox; see tox.testrun.org
53+
54+
55+
--------
56+
57+
<p><small>Project based on the <a target="_blank" href="https://drivendata.github.io/cookiecutter-data-science/">cookiecutter data science project template</a>. #cookiecutterdatascience</small></p>

0 commit comments

Comments
 (0)