
Commit c5117a7

Merge pull request #2 from christianversloot/add-testing-action
Add automated testing to repository
2 parents 6c023e2 + 5c23e55

10 files changed: +377 -279 lines
Lines changed: 36 additions & 0 deletions

@@ -0,0 +1,36 @@
+# This workflow will install Python dependencies, run tests and lint with a variety of Python versions
+# For more information see: https://help.github.com/actions/language-and-framework-guides/using-python-with-github-actions
+
+name: Python package
+
+on:
+  push:
+    branches: [ master ]
+  pull_request:
+    branches: [ master ]
+
+jobs:
+  build:
+
+    runs-on: ubuntu-latest
+    strategy:
+      matrix:
+        python-version: ['3.6', '3.7', '3.8', '3.9']
+
+    steps:
+    - uses: actions/checkout@v2
+    - name: Set up Python ${{ matrix.python-version }}
+      uses: actions/setup-python@v2
+      with:
+        python-version: ${{ matrix.python-version }}
+    - name: Install dependencies
+      run: |
+        python -m pip install --upgrade pip
+        python -m pip install flake8
+        if [ -f requirements.txt ]; then pip install -r requirements.txt; fi
+    - name: Lint with flake8
+      run: |
+        # stop the build if there are Python syntax errors or undefined names
+        flake8 . --count --select=E9,F63,F7,F82 --show-source --statistics
+        # exit-zero treats all errors as warnings. The GitHub editor is 127 chars wide
+        flake8 . --count --exit-zero --max-complexity=10 --max-line-length=127 --statistics
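
Note: as merged, this job only lints with flake8; no test runner is invoked despite the commit title. If a test suite were added later, the job could install pytest and run tests along the lines of the sketch below. Everything in it is illustrative and not part of this commit: the file name test_iris.py, the pytest runner, and the count assertion (the UCI iris.data file carries 150 data rows, so floor(150 * 0.2) = 30 test samples) are assumptions.

# test_iris.py -- illustrative sketch, not part of this commit.
# Assumes pytest as the runner and that extra_keras_datasets is importable
# in the CI environment (network access is needed for the first download).
from extra_keras_datasets import iris


def test_iris_split_sizes():
    # load_data downloads iris.data on first call (cached under ~/.keras/datasets).
    (input_train, target_train), (input_test, target_test) = iris.load_data(
        test_split=0.2
    )
    # Inputs and targets must stay aligned after the shuffle/split.
    assert len(input_train) == len(target_train)
    assert len(input_test) == len(target_test)
    # 150 samples with a 20% split -> floor(150 * 0.2) == 30 test samples.
    assert len(input_test) == 30

The workflow would then also need a step that runs python -m pip install pytest followed by pytest.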

.vscode/settings.json

Lines changed: 3 additions & 0 deletions

@@ -0,0 +1,3 @@
+{
+    "python.formatting.provider": "black"
+}

assets/basic_template.py

Lines changed: 20 additions & 19 deletions

@@ -9,28 +9,29 @@
 
 '''
 
-from tensorflow.keras.utils import get_file
 import numpy as np
 
+
 def load_data(path='<Dataset_slug>.npz', size='small'):
-    """Loads the <Dataset name>
-    # Arguments
-      path: path where to cache the dataset locally
-        (relative to ~/.keras/datasets).
-      size: small or large, indicating dummy dataset size to return.
-    # Returns
-      Tuple of Numpy arrays: `(input_train, target_train), (input_test, target_test)`.
-    """
+    """Loads the <Dataset name>
+    # Arguments
+        path: path where to cache the dataset locally
+            (relative to ~/.keras/datasets).
+        size: small or large, indicating dummy dataset size to return.
+    # Returns
+        Tuple of Numpy arrays: `(input_train, target_train),
+        (input_test, target_test)`.
+    """
 
     if size == 'small':
-      input_train = np.array([1, 2])
-      target_train = np.array([0, 1])
-      input_test = np.array([2, 3])
-      target_test = np.array([1, 0])
+        input_train = np.array([1, 2])
+        target_train = np.array([0, 1])
+        input_test = np.array([2, 3])
+        target_test = np.array([1, 0])
    else:
-      input_train = np.array([1, 2, 84, 9, 1, 48, 2])
-      target_train = np.array([0, 1, 0, 0, 0, 1, 1])
-      input_test = np.array([2, 3, 32, 84, 99, 1, 2])
-      target_test = np.array([1, 0, 0, 0, 1, 0, 1])
-
-  return (input_train, target_train), (input_test, target_test)
+        input_train = np.array([1, 2, 84, 9, 1, 48, 2])
+        target_train = np.array([0, 1, 0, 0, 0, 1, 1])
+        input_test = np.array([2, 3, 32, 84, 99, 1, 2])
+        target_test = np.array([1, 0, 0, 0, 1, 0, 1])
+
+    return (input_train, target_train), (input_test, target_test)
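
For orientation, the template's dummy loader can be smoke-tested directly. A minimal sketch, assuming assets/basic_template.py is importable as basic_template (that module name is hypothetical, taken from the file name):

# Illustrative only; assumes assets/basic_template.py is on the Python path.
import basic_template

# The template returns small dummy arrays, handy for quick smoke tests.
(input_train, target_train), (input_test, target_test) = basic_template.load_data(
    size="small"
)
assert input_train.shape == (2,) and target_train.shape == (2,)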

extra_keras_datasets/__init__.py

Lines changed: 3 additions & 1 deletion

@@ -6,4 +6,6 @@
 from . import kmnist
 from . import svhn
 from . import stl10
-from . import iris
+from . import iris
+
+__all__ = ['emnist', 'kmnist', 'svhn', 'stl10', 'iris']
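
With __all__ in place, a wildcard import binds exactly the five listed submodules. A quick sketch of the effect, assuming the package is installed:

# Wildcard import picks up only the names listed in __all__.
from extra_keras_datasets import *

# Each submodule is then bound directly, e.g.:
(input_train, target_train), (input_test, target_test) = iris.load_data()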

extra_keras_datasets/emnist.py

Lines changed: 56 additions & 43 deletions

@@ -1,55 +1,68 @@
-'''
+"""
 Import the EMNIST dataset
 Source: https://www.nist.gov/itl/products-and-services/emnist-dataset
-Description: The EMNIST dataset is a set of handwritten character digits derived from the NIST Special Database 19 and converted to a 28x28 pixel image format and dataset structure that directly matches the MNIST dataset
+Description: The EMNIST dataset is a set of handwritten character
+digits derived from the NIST Special Database 19 and converted to
+a 28x28 pixel image format and dataset structure that directly
+matches the MNIST dataset
 
 ~~~ Important note ~~~
 Please cite the following paper when using or referencing the dataset:
-Cohen, G., Afshar, S., Tapson, J., & van Schaik, A. (2017). EMNIST: an extension of MNIST to handwritten letters. Retrieved from http://arxiv.org/abs/1702.05373
+Cohen, G., Afshar, S., Tapson, J., & van Schaik, A. (2017). EMNIST:
+an extension of MNIST to handwritten letters.
+Retrieved from http://arxiv.org/abs/1702.05373
 
-'''
+"""
 
 from tensorflow.keras.utils import get_file
-import numpy as np
 from zipfile import ZipFile
 from scipy import io as sio
 import os
 
-def load_data(path='emnist_matlab.npz', type='balanced'):
-  """Loads the EMNIST dataset.
-  # Arguments
-    path: path where to cache the dataset locally
-      (relative to ~/.keras/datasets).
-    type: any of balanced, byclass, bymerge, digits, letters, mnist (defaults to balanced)
-  # Returns
-    Tuple of Numpy arrays: `(input_train, target_train), (input_test, target_test)`.
-  """
-  path = get_file(path,
-    origin='http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/matlab.zip')
-  with ZipFile(path, 'r') as opened_zip:
-
-    # Read file and temporarily store it
-    file_name = f'./{type}.mat'
-    f = open(file_name, 'wb')
-    f.write(opened_zip.read(f'matlab/emnist-{type}.mat'))
-    f.close()
-
-    # Load data from Matlab file.
-    # Source: https://stackoverflow.com/a/53547262
-    mat = sio.loadmat(file_name)
-    data = mat['dataset']
-    input_train = data['train'][0,0]['images'][0,0]
-    target_train = data['train'][0,0]['labels'][0,0].flatten()
-    input_test = data['test'][0,0]['images'][0,0]
-    target_test = data['test'][0,0]['labels'][0,0].flatten()
-
-    # Remove data when loaded
-    os.remove(file_name)
-
-    # Reshape input data
-    # Source: https://stackoverflow.com/a/53547262
-    input_train = input_train.reshape((input_train.shape[0], 28, 28), order='F')
-    input_test = input_test.reshape((input_test.shape[0], 28, 28), order='F')
-
-    # Return data
-    return (input_train, target_train), (input_test, target_test)
+
+def load_data(path="emnist_matlab.npz", type="balanced"):
+    """Loads the EMNIST dataset.
+    # Arguments
+        path: path where to cache the dataset locally
+            (relative to ~/.keras/datasets).
+        type: any of balanced, byclass, bymerge, digits, letters,
+            mnist (defaults to balanced)
+    # Returns
+        Tuple of Numpy arrays: `(input_train, target_train),
+        (input_test, target_test)`.
+    """
+    path = get_file(
+        path, origin=("http://www.itl.nist.gov/iaui/vip/cs_links/EMNIST/"
+                      "matlab.zip")
+    )
+    with ZipFile(path, "r") as opened_zip:
+
+        # Read file and temporarily store it
+        file_name = f"./{type}.mat"
+        f = open(file_name, "wb")
+        f.write(opened_zip.read(f"matlab/emnist-{type}.mat"))
+        f.close()
+
+        # Load data from Matlab file.
+        # Source: https://stackoverflow.com/a/53547262
+        mat = sio.loadmat(file_name)
+        data = mat["dataset"]
+        input_train = data["train"][0, 0]["images"][0, 0]
+        target_train = data["train"][0, 0]["labels"][0, 0].flatten()
+        input_test = data["test"][0, 0]["images"][0, 0]
+        target_test = data["test"][0, 0]["labels"][0, 0].flatten()
+
+        # Remove data when loaded
+        os.remove(file_name)
+
+        # Reshape input data
+        # Source: https://stackoverflow.com/a/53547262
+        input_train = input_train.reshape(
+            (input_train.shape[0], 28, 28), order="F"
+        )
+        input_test = input_test.reshape(
+            (input_test.shape[0], 28, 28), order="F"
+        )
+
+        # Return data
+        return (input_train, target_train), (input_test, target_test)
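
The black pass does not change behavior, so calls stay exactly as before. A minimal usage sketch; the "letters" split is just one of the options documented in the docstring:

# Illustrative usage; downloads and extracts matlab.zip on first use
# (cached by get_file under ~/.keras/datasets).
from extra_keras_datasets import emnist

(input_train, target_train), (input_test, target_test) = emnist.load_data(
    type="letters"
)
# Images come back as (num_samples, 28, 28) arrays.
print(input_train.shape, target_train.shape)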

extra_keras_datasets/iris.py

Lines changed: 73 additions & 61 deletions

@@ -1,82 +1,94 @@
-'''
+"""
 Import the Iris dataset
 Source: http://archive.ics.uci.edu/ml/datasets/Iris
-Description: The data set contains 3 classes of 50 instances each, where each class refers to a type of iris plant.
+Description: The data set contains 3 classes of 50 instances each, where
+each class refers to a type of iris plant.
 
 ~~~ Important note ~~~
 Please cite the following paper when using or referencing the dataset:
-Fisher,R.A. "The use of multiple measurements in taxonomic problems" Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions to Mathematical Statistics" (John Wiley, NY, 1950).
-'''
+Fisher,R.A. "The use of multiple measurements in taxonomic problems"
+Annual Eugenics, 7, Part II, 179-188 (1936); also in "Contributions
+to Mathematical Statistics" (John Wiley, NY, 1950).
+"""
 
 from tensorflow.keras.utils import get_file
 import numpy as np
 import math
 
-def load_data(path='iris.npz', test_split=0.2):
-  '''Loads the Iris dataset.
-  # Arguments
-    path: path where to cache the dataset locally
-      (relative to ~/.keras/datasets).
-    test_split: percentage of data to use for testing (by default 20%)
-  # Returns
-    Tuple of Numpy arrays: `(input_train, target_train), (input_test, target_test)`.
-    Input structure: (sepal length, sepal width, petal length, petal width)
-    Target structure: 0 = iris setosa; 1 = iris versicolor; 2 = iris virginica.
-  '''
-  path = get_file(path,
-    origin='http://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data')
 
-  # Read data from file
-  f = open(path, 'r')
-  lines = f.readlines()
-
-  # Process each line into input/target structure
-  samples = []
-  for line in lines:
-    sample = line_to_list(line)
-    if sample is not None:
-      samples.append(sample)
-  f.close()
+def load_data(path="iris.npz", test_split=0.2):
+    """Loads the Iris dataset.
+    # Arguments
+        path: path where to cache the dataset locally
+            (relative to ~/.keras/datasets).
+        test_split: percentage of data to use for testing (by default 20%)
+    # Returns
+        Tuple of Numpy arrays: `(input_train, target_train),
+        (input_test, target_test)`.
+        Input structure: (sepal length, sepal width, petal length,
+            petal width)
+        Target structure: 0 = iris setosa; 1 = iris versicolor;
+            2 = iris virginica.
+    """
+    path = get_file(
+        path,
+        origin=("http://archive.ics.uci.edu/ml/machine-learning-databases/"
+                "iris/iris.data")
+    )
 
-  # Randomly shuffle the data
-  np.random.shuffle(samples)
+    # Read data from file
+    f = open(path, "r")
+    lines = f.readlines()
 
-  # Compute test_split in length
-  num_test_samples = math.floor(len(samples) * test_split)
+    # Process each line into input/target structure
+    samples = []
+    for line in lines:
+        sample = line_to_list(line)
+        if sample is not None:
+            samples.append(sample)
+    f.close()
 
-  # Split data
-  training_data = samples[num_test_samples:]
-  testing_data = samples[:num_test_samples]
+    # Randomly shuffle the data
+    np.random.shuffle(samples)
+
+    # Compute test_split in length
+    num_test_samples = math.floor(len(samples) * test_split)
+
+    # Split data
+    training_data = samples[num_test_samples:]
+    testing_data = samples[:num_test_samples]
+
+    # Split into inputs and targets
+    input_train = [i[0:4] for i in training_data]
+    input_test = [i[0:4] for i in testing_data]
+    target_train = [i[4] for i in training_data]
+    target_test = [i[4] for i in testing_data]
+
+    # Return data
+    return (input_train, target_train), (input_test, target_test)
 
-  # Split into inputs and targets
-  input_train = [i[0:4] for i in training_data]
-  input_test = [i[0:4] for i in testing_data]
-  target_train = [i[4] for i in training_data]
-  target_test = [i[4] for i in testing_data]
 
-  # Return data
-  return (input_train, target_train), (input_test, target_test)
-
 def line_to_list(line):
-  '''
+    """
    Convert a String-based line into a list with input and target data.
-  '''
-  elements = line.split(',')
-  if len(elements) > 1:
-    target = target_string_to_int(elements[4])
-    full_sample = [float(i) for i in elements[0:4]]
-    full_sample.append(target)
-    return tuple(full_sample)
-  else:
-    return None
+    """
+    elements = line.split(",")
+    if len(elements) > 1:
+        target = target_string_to_int(elements[4])
+        full_sample = [float(i) for i in elements[0:4]]
+        full_sample.append(target)
+        return tuple(full_sample)
+    else:
+        return None
+
 
 def target_string_to_int(target_value):
-  '''
+    """
    Convert a String-based into an Integer-based target value.
-  '''
-  if target_value == 'Iris-setosa\n':
-    return 0
-  elif target_value == 'Iris-versicolor\n':
-    return 1
-  else:
-    return 2
+    """
+    if target_value == "Iris-setosa\n":
+        return 0
+    elif target_value == "Iris-versicolor\n":
+        return 1
+    else:
+        return 2
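
As with emnist.py, this is a formatting-only change. One behavioral detail worth knowing as a caller: load_data shuffles with np.random.shuffle before splitting, so successive calls yield different splits unless the global NumPy seed is pinned. A small sketch; the seed value is arbitrary:

# Illustrative usage; pin the global NumPy RNG so the shuffle inside
# load_data is reproducible across runs.
import numpy as np
from extra_keras_datasets import iris

np.random.seed(42)
(input_train, target_train), (input_test, target_test) = iris.load_data(
    test_split=0.2
)
# Targets are encoded 0 = setosa, 1 = versicolor, 2 = virginica.
print(len(input_train), len(input_test))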
