Use black profile in isort to prevent conflicts (#244)

* Use black profile in isort to prevent conflicts
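Why this prevents conflicts: with default settings, isort wraps long imports in a "grid" style that black then rewrites, so the two hooks can keep undoing each other on every run. A minimal sketch of the disagreement, using a hypothetical import (some_package and its names are made up for illustration):

    # isort default (grid wrap):
    from some_package import (alpha, beta, gamma,
                              delta, epsilon)

    # black, and isort with --profile black (vertical hanging indent,
    # trailing comma, line length 88):
    from some_package import (
        alpha,
        beta,
        gamma,
        delta,
        epsilon,
    )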
Matt Kramer (committed by GitHub), 2022-05-05 22:15:43 -05:00
parent b5b150a918
commit 88a0dd38de
2 changed files with 67 additions and 48 deletions


@@ -9,3 +9,4 @@ repos:
     hooks:
       - id: isort
         name: isort (python)
+        args: [--profile, black]
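For reference, the same behavior can be pinned in project config instead of hook args; if this repo later added a pyproject.toml, the equivalent (hypothetical here) setting would be:

    [tool.isort]
    profile = "black"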


@@ -1,45 +1,49 @@
-#Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
-#cell
-import random
-import numpy as np
-import matplotlib.pyplot as plt
+# Credit: https://github.com/karpathy/micrograd/blob/master/demo.ipynb
+# cell
 import datetime
-#cell
+import random
+
+import matplotlib.pyplot as plt
+import numpy as np
+
+# cell
 from micrograd.engine import Value
-from micrograd.nn import Neuron, Layer, MLP
+from micrograd.nn import MLP, Layer, Neuron
 
 print_statements = []
 
-def run_all_micrograd_demo(*args,**kwargs):
+
+def run_all_micrograd_demo(*args, **kwargs):
     result = micrograd_demo()
-    pyscript.write('micrograd-run-all-fig2-div', result)
+    pyscript.write("micrograd-run-all-fig2-div", result)
+
 
 def print_div(o):
     o = str(o)
-    print_statements.append(o + ' \n<br>')
-    pyscript.write('micrograd-run-all-print-div', ''.join(print_statements))
+    print_statements.append(o + " \n<br>")
+    pyscript.write("micrograd-run-all-print-div", "".join(print_statements))
 
-#All code is wrapped in this run_all function so it optionally executed (called)
-#from pyscript when a button is pressed.
-def micrograd_demo(*args,**kwargs):
+
+# All code is wrapped in this run_all function so it optionally executed (called)
+# from pyscript when a button is pressed.
+def micrograd_demo(*args, **kwargs):
     """
     Runs the micrograd demo.
     *args and **kwargs do nothing and are only there to capture any parameters passed
     from pyscript when this function is called when a button is clicked.
     """
-    #cell
+    # cell
     start = datetime.datetime.now()
-    print_div('Starting...')
-    #cell
+    print_div("Starting...")
+    # cell
     np.random.seed(1337)
     random.seed(1337)
-    #cell
-    #An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
+    # cell
+    # An adaptation of sklearn's make_moons function https://scikit-learn.org/stable/modules/generated/sklearn.datasets.make_moons.html
     def make_moons(n_samples=100, noise=None):
         n_samples_out, n_samples_in = n_samples, n_samples
@@ -48,26 +52,38 @@ def micrograd_demo(*args,**kwargs):
         inner_circ_x = 1 - np.cos(np.linspace(0, np.pi, n_samples_in))
         inner_circ_y = 1 - np.sin(np.linspace(0, np.pi, n_samples_in)) - 0.5
-        X = np.vstack([np.append(outer_circ_x, inner_circ_x), np.append(outer_circ_y, inner_circ_y)]).T
-        y = np.hstack([np.zeros(n_samples_out, dtype=np.intp), np.ones(n_samples_in, dtype=np.intp)])
-        if noise is not None: X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
+        X = np.vstack(
+            [
+                np.append(outer_circ_x, inner_circ_x),
+                np.append(outer_circ_y, inner_circ_y),
+            ]
+        ).T
+        y = np.hstack(
+            [
+                np.zeros(n_samples_out, dtype=np.intp),
+                np.ones(n_samples_in, dtype=np.intp),
+            ]
+        )
+        if noise is not None:
+            X += np.random.normal(loc=0.0, scale=noise, size=X.shape)
         return X, y
 
     X, y = make_moons(n_samples=100, noise=0.1)
-    #cell
-    y = y*2 - 1 # make y be -1 or 1
+    # cell
+    y = y * 2 - 1  # make y be -1 or 1
     # visualize in 2D
-    plt.figure(figsize=(5,5))
-    plt.scatter(X[:,0], X[:,1], c=y, s=20, cmap='jet')
+    plt.figure(figsize=(5, 5))
+    plt.scatter(X[:, 0], X[:, 1], c=y, s=20, cmap="jet")
     plt
-    pyscript.write('micrograd-run-all-fig1-div', plt)
-    #cell
+    pyscript.write("micrograd-run-all-fig1-div", plt)
+    # cell
     model = MLP(2, [16, 16, 1])  # 2-layer neural network
     print_div(model)
     print_div(("number of parameters", len(model.parameters())))
-    #cell
+    # cell
     # loss function
     def loss(batch_size=None):
         # inline DataLoader :)
@@ -77,51 +93,53 @@ def micrograd_demo(*args,**kwargs):
         ri = np.random.permutation(X.shape[0])[:batch_size]
         Xb, yb = X[ri], y[ri]
         inputs = [list(map(Value, xrow)) for xrow in Xb]
         # forward the model to get scores
         scores = list(map(model, inputs))
         # svm "max-margin" loss
-        losses = [(1 + -yi*scorei).relu() for yi, scorei in zip(yb, scores)]
+        losses = [(1 + -yi * scorei).relu() for yi, scorei in zip(yb, scores)]
         data_loss = sum(losses) * (1.0 / len(losses))
         # L2 regularization
         alpha = 1e-4
-        reg_loss = alpha * sum((p*p for p in model.parameters()))
+        reg_loss = alpha * sum((p * p for p in model.parameters()))
         total_loss = data_loss + reg_loss
         # also get accuracy
-        accuracy = [((yi).__gt__(0)) == ((scorei.data).__gt__(0)) for yi, scorei in zip(yb, scores)]
+        accuracy = [
+            ((yi).__gt__(0)) == ((scorei.data).__gt__(0))
+            for yi, scorei in zip(yb, scores)
+        ]
         return total_loss, sum(accuracy) / len(accuracy)
 
     total_loss, acc = loss()
     print((total_loss, acc))
-    #cell
+    # cell
     # optimization
-    for k in range(20): #was 100
+    for k in range(20):  # was 100
         # forward
         total_loss, acc = loss()
         # backward
         model.zero_grad()
         total_loss.backward()
         # update (sgd)
-        learning_rate = 1.0 - 0.9*k/100
+        learning_rate = 1.0 - 0.9 * k / 100
         for p in model.parameters():
             p.data -= learning_rate * p.grad
         if k % 1 == 0:
             # print(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
             print_div(f"step {k} loss {total_loss.data}, accuracy {acc*100}%")
-    #cell
+    # cell
     h = 0.25
     x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
     y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
-    xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
-                         np.arange(y_min, y_max, h))
+    xx, yy = np.meshgrid(np.arange(x_min, x_max, h), np.arange(y_min, y_max, h))
     Xmesh = np.c_[xx.ravel(), yy.ravel()]
     inputs = [list(map(Value, xrow)) for xrow in Xmesh]
     scores = list(map(model, inputs))
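Usage note (standard pre-commit workflow, not part of this commit): after changing hook arguments, re-running every hook across the repository settles the formatting in one pass, which is presumably how the demo file above was reformatted:

    pre-commit run --all-files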