id
int64 0
45.1k
| file_name
stringlengths 4
68
| file_path
stringlengths 14
193
| content
stringlengths 32
9.62M
| size
int64 32
9.62M
| language
stringclasses 1
value | extension
stringclasses 6
values | total_lines
int64 1
136k
| avg_line_length
float64 3
903k
| max_line_length
int64 3
4.51M
| alphanum_fraction
float64 0
1
| repo_name
stringclasses 779
values | repo_stars
int64 0
882
| repo_forks
int64 0
108
| repo_open_issues
int64 0
90
| repo_license
stringclasses 8
values | repo_extraction_date
stringclasses 146
values | sha
stringlengths 64
64
| __index_level_0__
int64 0
45.1k
| exdup_ids_cmlisp_stkv2
listlengths 1
47
|
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
3,168
|
cats-and-dogs.lisp
|
chunsj_TH/db/cats-and-dogs.lisp
|
(declaim (optimize (speed 3) (debug 0) (safety 0)))
(defpackage :th.db.cats-and-dogs
(:use #:common-lisp
#:mu
#:th)
(:export #:read-cats-and-dogs-data))
(in-package :th.db.cats-and-dogs)
(defparameter +cnd-location+ ($concat (namestring (user-homedir-pathname))
".th/datasets/cats-and-dogs/tensors"))
(defun read-cats-and-dogs-64-train-tensor (idx &key (loc +cnd-location+) (normalize T))
  "Reads train chunk IDX (1-25) from LOC, interleaving cat and dog rows
into one tensor.  Returns a float tensor, scaled to [0,1] when
NORMALIZE, or NIL for an out-of-range IDX."
  (when (<= 1 idx 25)
    (let ((fc (file.disk (strcat loc (format nil "/cat64-train-~2,'0D.tensor" idx)) "r"))
          (fd (file.disk (strcat loc (format nil "/dog64-train-~2,'0D.tensor" idx)) "r"))
          (mc (tensor.byte))
          (md (tensor.byte)))
      ;; Release both file handles even when a read fails (the original
      ;; leaked them on error).
      (unwind-protect
           (progn
             (setf ($fbinaryp fc) t)
             (setf ($fbinaryp fd) t)
             ($fread mc fc)
             ($fread md fd))
        ($fclose fc)
        ($fclose fd))
      ;; Interleave: cats on even rows, dogs on odd rows.  J replaces the
      ;; original loop variable IDX which shadowed the parameter.
      (let ((m (apply #'tensor.byte (append (list (* 2 ($size mc 0))) (cdr ($size mc)))))
            (n ($size mc 0)))
        (loop :for i :from 0 :below n
              :for j = (* i 2)
              :do (setf ($ m j) ($ mc i)
                        ($ m (1+ j)) ($ md i)))
        (if normalize
            ($div! (tensor.float m) 255)
            (tensor.float m))))))
(defun read-cats-and-dogs-data (&key (indices '(1)) (loc +cnd-location+) (normalize T))
  "Reads and row-concatenates the train chunks named by INDICES.
Returns NIL when INDICES is empty."
  (when indices
    ;; EQ on numbers is unspecified in Common Lisp; use = for the count.
    (if (= 1 ($count indices))
        (read-cats-and-dogs-64-train-tensor (car indices) :loc loc :normalize normalize)
        (apply #'$concat
               (append (mapcar (lambda (idx)
                                 (read-cats-and-dogs-64-train-tensor idx
                                                                     :loc loc
                                                                     :normalize normalize))
                               indices)
                       '(0))))))
| 1,896
|
Common Lisp
|
.lisp
| 40
| 32.85
| 101
| 0.470016
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
45d03a1ac624919bf9799a5aa476571365f3a43cc47e43dd3fcbf740170047a4
| 3,168
|
[
-1
] |
3,169
|
fashion-original.lisp
|
chunsj_TH/db/fashion-original.lisp
|
(declaim (optimize (speed 3) (debug 0) (safety 0)))
(defpackage :th.db.fashion-original
  (:use #:common-lisp
        #:mu
        #:th)
  ;; BUGFIX: the original exported the typo #:read-fashion-train-labes;
  ;; the function defined below is READ-FASHION-TRAIN-LABELS.  The
  ;; misspelled symbol stays exported for backward compatibility.
  (:export #:read-fashion-train-images
           #:read-fashion-train-labels
           #:read-fashion-train-labes
           #:read-fashion-t10k-images
           #:read-fashion-t10k-labels
           #:read-fashion-data))
(in-package :th.db.fashion-original)
;; IDX element-type table: (type-tag element-type bytes-per-element).
(defparameter +idx-types+
  '((#x08 (unsigned-byte 8) 1)
    (#x09 (signed-byte 8) 1)
    ;;(#x0B (unsigned-byte 4))
    (#x0C (signed-byte 32) 4)
    (#x0D single-float 4)
    (#x0E double-float 8)))
(defparameter +fmnist-location+ ($concat (namestring (user-homedir-pathname))
".th/datasets/fashion-mnist"))
(defun read-nbyte (n str)
  "Reads N bytes from binary stream STR and folds them into a single
big-endian unsigned integer."
  (let ((acc 0))
    (dotimes (i n acc)
      (setf acc (logior (ash acc 8) (read-byte str))))))
(defun read-single-image-into-m (m idx s nrow ncol &optional (normalize nil))
  "Fills row IDX of matrix M with the next NROW*NCOL bytes from stream S.
Bytes become [0,1] floats when NORMALIZE, plain floats otherwise."
  (let ((npixels (* nrow ncol)))
    (loop :for i :from 0 :below npixels
          :for byte = (read-byte s)
          :do (setf ($ m idx i)
                    (if normalize (/ byte 255.0) (* 1.0 byte))))))
(defun read-single-label-into-m (m idx byte onehot?)
  "Stores label BYTE into row IDX of M: a 1.0 at column BYTE when
ONEHOT?, otherwise the label value itself in column 0."
  (cond (onehot? (setf ($ m idx byte) 1.0))
        (t (setf ($ m idx 0) (coerce byte 'single-float)))))
;; Reads an IDX3-format image file and returns an NDATA x (NROW*NCOL)
;; float matrix.  NORMALIZE scales pixels to [0,1]; VERBOSE prints the
;; decoded header.
(defun read-fashion-images (fname &key (normalize nil) (verbose nil))
  (with-open-file (str fname :element-type '(unsigned-byte 8))
    ;; the IDX magic number begins with two zero bytes
    (assert (loop :repeat 2 :always (= #x00 (read-byte str)))
            nil
            "magic numbers not matched")
    (let* ((type-tag (read-byte str)) ; element-type code, see +idx-types+
           (tagdata (cdr (assoc type-tag +idx-types+)))
           (dtype (car tagdata))
           (nbytes (cadr tagdata))
           ;; next byte = dimension count; each dim is 4 bytes big-endian:
           ;; (ndata nrow ncol) for image files
           (metadata (loop :repeat (read-byte str) :collect (read-nbyte 4 str)))
           (ndata (car metadata))
           (nrow (cadr metadata))
           (ncol (caddr metadata))
           (m (zeros ndata (* nrow ncol))))
      (when verbose
        (format T "~%TYPE: ~A NBYTES: ~A~%" dtype nbytes)
        (format T "NDATA: ~A NROW: ~A NCOL: ~A~%" ndata nrow ncol))
      ;; NOTE(review): DTYPE/NBYTES are informational only; every pixel is
      ;; read as one unsigned byte, so only tag #x08 is really supported.
      (loop :for i :from 0 :below ndata
            :do (read-single-image-into-m m i str nrow ncol normalize))
      m)))
;; Reads an IDX1-format label file.  Returns NDATA x 10 one-hot rows when
;; ONEHOT, else NDATA x 1 with the raw label value.
(defun read-fashion-labels (fname &key (verbose nil) (onehot nil))
  (with-open-file (str fname :element-type '(unsigned-byte 8))
    ;; the IDX magic number begins with two zero bytes
    (assert (loop :repeat 2 :always (= #x00 (read-byte str)))
            nil
            "magic numbers not matched")
    (let* ((type-tag (read-byte str)) ; element-type code, see +idx-types+
           (tagdata (cdr (assoc type-tag +idx-types+)))
           (dtype (car tagdata))
           (nbytes (cadr tagdata))
           ;; next byte = dimension count; label files have one dim: NDATA
           (metadata (loop :repeat (read-byte str) :collect (read-nbyte 4 str)))
           (ndata (car metadata))
           (m (if onehot (zeros ndata 10) (zeros ndata 1))))
      (when verbose
        (format T "~%TYPE: ~A NBYTES: ~A~%" dtype nbytes)
        (format T "NDATA: ~A~%" ndata))
      (loop :for i :from 0 :below ndata
            :do (read-single-label-into-m m i (read-byte str) onehot))
      m)))
;; Convenience readers over the standard Fashion-MNIST file names under PATH.
(defun read-fashion-train-images (&key (path +fmnist-location+) (normalize nil) (verbose nil))
  (read-fashion-images (strcat path "/train-images-idx3-ubyte")
                       :normalize normalize :verbose verbose))
(defun read-fashion-train-labels (&key (path +fmnist-location+) (verbose nil) (onehot nil))
  (read-fashion-labels (strcat path "/train-labels-idx1-ubyte")
                       :onehot onehot
                       :verbose verbose))
(defun read-fashion-t10k-images (&key (path +fmnist-location+) (normalize nil) (verbose nil))
  (read-fashion-images (strcat path "/t10k-images-idx3-ubyte")
                       :normalize normalize :verbose verbose))
(defun read-fashion-t10k-labels (&key (path +fmnist-location+) (onehot nil) (verbose nil))
  (read-fashion-labels (strcat path "/t10k-labels-idx1-ubyte")
                       :onehot onehot
                       :verbose verbose))
;; Loads the full dataset as a #{...} hash literal (reader macro from MU)
;; keyed by :train-images/:train-labels/:test-images/:test-labels.
(defun read-fashion-data (&key (path +fmnist-location+) (normalize T) (onehot T))
  #{:train-images (read-fashion-train-images :path path :normalize normalize)
    :train-labels (read-fashion-train-labels :path path :onehot onehot)
    :test-images (read-fashion-t10k-images :path path :normalize normalize)
    :test-labels (read-fashion-t10k-labels :path path :onehot onehot)})
| 4,283
|
Common Lisp
|
.lisp
| 91
| 38.681319
| 94
| 0.601723
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
2899fea8316b4023546e1f5d307cd7465a789fab2b0de6009809c6ccb8ff374e
| 3,169
|
[
-1
] |
3,170
|
exdata.lisp
|
chunsj_TH/db/exdata.lisp
|
(defpackage :th.ex.data
(:use #:common-lisp
#:mu
#:th)
(:export #:text-lines
#:ptb
#:addition
#:date-data
#:eng-fra-small
#:eng-fra-small-processed
#:iris))
(in-package :th.ex.data)
(defun data-filename (name)
  "Returns the absolute path of bundled data file NAME under the data/
directory of the :th system source tree."
  (let ((root (namestring (asdf:system-source-directory :th))))
    ($concat root "data/" name)))
(defun text-lines (&optional (file :tiny-shakespeare))
  "Returns the lines of one of the bundled text corpora; NIL for an
unknown FILE keyword."
  (case file
    (:tiny-shakespeare (read-lines-from (data-filename "tinyshakespeare.txt")))
    (:obama (read-lines-from (data-filename "obama.txt")))
    (:pg (read-lines-from (data-filename "pg.txt")))
    (:small-pg (read-lines-from (data-filename "pgsmall.txt")))))
(defun ptb (&optional (type :train))
  "Returns the Penn Treebank lines for TYPE (e.g. :train), each trimmed
and suffixed with an <eos> marker."
  (let ((fname (concatenate 'string
                            "ptb."
                            (string-downcase (symbol-name type))
                            ".txt")))
    (mapcar (lambda (line) (strim (concatenate 'string line "<eos>")))
            (read-lines-from (data-filename fname)))))
;; Each reader below returns the raw lines of one bundled dataset file.
(defun addition () (read-lines-from (data-filename "addition.txt")))
(defun date-data () (read-lines-from (data-filename "date.txt")))
(defun eng-fra-small () (read-lines-from (data-filename "eng-fra.small.txt")))
;; Preprocessed English/French pairs: each line split on TAB into a list.
(defun eng-fra-small-processed ()
  (->> (read-lines-from (data-filename "eng-fra.small-processed.txt"))
       (mapcar (lambda (s) (split #\Tab s)))))
;; Loads the bundled iris.csv (150 samples, 4 features, 3 classes).
;; Returns a plist: :x 150x4 features, :y 150x3 one-hot labels, plus
;; target and feature name lists.
(defun iris ()
  (let ((x (tensor 150 4)) ; NOTE(review): TENSOR appears uninitialized; every cell is overwritten below
        (y (zeros 150 3)))
    ;; CDR skips the CSV header; column 4 holds the class index (0-2).
    (loop :for line :in (cdr (read-lines-from (data-filename "iris.csv")))
          :for i :from 0
          :for parts = (split #\, line)
          :do (let ((xs (mapcar (lambda (s) (parse-float s)) (subseq parts 0 4)))
                    (yloc (parse-integer ($ parts 4))))
                (setf ($ x i 0) ($ xs 0)
                      ($ x i 1) ($ xs 1)
                      ($ x i 2) ($ xs 2)
                      ($ x i 3) ($ xs 3))
                (setf ($ y i yloc) 1)))
    (list :x x :y y
          :targets '("setosa" "versicolor" "virginica")
          :features '("sepal length (cm)" "sepal width (cm)"
                      "petal length (cm)" "petal width (cm)"))))
| 2,241
|
Common Lisp
|
.lisp
| 49
| 34.530612
| 93
| 0.517841
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
0f12276bf6dff8b1bbb838b5d9de2585a95983240ccee5cff6cecdf64214517a
| 3,170
|
[
-1
] |
3,171
|
celeba.lisp
|
chunsj_TH/db/celeba.lisp
|
(declaim (optimize (speed 3) (debug 0) (safety 0)))
(defpackage :th.db.celeba
(:use #:common-lisp
#:mu
#:th)
(:export #:read-celeba64-data))
(in-package :th.db.celeba)
(defparameter +celeba-location+ ($concat (namestring (user-homedir-pathname)) ".th/datasets/celeba"))
(defun read-celeba64-tensor (idx &key (loc +celeba-location+) (normalize T))
  "Reads CelebA-64 tensor chunk IDX (1-20) from LOC.  Returns a float
tensor, scaled to [0,1] when NORMALIZE, or NIL for an out-of-range IDX."
  (when (<= 1 idx 20)
    (let ((f (file.disk (strcat loc (format nil "/tensors/celeba64-~2,'0D.tensor" idx)) "r"))
          (m (tensor.byte)))
      ;; Release the file handle even when the read fails (the original
      ;; leaked it on error).
      (unwind-protect
           (progn
             (setf ($fbinaryp f) t)
             ($fread m f))
        ($fclose f))
      (if normalize
          ($div! (tensor.float m) 255)
          (tensor.float m)))))
(defun read-celeba64-data (&key (indices '(1)) (loc +celeba-location+) (normalize T))
  "Reads and row-concatenates the CelebA-64 chunks named by INDICES.
Returns NIL when INDICES is empty."
  (when indices
    ;; EQ on numbers is unspecified in Common Lisp; use = for the count.
    (if (= 1 ($count indices))
        (read-celeba64-tensor (car indices) :loc loc :normalize normalize)
        (apply #'$concat
               (append (mapcar (lambda (idx)
                                 (read-celeba64-tensor idx :loc loc :normalize normalize))
                               indices)
                       '(0))))))
| 1,146
|
Common Lisp
|
.lisp
| 26
| 34.461538
| 101
| 0.558744
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
63fa28a0fe15424777cae012de77a5eb941b7aec6cf4ce399abf46381918072a
| 3,171
|
[
-1
] |
3,172
|
mnist-original.lisp
|
chunsj_TH/db/mnist-original.lisp
|
(declaim (optimize (speed 3) (debug 1) (safety 0)))
(defpackage :th.db.mnist-original
(:use #:common-lisp
#:mu
#:th)
(:export #:read-mnist-train-images
#:read-mnist-train-labels
#:read-mnist-t10k-images
#:read-mnist-t10k-labels
#:read-mnist-data))
(in-package :th.db.mnist-original)
;; IDX element-type table: (type-tag element-type bytes-per-element).
(defparameter +idx-types+
  '((#x08 (unsigned-byte 8) 1)
    (#x09 (signed-byte 8) 1)
    ;;(#x0B (unsigned-byte 4))
    (#x0C (signed-byte 32) 4)
    (#x0D single-float 4)
    (#x0E double-float 8)))
(defparameter +mnist-location+ ($concat (namestring (user-homedir-pathname)) ".th/datasets/mnist"))
(defun read-nbyte (n str)
  "Reads N bytes from binary stream STR and folds them into a single
big-endian unsigned integer."
  (let ((acc 0))
    (dotimes (i n acc)
      (setf acc (logior (ash acc 8) (read-byte str))))))
(defun read-single-image-into-m (m idx s nrow ncol &optional (normalize nil))
  "Fills row IDX of matrix M with the next NROW*NCOL bytes from stream S.
Bytes become [0,1] floats when NORMALIZE, plain floats otherwise."
  (let ((npixels (* nrow ncol)))
    (loop :for i :from 0 :below npixels
          :for byte = (read-byte s)
          :do (setf ($ m idx i)
                    (if normalize (/ byte 255.0) (* 1.0 byte))))))
(defun read-single-label-into-m (m idx byte onehot?)
  "Stores label BYTE into row IDX of M: a 1.0 at column BYTE when
ONEHOT?, otherwise the label value itself in column 0."
  (cond (onehot? (setf ($ m idx byte) 1.0))
        (t (setf ($ m idx 0) (coerce byte 'single-float)))))
;; Reads an IDX3-format image file and returns an NDATA x (NROW*NCOL)
;; float matrix.  NORMALIZE scales pixels to [0,1]; VERBOSE prints the
;; decoded header.
(defun read-mnist-images (fname &key (normalize nil) (verbose nil))
  (with-open-file (str fname :element-type '(unsigned-byte 8))
    ;; the IDX magic number begins with two zero bytes
    (assert (loop :repeat 2 :always (= #x00 (read-byte str)))
            nil
            "magic numbers not matched")
    (let* ((type-tag (read-byte str)) ; element-type code, see +idx-types+
           (tagdata (cdr (assoc type-tag +idx-types+)))
           (dtype (car tagdata))
           (nbytes (cadr tagdata))
           ;; next byte = dimension count; each dim is 4 bytes big-endian:
           ;; (ndata nrow ncol) for image files
           (metadata (loop :repeat (read-byte str) :collect (read-nbyte 4 str)))
           (ndata (car metadata))
           (nrow (cadr metadata))
           (ncol (caddr metadata))
           (m (zeros ndata (* nrow ncol))))
      (when verbose
        (format T "~%TYPE: ~A NBYTES: ~A~%" dtype nbytes)
        (format T "NDATA: ~A NROW: ~A NCOL: ~A~%" ndata nrow ncol))
      ;; NOTE(review): DTYPE/NBYTES are informational only; every pixel is
      ;; read as one unsigned byte, so only tag #x08 is really supported.
      (loop :for i :from 0 :below ndata
            :do (read-single-image-into-m m i str nrow ncol normalize))
      m)))
;; Reads an IDX1-format label file.  Returns NDATA x 10 one-hot rows when
;; ONEHOT, else NDATA x 1 with the raw label value.
(defun read-mnist-labels (fname &key (verbose nil) (onehot nil))
  (with-open-file (str fname :element-type '(unsigned-byte 8))
    ;; the IDX magic number begins with two zero bytes
    (assert (loop :repeat 2 :always (= #x00 (read-byte str)))
            nil
            "magic numbers not matched")
    (let* ((type-tag (read-byte str)) ; element-type code, see +idx-types+
           (tagdata (cdr (assoc type-tag +idx-types+)))
           (dtype (car tagdata))
           (nbytes (cadr tagdata))
           ;; next byte = dimension count; label files have one dim: NDATA
           (metadata (loop :repeat (read-byte str) :collect (read-nbyte 4 str)))
           (ndata (car metadata))
           (m (if onehot (zeros ndata 10) (zeros ndata 1))))
      (when verbose
        (format T "~%TYPE: ~A NBYTES: ~A~%" dtype nbytes)
        (format T "NDATA: ~A~%" ndata))
      (loop :for i :from 0 :below ndata
            :do (read-single-label-into-m m i (read-byte str) onehot))
      m)))
;; Convenience readers over the standard MNIST file names under PATH.
(defun read-mnist-train-images (&key (path +mnist-location+) (normalize nil) (verbose nil))
  (read-mnist-images (strcat path "/train-images-idx3-ubyte")
                     :normalize normalize :verbose verbose))
(defun read-mnist-train-labels (&key (path +mnist-location+) (verbose nil) (onehot nil))
  (read-mnist-labels (strcat path "/train-labels-idx1-ubyte")
                     :onehot onehot
                     :verbose verbose))
(defun read-mnist-t10k-images (&key (path +mnist-location+) (normalize nil) (verbose nil))
  (read-mnist-images (strcat path "/t10k-images-idx3-ubyte")
                     :normalize normalize :verbose verbose))
(defun read-mnist-t10k-labels (&key (path +mnist-location+) (onehot nil) (verbose nil))
  (read-mnist-labels (strcat path "/t10k-labels-idx1-ubyte")
                     :onehot onehot
                     :verbose verbose))
;; Loads the full dataset as a #{...} hash literal (reader macro from MU)
;; keyed by :train-images/:train-labels/:test-images/:test-labels.
(defun read-mnist-data (&key (path +mnist-location+) (normalize T) (onehot T))
  #{:train-images (read-mnist-train-images :path path :normalize normalize)
    :train-labels (read-mnist-train-labels :path path :onehot onehot)
    :test-images (read-mnist-t10k-images :path path :normalize normalize)
    :test-labels (read-mnist-t10k-labels :path path :onehot onehot)})
| 4,173
|
Common Lisp
|
.lisp
| 90
| 38.488889
| 99
| 0.60408
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
00b45bc47c34835f089182c51f2ae188b82725c0769f7c6153f13b525c58dbe7
| 3,172
|
[
-1
] |
3,173
|
layers.lisp
|
chunsj_TH/layers/layers.lisp
|
(declaim (optimize (speed 3) (debug 1) (safety 0)))
(defpackage :th.layers
(:use #:common-lisp
#:mu
#:th)
(:export #:layer
#:$execute
#:$evaluate
#:$train-parameters
#:$save-weights
#:$load-weights
#:sequential-layer
#:parallel-layer
#:affine-layer
#:batch-normalization-layer
#:layer-normalization-layer
#:convolution-2d-layer
#:maxpool-2d-layer
#:avgpool-2d-layer
#:flatten-layer
#:full-convolution-2d-layer
#:reshape-layer
#:functional-layer
#:$function-arguments
#:dropout-layer
#:rnn-cell
#:lstm-cell
#:gru-cell
#:affine-cell
#:attention-cell
#:dropout-cell
#:$keep-state!
#:recurrent-layer
#:$generate-sequence
#:$cell-state
#:$cell
#:$update-cell-state!
#:$set-memory!
#:bidirectional-recurrent-layer
#:$fcell
#:$bcell
#:with-keeping-state
#:concat-sequence
#:encoder-vocabulary-size
#:encoder-encode
#:encoder-decode
#:encoder-choose
#:encoder-vocabularies
#:encoder-vocabulary-map
#:encoder
#:word-encoder
#:character-encoder
#:build-character-encoder
#:$choose))
;; XXX cell state control api sucks!
(in-package :th.layers)
;; Generic layer protocol.
(defgeneric $execute (layer x &key trainp)) ; forward pass; TRAINP selects train vs eval behavior
(defgeneric $evaluate (layer x)) ; forward pass with TRAINP nil
(defgeneric $train-parameters (layer)) ; list of trainable parameters
(defgeneric $keep-state! (layer statefulp &optional truncatedp)) ; recurrent-state retention control
(defgeneric $cell-state (layer)) ; current recurrent cell state, if any
(defgeneric $update-cell-state! (recurrent-layer h)) ; replace recurrent state with H
(defgeneric $set-memory! (cell hs)) ; install attention memory HS
;; Base class: default methods make a layer a stateless, parameterless
;; no-op; subclasses override what they need.
(defclass layer () ())
(defmethod $train-parameters ((l layer)) nil)
(defmethod $keep-state! ((l layer) statefulp &optional (truncatedp T)) l)
(defmethod $cell-state ((l layer)) nil)
(defmethod $update-cell-state! ((l layer) h) l)
(defmethod $set-memory! ((l layer) hs) l)
(defmethod $parameters ((l layer)) ($train-parameters l))
(defmethod $execute ((l layer) x &key (trainp t))
  (declare (ignore x trainp))
  nil)
(defmethod $evaluate ((l layer) x) ($execute l x :trainp nil))
;; Optimizer entry points forward to the layer's trainable parameters.
(defmethod $gd! ((l layer) &optional (learning-rate 0.01))
  ($gd! ($train-parameters l) learning-rate))
(defmethod $mgd! ((l layer) &optional (learning-rate 0.01) (momentum 0.9))
  ($mgd! ($train-parameters l) learning-rate momentum))
(defmethod $agd! ((l layer) &optional (learning-rate 0.01))
  ($agd! ($train-parameters l) learning-rate))
(defmethod $amgd! ((l layer) &optional (learning-rate 0.001) (β1 0.9) (β2 0.999))
  ($amgd! ($train-parameters l) learning-rate β1 β2))
(defmethod $rmgd! ((l layer) &optional (learning-rate 0.001) (decay-rate 0.99))
  ($rmgd! ($train-parameters l) learning-rate decay-rate))
(defmethod $adgd! ((l layer) &optional (learning-rate 1) (decay-rate 0.95))
  ($adgd! ($train-parameters l) learning-rate decay-rate))
(defmethod $rpgd! ((l layer) &optional (learning-rate 1E-2) (etas '(0.5 1.2)) (steps '(1E-6 50)))
  ($rpgd! ($train-parameters l) learning-rate etas steps))
(defmethod $cg! ((l layer)) ($cg! ($train-parameters l)))
(defmethod $reset! ((l layer)) ($reset! ($train-parameters l)))
(defun $save-weights (filename network)
  "Writes each parameter tensor of NETWORK as a raw binary file
FILENAME/<i>.dat, creating the directory when needed."
  (ensure-directories-exist (strcat filename "/"))
  (loop :for p :in ($parameters network)
        :for i :from 0
        :for tensor = (if ($parameterp p) ($data p) p)
        :do (let* ((tfn (strcat filename "/" (format nil "~A" i) ".dat"))
                   (f (file.disk tfn "w")))
              ;; Release the handle even when the write fails (the
              ;; original leaked it on error).
              (unwind-protect
                   (progn
                     (setf ($fbinaryp f) t)
                     ($fwrite tensor f))
                ($fclose f)))))
(defun $load-weights (filename network)
  "Reads each parameter tensor of NETWORK back from FILENAME/<i>.dat in
the same order $SAVE-WEIGHTS wrote them."
  (loop :for p :in ($parameters network)
        :for i :from 0
        :do (let* ((tfn (strcat filename "/" (format nil "~A" i) ".dat"))
                   (f (file.disk tfn "r")))
              ;; Release the handle even when the read fails (the
              ;; original leaked it on error).
              (unwind-protect
                   (progn
                     (setf ($fbinaryp f) t)
                     ($fread (if ($parameterp p) ($data p) p) f))
                ($fclose f)))))
;; Runs its sub-layers in order, feeding each output to the next.
(defclass sequential-layer (layer)
  ((ls :initform nil))) ; sub-layers, in execution order
(defmethod $ ((l sequential-layer) location &rest others-and-default)
  ;; indexing a sequential layer returns the sub-layer at LOCATION
  (declare (ignore others-and-default))
  (with-slots (ls) l
    ($ ls location)))
(defun sequential-layer (&rest layers)
  "Creates a sequential layer chaining LAYERS in the given order."
  (let ((n (make-instance 'sequential-layer)))
    (with-slots (ls) n
      (setf ls layers))
    n))
(defmethod $train-parameters ((l sequential-layer))
  (with-slots (ls) l
    (loop :for e :in ls
          :appending ($train-parameters e))))
(defmethod $keep-state! ((l sequential-layer) statefulp &optional (truncatedp T))
  ;; propagate state-retention policy to every sub-layer
  (with-slots (ls) l
    (loop :for e :in ls
          :do ($keep-state! e statefulp truncatedp)))
  l)
(defmethod $parameters ((l sequential-layer))
  (with-slots (ls) l
    (loop :for e :in ls
          :appending ($parameters e))))
(defmethod $execute ((l sequential-layer) x &key (trainp t))
  ;; thread X through the sub-layers left to right
  (with-slots (ls) l
    (let ((r ($execute (car ls) x :trainp trainp)))
      (loop :for e :in (cdr ls)
            :do (let ((nr ($execute e r :trainp trainp)))
                  (setf r nr)))
      r)))
;; Applies every sub-layer to the same input and collects the results in
;; a list (order matches construction order).
(defclass parallel-layer (sequential-layer) ())
(defun parallel-layer (&rest layers)
  "Creates a parallel layer over LAYERS."
  (let ((instance (make-instance 'parallel-layer)))
    (with-slots (ls) instance
      (setf ls layers))
    instance))
(defmethod $execute ((l parallel-layer) x &key (trainp t))
  (with-slots (ls) l
    (loop :for sub :in ls
          :collect ($execute sub x :trainp trainp))))
;; Batch normalization over the feature dimension.  G/E are the learnable
;; gain and shift; RM/RV hold running statistics for inference; SM/SD are
;; scratch buffers for batch statistics during training.
(defclass batch-normalization-layer (layer)
  ((g :initform nil)
   (e :initform nil)
   (rm :initform nil)
   (rv :initform nil)
   (sm :initform nil)
   (sd :initform nil)))
(defun batch-normalization-layer (input-size)
  "Creates a batch-normalization layer over INPUT-SIZE features."
  (let ((n (make-instance 'batch-normalization-layer)))
    (with-slots (g e rm rv sm sd) n
      (setf g ($parameter (ones input-size))
            e ($parameter (zeros input-size))
            rm (zeros input-size)
            rv (ones input-size)
            sm (zeros input-size)
            sd (zeros input-size)))
    n))
(defmethod $train-parameters ((l batch-normalization-layer))
  (with-slots (g e) l
    (list g e)))
(defmethod $parameters ((l batch-normalization-layer))
  ;; running statistics are saved/loaded but never trained
  (with-slots (g e rm rv) l
    (list g e rm rv)))
(defmethod $execute ((l batch-normalization-layer) x &key (trainp t))
  (with-slots (g e rm rv sm sd) l
    ;; Batch statistics need a real batch: rank other than 1/3 and more
    ;; than one row; otherwise fall back to the running statistics.
    (if (and trainp (not (eq 1 ($ndim x))) (not (eq 3 ($ndim x))) (not (eq 1 ($size x 0))))
        ($bn x g e rm rv sm sd)
        (if (and (not (eq 1 ($ndim x))) (not (eq 3 ($ndim x))) (not (eq 1 ($size x 0))))
            ($bn x ($data g) ($data e) rm rv)
            ($bnorm x ($data g) ($data e) rm rv)))))
;; Layer normalization: normalizes each sample over its own feature
;; dimensions, then applies learnable gain G and bias B.
(defclass layer-normalization-layer (layer)
  ((g :initform nil)
   (b :initform nil)))
(defun layer-normalization-layer (input-size)
  "Creates a layer-normalization layer over INPUT-SIZE features."
  (let ((n (make-instance 'layer-normalization-layer)))
    (with-slots (g b) n
      (setf g ($parameter (ones input-size))
            b ($parameter (zeros input-size)))
      n)))
(defmethod $train-parameters ((l layer-normalization-layer))
  (with-slots (g b) l
    (list g b)))
(defmethod $parameters ((l layer-normalization-layer))
  (with-slots (g b) l
    (list g b)))
(defmethod $execute ((l layer-normalization-layer) x &key (trainp t))
  (with-slots (g b) l
    ;; flatten to (batch x rest), normalize each row by its mean and
    ;; standard deviation (1E-7 avoids division by zero), scale and
    ;; shift, then restore the original shape
    (let* ((xsz ($size x))
           (d0 (car xsz))
           (d1 (reduce #'* (cdr xsz)))
           (xv ($view (if ($parameterp x) ($data x) x) d0 d1))
           (mxv ($expand ($mean xv 1) ($size xv)))
           (sxv ($expand ($sd xv 1) ($size xv)))
           (gv ($expand (if trainp g ($data g)) ($size xv)))
           (bv ($expand (if trainp b ($data b)) ($size xv)))
           (ov ($+ ($/ ($* gv ($- xv mxv)) ($+ sxv 1E-7)) bv)))
      (apply #'$reshape ov xsz))))
(defun afn (activation)
  "Maps an activation keyword to its function.  :NIL yields no
activation; unknown keywords default to sigmoid."
  (case activation
    (:sigmoid #'$sigmoid)
    (:tanh #'$tanh)
    (:relu #'$relu)
    (:lrelu #'$lrelu)
    (:selu #'$selu)
    (:swish #'$swish)
    (:mish #'$mish)
    (:gelu #'$gelu)
    (:celu #'$celu)
    (:softmax #'$softmax)
    (:logsoftmax #'$logsoftmax)
    (:nil nil)
    (t #'$sigmoid)))
;; Weight-initialization factory: maps a keyword to one of the weight
;; constructors, builds a weight of size SZ (AS carries extra arguments
;; for the random variants), and optionally scales it by FACTOR.
(defun wif (weight-initializer sz as &optional factor)
  (let ((w (cond ((eq weight-initializer :random-uniform) (apply #'vru (cons sz as)))
                 ((eq weight-initializer :random-normal) (apply #'vrn (cons sz as)))
                 ((eq weight-initializer :random-normal-truncated) (apply #'vrnt (cons sz as)))
                 ((eq weight-initializer :xavier-uniform) (vxavier sz :uniform))
                 ((eq weight-initializer :xavier-normal) (vxavier sz :normal))
                 ((eq weight-initializer :he-uniform) (vhe sz :uniform))
                 ((eq weight-initializer :he-normal) (vhe sz :normal))
                 ((eq weight-initializer :lecun-uniform) (vlecun sz :uniform))
                 ((eq weight-initializer :lecun-normal) (vlecun sz :normal))
                 ((eq weight-initializer :selu-uniform) (vselu sz :uniform))
                 ((eq weight-initializer :selu-normal) (vselu sz :normal))
                 (t (vru sz)))))
    ;; FACTOR scales the underlying tensor in place; assumes the
    ;; constructors return a parameter with $DATA -- TODO confirm
    (when factor ($mul! ($data w) factor))
    w))
;; Fully-connected layer.  W: weights, B: optional bias, A: optional
;; activation function, BN: optional batch-normalization sub-layer.
(defclass affine-layer (layer)
  ((w :initform nil)
   (b :initform nil)
   (a :initform nil)
   (bn :initform nil)))
(defun affine-layer (input-size output-size
                     &key (activation :sigmoid) (weight-initializer :he-normal)
                       weight-initialization
                       weight-factor
                       batch-normalization-p (biasp t))
  "Creates a fully-connected INPUT-SIZE -> OUTPUT-SIZE layer with
optional bias, activation and batch normalization."
  (let ((n (make-instance 'affine-layer)))
    ;; NOTE: removed the nonexistent WI slot the original WITH-SLOTS listed.
    (with-slots (w b a bn) n
      (setf a (afn activation))
      (when biasp (setf b ($parameter (zeros output-size))))
      (setf w (wif weight-initializer (list input-size output-size) weight-initialization
                   weight-factor))
      (when batch-normalization-p
        (setf bn (batch-normalization-layer output-size))))
    n))
(defmethod $train-parameters ((l affine-layer))
  (with-slots (w b bn) l
    (let ((ps (if b (list w b) (list w))))
      (if bn (append ps ($train-parameters bn)) ps))))
(defmethod $parameters ((l affine-layer))
  (with-slots (w b bn) l
    (let ((ps (if b (list w b) (list w))))
      (if bn (append ps ($parameters bn)) ps))))
(defmethod $execute ((l affine-layer) x &key (trainp t))
  "Affine transform -> optional batch-norm -> optional activation.
When TRAINP is NIL the raw tensors behind the parameters are used, so no
gradient graph is built (flattened from the original 4-way nesting)."
  (with-slots (w b a bn) l
    (let* ((wv (if trainp w ($data w)))
           (bv (if trainp b (when b ($data b))))
           (h ($affine x wv bv))
           (h (if bn ($execute bn h :trainp trainp) h)))
      (if a (funcall a h) h))))
;; 2D convolution layer.  W: filter bank, B: per-channel bias, DW/DH:
;; strides, PW/PH: padding, A: activation, BN: optional batch-norm.
(defclass convolution-2d-layer (layer)
  ((w :initform nil)
   (b :initform nil)
   (dw :initform 1)
   (dh :initform 1)
   (pw :initform 0)
   (ph :initform 0)
   (a :initform nil)
   (bn :initform nil)))
;; Computes the output shape (nbatch out-channels out-h out-w) of L for
;; input X (rank 3 or 4); NIL when channel counts mismatch or rank is
;; unsupported.
;; NOTE(review): uses exact rational division; out sizes are integral
;; only when (input + 2*pad - filter) divides the stride -- confirm
;; whether FLOOR semantics were intended.
(defun convolution-2d-output-size (l x)
  (cond ((eq 4 ($ndim x))
         (let* ((sz ($size x))
                (nbatch ($ sz 0))
                (input-channel-size ($ sz 1))
                (input-height ($ sz 2))
                (input-width ($ sz 3)))
           (with-slots (w dw dh pw ph) l
             (when (eq input-channel-size ($size w 1))
               (let ((output-width (1+ (/ (+ input-width (- ($size w 3)) (* 2 pw)) dw)))
                     (output-height (1+ (/ (+ input-height (- ($size w 2)) (* 2 ph)) dh))))
                 (list nbatch ($size w 0) output-height output-width))))))
        ((eq 3 ($ndim x))
         ;; unbatched input: report an implicit batch of 1
         (let* ((sz ($size x))
                (nbatch 1)
                (input-channel-size ($ sz 0))
                (input-height ($ sz 1))
                (input-width ($ sz 2)))
           (with-slots (w dw dh pw ph) l
             (when (eq input-channel-size ($size w 1))
               (let ((output-width (1+ (/ (+ input-width (- ($size w 3)) (* 2 pw)) dw)))
                     (output-height (1+ (/ (+ input-height (- ($size w 2)) (* 2 ph)) dh))))
                 (list nbatch ($size w 0) output-height output-width))))))))
(defun convolution-2d-layer (input-channel-size output-channel-size
                             filter-width filter-height
                             &key (stride-width 1) (stride-height 1)
                               (padding-width 0) (padding-height 0)
                               (activation :sigmoid) (weight-initializer :he-normal)
                               weight-initialization
                               weight-factor
                               batch-normalization-p
                               (biasp t))
  "Creates a 2D convolution layer with optional bias, activation and
batch normalization."
  (let ((n (make-instance 'convolution-2d-layer)))
    ;; NOTE: removed the nonexistent WI slot the original WITH-SLOTS listed.
    (with-slots (w b dw dh pw ph a bn) n
      (setf dw stride-width
            dh stride-height
            pw padding-width
            ph padding-height)
      (setf a (afn activation))
      (when biasp (setf b ($parameter (zeros output-channel-size))))
      ;; filter tensor layout: (out-channels in-channels height width)
      (setf w (wif weight-initializer
                   (list output-channel-size input-channel-size filter-height filter-width)
                   weight-initialization weight-factor))
      (when batch-normalization-p
        (setf bn (batch-normalization-layer output-channel-size))))
    n))
;; Trainable parameters: weight, optional bias, plus batch-norm's own.
(defmethod $train-parameters ((l convolution-2d-layer))
  (with-slots (w b bn) l
    (if bn
        (append (if b (list w b) (list w)) ($train-parameters bn))
        (if b (list w b) (list w)))))
;; All persisted parameters, including batch-norm running statistics.
(defmethod $parameters ((l convolution-2d-layer))
  (with-slots (w b bn) l
    (if bn
        (append (if b (list w b) (list w)) ($parameters bn))
        (if b (list w b) (list w)))))
(defmethod $execute ((l convolution-2d-layer) x &key (trainp t))
  "2D convolution -> optional batch-norm -> optional activation.
When TRAINP is NIL the raw tensors behind the parameters are used, so no
gradient graph is built (flattened from the original 4-way nesting)."
  (with-slots (w b dw dh pw ph a bn) l
    (let* ((wv (if trainp w ($data w)))
           (bv (if trainp b (when b ($data b))))
           (h ($conv2d x wv bv dw dh pw ph))
           (h (if bn ($execute bn h :trainp trainp) h)))
      (if a (funcall a h) h))))
;; Max pooling over 2D feature maps.  KW/KH: kernel, DW/DH: stride,
;; PW/PH: padding, CEIL-P: use ceiling when sizing the output.
(defclass maxpool-2d-layer (layer)
  ((kw :initform nil)
   (kh :initform nil)
   (dw :initform nil)
   (dh :initform nil)
   (pw :initform nil)
   (ph :initform nil)
   (ceil-p :initform nil)))
(defun maxpool-2d-layer (pool-width pool-height
                         &key (stride-width 1) (stride-height 1)
                           (padding-width 0) (padding-height 0)
                           ceilp)
  "Creates a 2D max-pooling layer."
  (let ((n (make-instance 'maxpool-2d-layer)))
    (with-slots (kw kh dw dh pw ph ceil-p) n
      (setf kw pool-width
            kh pool-height
            dw stride-width
            dh stride-height
            pw padding-width
            ph padding-height
            ceil-p ceilp))
    n))
(defmethod $execute ((l maxpool-2d-layer) x &key (trainp t))
  ;; pooling has no parameters, so train/eval behave identically
  (declare (ignore trainp))
  (with-slots (kw kh dw dh pw ph ceil-p) l
    ($maxpool2d x kw kh dw dh pw ph ceil-p)))
;; Average pooling over 2D feature maps.  KW/KH: kernel, DW/DH: stride,
;; PW/PH: padding, CEIL-P: ceiling output sizing, COUNT-P: whether
;; padded cells count in the average.
(defclass avgpool-2d-layer (layer)
  ((kw :initform nil)
   (kh :initform nil)
   (dw :initform nil)
   (dh :initform nil)
   (pw :initform nil)
   (ph :initform nil)
   (ceil-p :initform nil)
   (count-p :initform nil)))
(defun avgpool-2d-layer (pool-width pool-height
                         &key (stride-width 1) (stride-height 1)
                           (padding-width 0) (padding-height 0)
                           ceilp count-pad-p)
  "Creates a 2D average-pooling layer."
  (let ((n (make-instance 'avgpool-2d-layer)))
    (with-slots (kw kh dw dh pw ph ceil-p count-p) n
      (setf kw pool-width
            kh pool-height
            dw stride-width
            dh stride-height
            pw padding-width
            ph padding-height
            ceil-p ceilp
            count-p count-pad-p))
    n))
(defmethod $execute ((l avgpool-2d-layer) x &key (trainp t))
  (declare (ignore trainp))
  (with-slots (kw kh dw dh pw ph ceil-p count-p) l
    ;; NOTE(review): COUNT-P is stored and bound here but never passed to
    ;; $AVGPOOL2D -- confirm its signature; if it accepts a
    ;; count-include-pad argument, the COUNT-PAD-P option is silently
    ;; ignored (likely a bug).
    ($avgpool2d x kw kh dw dh pw ph ceil-p)))
;; Flattens every dimension after the batch dimension into one.
(defclass flatten-layer (layer) ())
(defun flatten-layer ()
  "Creates a flattening layer."
  (make-instance 'flatten-layer))
(defmethod $execute ((l flatten-layer) x &key (trainp t))
  (declare (ignore trainp))
  (let* ((batch-size ($size x 0))
         (flat-size (reduce #'* ($size x) :start 1)))
    ($reshape x batch-size flat-size)))
;; Transposed ("full") 2D convolution layer.  W: filters, B: bias,
;; DW/DH: strides, PW/PH: padding, AW/AH: output-size adjustment,
;; A: activation, BN: optional batch-norm.
(defclass full-convolution-2d-layer (layer)
  ((w :initform nil)
   (b :initform nil)
   (dw :initform 1)
   (dh :initform 1)
   (pw :initform 0)
   (ph :initform 0)
   (aw :initform 0)
   (ah :initform 0)
   (a :initform nil)
   (bn :initform nil)))
;; Computes the output shape (nbatch out-channels out-h out-w) of L for
;; input X (rank 3 or 4); NIL on channel mismatch or unsupported rank.
(defun full-convolution-2d-output-size (l x)
  (cond ((eq 4 ($ndim x))
         (let* ((sz ($size x))
                (nbatch ($ sz 0))
                (input-channel-size ($ sz 1))
                (input-height ($ sz 2))
                (input-width ($ sz 3)))
           (with-slots (w dw dh pw ph aw ah) l
             (when (eq input-channel-size ($size w 1))
               (let ((output-w (+ (* (- input-width 1) dw) (* -2 pw) ($size w 3) aw))
                     (output-h (+ (* (- input-height 1) dh) (* -2 ph) ($size w 2) ah)))
                 (list nbatch ($size w 0) output-h output-w))))))
        ((eq 3 ($ndim x))
         ;; unbatched input: report an implicit batch of 1
         (let* ((sz ($size x))
                (nbatch 1)
                (input-channel-size ($ sz 0))
                (input-height ($ sz 1))
                (input-width ($ sz 2)))
           (with-slots (w dw dh pw ph aw ah) l
             (when (eq input-channel-size ($size w 1))
               (let ((output-w (+ (* (- input-width 1) dw) (* -2 pw) ($size w 3) aw))
                     (output-h (+ (* (- input-height 1) dh) (* -2 ph) ($size w 2) ah)))
                 (list nbatch ($size w 0) output-h output-w))))))))
(defun full-convolution-2d-layer (input-channel-size output-channel-size
                                  filter-width filter-height
                                  &key (stride-width 1) (stride-height 1)
                                    (padding-width 0) (padding-height 0)
                                    (adjust-width 0) (adjust-height 0)
                                    (activation :sigmoid) (weight-initializer :he-normal)
                                    weight-initialization
                                    weight-factor
                                    batch-normalization-p
                                    (biasp t))
  "Creates a transposed 2D convolution layer with optional bias,
activation and batch normalization."
  (let ((n (make-instance 'full-convolution-2d-layer)))
    ;; NOTE: removed the nonexistent WI slot the original WITH-SLOTS listed.
    (with-slots (w b dw dh pw ph aw ah a bn) n
      (setf dw stride-width
            dh stride-height
            pw padding-width
            ph padding-height
            aw adjust-width
            ah adjust-height)
      (setf a (afn activation))
      (when biasp (setf b ($parameter (zeros output-channel-size))))
      ;; transposed-conv filter layout: (in-channels out-channels height width)
      (setf w (wif weight-initializer
                   (list input-channel-size output-channel-size filter-height filter-width)
                   weight-initialization weight-factor))
      (when batch-normalization-p
        (setf bn (batch-normalization-layer output-channel-size))))
    n))
;; Trainable parameters: weight, optional bias, plus batch-norm's own.
(defmethod $train-parameters ((l full-convolution-2d-layer))
  (with-slots (w b bn) l
    (if bn
        (append (if b (list w b) (list w)) ($train-parameters bn))
        (if b (list w b) (list w)))))
;; All persisted parameters, including batch-norm running statistics.
(defmethod $parameters ((l full-convolution-2d-layer))
  (with-slots (w b bn) l
    (if bn
        (append (if b (list w b) (list w)) ($parameters bn))
        (if b (list w b) (list w)))))
(defmethod $execute ((l full-convolution-2d-layer) x &key (trainp t))
  "Transposed 2D convolution -> optional batch-norm -> optional
activation.  When TRAINP is NIL the raw tensors behind the parameters
are used, so no gradient graph is built (flattened from the original
4-way nesting)."
  (with-slots (w b dw dh pw ph aw ah a bn) l
    (let* ((wv (if trainp w ($data w)))
           (bv (if trainp b (when b ($data b))))
           (h ($dconv2d x wv bv dw dh pw ph aw ah))
           (h (if bn ($execute bn h :trainp trainp) h)))
      (if a (funcall a h) h))))
;; Reshapes the non-batch dimensions to a fixed target shape.
(defclass reshape-layer (layer)
  ((rsizes :initform nil))) ; target shape, excluding the batch dimension
(defun reshape-layer (&rest sizes)
  "Creates a layer reshaping each input to (batch . SIZES)."
  (let ((instance (make-instance 'reshape-layer)))
    (with-slots (rsizes) instance
      (setf rsizes sizes))
    instance))
(defmethod $execute ((l reshape-layer) x &key (trainp t))
  (declare (ignore trainp))
  (with-slots (rsizes) l
    (apply #'$reshape x ($size x 0) rsizes)))
;; Wraps an arbitrary function as a layer.  The most recent input is
;; cached in ARGS for inspection via $FUNCTION-ARGUMENTS.
(defclass functional-layer (layer)
  ((f :initform nil)
   (args :initform nil :reader $function-arguments)))
(defun functional-layer (function)
  "Creates a layer that calls FUNCTION on execution."
  (let ((n (make-instance 'functional-layer)))
    (with-slots (f) n
      (setf f function))
    n))
(defmethod $execute ((l functional-layer) x &key (trainp t))
  (with-slots (f args) l
    (setf args x) ; remember the last input(s)
    (if f
        ;; a list input is spread as multiple arguments; :trainp is
        ;; always appended, so FUNCTION must accept that keyword
        (cond ((listp x) (apply f (append x (list :trainp trainp))))
              (t (apply f (list x :trainp trainp))))
        x)))
;; Dropout; the probability lives in a 1-element tensor so it is
;; persisted with the weights via $PARAMETERS.
(defclass dropout-layer (layer)
  ((dop :initform (tensor '(0.1)))))
(defun dropout-layer (dropout-probability)
  "Creates a dropout layer with the given drop probability."
  (let ((n (make-instance 'dropout-layer)))
    (with-slots (dop) n
      (setf dop (tensor (list dropout-probability))))
    n))
(defmethod $parameters ((l dropout-layer))
  (with-slots (dop) l
    (list dop)))
(defmethod $execute ((l dropout-layer) x &key (trainp t))
  (with-slots (dop) l
    ;; dropout is active only while training (TRAINP true)
    ($dropout x trainp ($ dop 0))))
;; Alias class so dropout can appear among recurrent cells.
(defclass dropout-cell (dropout-layer) ())
;; Affine cell for recurrent stacks.  WX: input weights, B: optional
;; bias, A: optional activation.
(defclass affine-cell (layer)
  ((wx :initform nil)
   (a :initform nil)
   (b :initform nil)))
(defun affine-cell (input-size output-size
                    &key (activation :sigmoid) (weight-initializer :he-normal)
                      weight-initialization weight-factor (biasp t))
  "Creates an INPUT-SIZE -> OUTPUT-SIZE affine cell with optional bias
and activation."
  (let ((n (make-instance 'affine-cell)))
    ;; NOTE: removed the nonexistent WI slot the original WITH-SLOTS listed.
    (with-slots (wx b a) n
      (setf a (afn activation))
      (when biasp (setf b ($parameter (zeros output-size))))
      (setf wx (wif weight-initializer (list input-size output-size)
                    weight-initialization weight-factor)))
    n))
(defmethod $train-parameters ((l affine-cell))
  (with-slots (wx b) l
    (if b (list wx b) (list wx))))
(defmethod $parameters ((l affine-cell))
  (with-slots (wx b) l
    (if b (list wx b) (list wx))))
(defun embeddedp (x)
(or (typep x 'tensor.long)
(typep x 'tensor.int)))
(defun affine-cell-forward (x wx b)
(if (embeddedp x)
($emb x wx b)
($affine x wx b)))
(defmethod $execute ((l affine-cell) x &key (trainp t))
(with-slots (wx b a) l
(let ((b0 (when b ($data b))))
(if a
(if trainp
(funcall a (affine-cell-forward x wx b))
(funcall a (affine-cell-forward x ($data wx) b0)))
(if trainp
(affine-cell-forward x wx b)
(affine-cell-forward x ($data wx) b0))))))
(defun concat-sequence (seq)
  "Concatenates the tensors in SEQ along dimension 0, then reshapes the
result to (length-of-SEQ . size-of-one-element)."
  (let ((joined (apply #'$concat (append seq (list 0)))))
    (apply #'$reshape joined ($count seq) ($size (car seq)))))
(defun compute-dot-product-attention (hs q)
  "computes attention context from hs(TxBxD) and q(BxD)"
  ;; scaled dot-product attention: ctx = softmax(q.k'/sqrt(D)) . k
  (let* ((d ($size q 1))                     ; feature dimension D
         (q (-> (apply #'$reshape q (cons 1 ($size q)))
                ($transpose 0 1)))           ; query as Bx1xD
         (k ($transpose hs 0 1))             ; keys/values as BxTxD
         (kt ($transpose k 1 2))             ; BxDxT
         (qkt ($div ($bmm q kt) ($sqrt d)))  ; scaled scores, Bx1xT
         ;; softmax over T, done in 2D then restored to Bx1xT
         (a (-> ($softmax ($reshape qkt ($size qkt 0) ($size qkt 2)))
                ($reshape ($size qkt 0) 1 ($size qkt 2))))
         ;; weighted sum of values, flattened back to BxD
         (ctx (-> ($bmm a k)
                  ($reshape ($size k 0) ($size k 2)))))
    ctx))
;; Attention cell: computes a context vector from stored memory HS and a
;; query, via a pluggable attention function.
(defclass attention-cell (layer)
  ((hs :initform nil :accessor $memory)
   (fn :initform #'compute-dot-product-attention :accessor $computer)))

(defun attention-cell (&key (computer :dot-product))
  "Creates an attention-cell.  COMPUTER selects the attention function;
only :dot-product is implemented and it is also the fallback."
  (let ((cell (make-instance 'attention-cell)))
    (setf ($computer cell)
          (case computer
            (:dot-product #'compute-dot-product-attention)
            (otherwise #'compute-dot-product-attention)))
    cell))

(defmethod $set-memory! ((cell attention-cell) hs)
  "Installs HS as the memory attention is computed against."
  (setf ($memory cell) hs)
  cell)

(defmethod $execute ((cell attention-cell) q &key (trainp t))
  (declare (ignore trainp))
  (with-slots (hs fn) cell
    (unless (and hs fn)
      (error "no memory to compute"))
    (funcall fn hs q)))
;; Plain (Elman) recurrent cell: h' = a(x.wx + h.wh + bh).
(defclass rnn-cell (layer)
  ((wx :initform nil)    ; input-to-hidden weights
   (wh :initform nil)    ; hidden-to-hidden weights
   (a :initform nil)     ; activation function, or nil
   (bh :initform nil)    ; hidden bias, or nil
   (ph :initform nil)))  ; previous hidden state

(defun rnn-cell (input-size output-size
                 &key (activation :tanh) (weight-initializer :he-normal)
                   weight-initialization weight-factor (biasp t))
  "Creates an rnn-cell mapping INPUT-SIZE inputs to OUTPUT-SIZE hidden
units with the given activation and weight initializer."
  (let ((n (make-instance 'rnn-cell)))
    ;; NOTE(review): the original with-slots also listed WI, which is not
    ;; a slot of rnn-cell and was never used; removed.
    (with-slots (wx wh bh ph a) n
      (setf a (afn activation))
      (when biasp (setf bh ($parameter (zeros output-size))))
      (setf wx (wif weight-initializer (list input-size output-size)
                    weight-initialization weight-factor))
      (setf wh (wif weight-initializer (list output-size output-size)
                    weight-initialization weight-factor)))
    n))

(defmethod $keep-state! ((l rnn-cell) statefulp &optional (truncatedp T))
  "When STATEFULP, keeps the hidden state across calls (detached from
the computation graph when TRUNCATEDP); otherwise clears it."
  (with-slots (ph) l
    (when ph
      (if statefulp
          (if ($parameterp ph)
              (if truncatedp
                  (setf ph ($clone ($data ph)))))
          (setf ph nil)))
    l))

(defmethod $cell-state ((l rnn-cell))
  (with-slots (ph) l
    ph))

(defmethod $update-cell-state! ((l rnn-cell) h)
  (with-slots (ph) l
    (setf ph h))
  l)

(defmethod $train-parameters ((l rnn-cell))
  (with-slots (wx wh bh) l
    (if bh (list wx wh bh) (list wx wh))))

(defmethod $parameters ((l rnn-cell))
  (with-slots (wx wh bh) l
    (if bh (list wx wh bh) (list wx wh))))

(defun embedding-forward (xi wx ph wh b)
  ;; xi holds embedding row indices; the recurrent part is still affine
  (let ((xp ($index wx 0 xi))
        (hp ($affine ph wh b)))
    ($+ xp hp)))

(defun rnn-cell-forward (x wx ph wh bh)
  ;; integer input -> embedding lookup; otherwise fused double affine
  (if (embeddedp x)
      (embedding-forward x wx ph wh bh)
      ($affine2 x wx ph wh bh)))

(defmethod $execute ((l rnn-cell) x &key (trainp t))
  "Runs one recurrent step on X, updating and returning the new hidden
state.  When TRAINP is nil raw tensors are used (no gradient graph)."
  (with-slots (wx wh bh ph a) l
    (let ((ph0 (if ph ph (zeros ($size x 0) ($size wx 1))))
          (bh0 (when bh ($data bh))))
      (let ((ph1 (if a
                     (if trainp
                         (funcall a (rnn-cell-forward x wx ph0 wh bh))
                         (funcall a (rnn-cell-forward x ($data wx) ph0 ($data wh) bh0)))
                     (if trainp
                         (rnn-cell-forward x wx ph0 wh bh)
                         (rnn-cell-forward x ($data wx) ph0 ($data wh) bh0)))))
        (setf ph ph1)
        ph1))))
;; lstm alternative implementation - faster
;; Uses a single fused weight matrix holding all four gates
;; (forget/input/output/candidate), each of width output-size.
(defclass lstm-cell (layer)
  ((wx :initform nil)    ; input weights, input-size x (4*output-size)
   (wh :initform nil)    ; hidden weights, output-size x (4*output-size)
   (bh :initform nil)    ; fused gate bias, or nil
   (ph :initform nil)    ; previous hidden state
   (pc :initform nil)))  ; previous cell state

(defun lstm-cell (input-size output-size
                  &key (weight-initializer :he-normal)
                    (weight-initialization) weight-factor (biasp t))
  "Creates an lstm-cell mapping INPUT-SIZE inputs to OUTPUT-SIZE units."
  (let ((n (make-instance 'lstm-cell)))
    (with-slots (wx wh bh) n
      (when biasp
        (setf bh ($parameter (zeros (* 4 output-size)))))
      (setf wx (wif weight-initializer (list input-size (* 4 output-size))
                    weight-initialization weight-factor))
      (setf wh (wif weight-initializer (list output-size (* 4 output-size))
                    weight-initialization weight-factor))
      ;; initialize the forget-gate slice of the bias to 1, a common
      ;; trick so the cell initially remembers
      (when biasp
        ($fill! ($narrow ($data bh) 0 0 output-size) 1D0))
      )
    n))

(defmethod $keep-state! ((l lstm-cell) statefulp &optional (truncatedp T))
  "When STATEFULP, keeps hidden and cell state across calls (detached
when TRUNCATEDP); otherwise clears both."
  (with-slots (ph pc) l
    (when (and ph pc)
      (if statefulp
          (if (and ($parameterp ph) ($parameterp pc))
              (if truncatedp
                  (setf ph ($clone ($data ph))
                        pc ($clone ($data pc)))))
          (setf ph nil
                pc nil)))
    l))

(defmethod $cell-state ((l lstm-cell))
  ;; state is the pair (hidden cell)
  (with-slots (ph pc) l
    (list ph pc)))

(defmethod $update-cell-state! ((l lstm-cell) h)
  ;; H is a pair (hidden cell), as returned by $cell-state
  (with-slots (ph pc) l
    (setf ph (car h)
          pc (cadr h)))
  l)

(defmethod $train-parameters ((l lstm-cell))
  (with-slots (wx wh bh) l
    (if bh
        (list wx wh bh)
        (list wx wh))))

(defmethod $parameters ((l lstm-cell))
  (with-slots (wx wh bh) l
    (if bh
        (list wx wh bh)
        (list wx wh))))
(defmethod $execute ((l lstm-cell) x &key (trainp t))
  "Runs one LSTM step on X; updates and returns the new hidden state.
When TRAINP is nil raw tensors are used so no gradient graph is built."
  ;; NOTE(review): the original with-slots also listed fspec/ispec/
  ;; ospec/aspec, which are not slots of lstm-cell; removed.  The gate
  ;; math, previously duplicated in both branches, is now shared.
  (with-slots (wx wh bh ph pc) l
    (let* ((ph0 (if ph ph (zeros ($size x 0) ($size wh 0))))
           (pc0 (if pc pc (zeros ($size x 0) ($size wh 0))))
           (szf (/ ($size wx 1) 4))  ; width of one gate slice
           ;; one fused affine produces all four gate pre-activations
           (ra (if trainp
                   (rnn-cell-forward x wx ph0 wh bh)
                   (rnn-cell-forward x ($data wx) ph0 ($data wh)
                                     (when bh ($data bh))))))
      (let* ((ft ($sigmoid ($narrow ra 1 0 szf)))          ; forget gate
             (it ($sigmoid ($narrow ra 1 szf szf)))        ; input gate
             (ot ($sigmoid ($narrow ra 1 (* 2 szf) szf)))  ; output gate
             (at ($tanh ($narrow ra 1 (* 3 szf) szf)))     ; candidate
             (ct ($+ ($* ft pc0) ($* at it)))              ; new cell state
             (ht ($* ot ($tanh ct))))                      ; new hidden state
        (setf ph ht
              pc ct)
        ht))))
;; Gated recurrent unit cell with separate update (z), reset (r) and
;; candidate (h) weight pairs.
(defclass gru-cell (layer)
  ((wz :initform nil)    ; update gate, input weights
   (uz :initform nil)    ; update gate, hidden weights
   (bz :initform nil)    ; update gate bias
   (wr :initform nil)    ; reset gate, input weights
   (ur :initform nil)    ; reset gate, hidden weights
   (br :initform nil)    ; reset gate bias
   (wh :initform nil)    ; candidate, input weights
   (uh :initform nil)    ; candidate, hidden weights
   (bh :initform nil)    ; candidate bias
   (ph :initform nil)))  ; previous hidden state

(defun gru-cell (input-size output-size
                 &key (weight-initializer :he-normal)
                   weight-initialization weight-factor (biasp t))
  "Creates a gru-cell mapping INPUT-SIZE inputs to OUTPUT-SIZE units."
  (let ((n (make-instance 'gru-cell)))
    (with-slots (wz uz bz wr ur br wh uh bh) n
      (when biasp
        ;; reset-gate bias starts at -1 (biases the gate toward
        ;; resetting early in training)
        (setf bz ($parameter (zeros output-size))
              br ($parameter ($* -1 (ones output-size)))
              bh ($parameter (zeros output-size))))
      (setf wz (wif weight-initializer (list input-size output-size)
                    weight-initialization weight-factor))
      (setf uz (wif weight-initializer (list output-size output-size)
                    weight-initialization weight-factor))
      (setf wr (wif weight-initializer (list input-size output-size)
                    weight-initialization weight-factor))
      (setf ur (wif weight-initializer (list output-size output-size)
                    weight-initialization weight-factor))
      (setf wh (wif weight-initializer (list input-size output-size)
                    weight-initialization weight-factor))
      (setf uh (wif weight-initializer (list output-size output-size)
                    weight-initialization weight-factor)))
    n))

(defmethod $keep-state! ((l gru-cell) statefulp &optional (truncatedp T))
  "When STATEFULP, keeps the hidden state across calls (detached when
TRUNCATEDP); otherwise clears it."
  (with-slots (ph) l
    (when ph
      (if statefulp
          (if ($parameterp ph)
              (if truncatedp
                  (setf ph ($clone ($data ph)))))
          (setf ph nil)))
    l))

(defmethod $cell-state ((l gru-cell))
  (with-slots (ph) l
    ph))

(defmethod $update-cell-state! ((l gru-cell) h)
  (with-slots (ph) l
    (setf ph h))
  l)

(defmethod $train-parameters ((l gru-cell))
  (with-slots (wz uz bz wr ur br wh uh bh) l
    (if bz
        (list wz uz bz wr ur br wh uh bh)
        (list wz uz wr ur wh uh))))

(defmethod $parameters ((l gru-cell))
  (with-slots (wz uz bz wr ur br wh uh bh) l
    (if bz
        (list wz uz bz wr ur br wh uh bh)
        (list wz uz wr ur wh uh))))

(defmethod $execute ((l gru-cell) x &key (trainp t))
  "Runs one GRU step on X; updates and returns the new hidden state.
h' = z*h + (1-z)*tanh(W.x + U.(r*h) + b).  When TRAINP is nil raw
tensors are used so no gradient graph is built."
  (with-slots (wz uz bz wr ur br wh uh bh ph) l
    (let ((ph0 (if ph ph (zeros ($size x 0) ($size wz 1))))
          (bz0 (when bz ($data bz)))
          (br0 (when br ($data br)))
          (bh0 (when bh ($data bh))))
      (if trainp
          (let* ((zt ($sigmoid (rnn-cell-forward x wz ph0 uz bz)))
                 (rt ($sigmoid (rnn-cell-forward x wr ph0 ur br)))
                 (ht ($+ ($* zt ph0)
                         ($* ($- 1 zt)
                             ($tanh (rnn-cell-forward x wh
                                                      ($* rt ph0) uh bh))))))
            (setf ph ht)
            ht)
          (let* ((zt ($sigmoid (rnn-cell-forward x ($data wz) ph0 ($data uz) bz0)))
                 (rt ($sigmoid (rnn-cell-forward x ($data wr) ph0 ($data ur) br0)))
                 (ht ($+ ($* zt ph0)
                         ($* ($- 1 zt)
                             ($tanh (rnn-cell-forward x ($data wh)
                                                      ($* rt ph0) ($data uh) bh0))))))
            (setf ph ht)
            ht)))))
;; Wraps a recurrent cell and applies it over a sequence of inputs.
(defclass recurrent-layer (layer)
  ((stateful :initform nil)   ; keep state across $execute calls?
   (truncated :initform nil)  ; detach kept state from the graph?
   (cell :initform nil :accessor $cell)))

(defun recurrent-layer (cell &key statefulp truncatedp)
  "Creates a recurrent-layer driving CELL over input sequences."
  (let ((n (make-instance 'recurrent-layer))
        (celli cell))
    (with-slots (stateful truncated cell) n
      (setf stateful statefulp)
      (setf truncated truncatedp)
      (setf cell celli))
    n))

(defmethod $train-parameters ((l recurrent-layer))
  (with-slots (cell) l
    ($train-parameters cell)))

(defmethod $keep-state! ((l recurrent-layer) statefulp &optional (truncatedp T))
  "Records the statefulness flags and forwards them to the cell."
  (with-slots (stateful truncated cell) l
    (setf stateful statefulp
          truncated truncatedp)
    ($keep-state! cell statefulp truncatedp))
  l)

(defmethod $parameters ((l recurrent-layer))
  (with-slots (cell) l
    ($parameters cell)))

(defmethod $execute ((l recurrent-layer) xs &key (trainp t))
  "Applies the cell to each element of the sequence XS in order,
returning the list of per-step outputs."
  (with-slots (cell stateful truncated) l
    ($keep-state! cell stateful truncated)
    (loop :for x :in xs
          :collect ($execute cell x :trainp trainp))))

(defmethod $cell-state ((l recurrent-layer))
  ($cell-state ($cell l)))

(defmethod $update-cell-state! ((l recurrent-layer) h)
  ($update-cell-state! ($cell l) h)
  l)

(defmethod $set-memory! ((l recurrent-layer) hs)
  ($set-memory! ($cell l) hs)
  l)
;; Runs one cell forward and another backward over the input sequence,
;; pairing their outputs per time step.
(defclass bidirectional-recurrent-layer (layer)
  ((stateful :initform nil)   ; keep state across $execute calls?
   (truncated :initform nil)  ; detach kept state from the graph?
   (fcell :initform nil :accessor $fcell)    ; forward-direction cell
   (bcell :initform nil :accessor $bcell)))  ; backward-direction cell
(defun bidirectional-recurrent-layer (fcell bcell &key statefulp truncatedp)
  "Creates a bidirectional recurrent layer running FCELL forward and
BCELL backward over input sequences."
  ;; BUG FIX: the original instantiated 'recurrent-layer, which has no
  ;; fcell/bcell slots, so the with-slots below would fail at runtime.
  (let ((n (make-instance 'bidirectional-recurrent-layer))
        (fcelli fcell)
        (bcelli bcell))
    (with-slots (stateful truncated fcell bcell) n
      (setf stateful statefulp)
      (setf truncated truncatedp)
      (setf fcell fcelli)
      (setf bcell bcelli))
    n))
(defmethod $train-parameters ((l bidirectional-recurrent-layer))
  (with-slots (fcell bcell) l
    (append ($train-parameters fcell) ($train-parameters bcell))))

(defmethod $keep-state! ((l bidirectional-recurrent-layer) statefulp &optional (truncatedp T))
  "Records the statefulness flags and forwards them to both cells."
  (with-slots (stateful truncated fcell bcell) l
    (setf stateful statefulp
          truncated truncatedp)
    ($keep-state! fcell statefulp truncatedp)
    ($keep-state! bcell statefulp truncatedp))
  l)

(defmethod $parameters ((l bidirectional-recurrent-layer))
  (with-slots (fcell bcell) l
    (append ($parameters fcell) ($parameters bcell))))

(defmethod $execute ((l bidirectional-recurrent-layer) xs &key (trainp t))
  "Runs FCELL over XS and BCELL over (reverse XS); returns a list of
(forward-output backward-output) pairs, one per step.
NOTE(review): the backward outputs are not re-reversed before pairing,
so pair i holds forward step i with backward step (n-1-i) — confirm
this is intended by callers."
  (with-slots (fcell bcell stateful truncated) l
    ($keep-state! fcell stateful truncated)
    ($keep-state! bcell stateful truncated)
    (let ((frs (loop :for x :in xs
                     :collect ($execute fcell x :trainp trainp)))
          (brs (loop :for x :in (reverse xs)
                     :collect ($execute bcell x :trainp trainp))))
      (mapcar (lambda (fr br) (list fr br)) frs brs))))

(defmethod $cell-state ((l bidirectional-recurrent-layer))
  ;; state is the pair (forward-state backward-state)
  (list ($cell-state ($fcell l)) ($cell-state ($bcell l))))

(defmethod $update-cell-state! ((l bidirectional-recurrent-layer) h)
  ($update-cell-state! ($fcell l) ($0 h))
  ($update-cell-state! ($bcell l) ($1 h))
  l)

(defmethod $set-memory! ((l bidirectional-recurrent-layer) hs)
  ($set-memory! ($fcell l) ($0 hs))
  ($set-memory! ($bcell l) ($1 hs))
  l)
;; Runs BODY with RNN made stateful (untruncated), restoring
;; statelessness afterwards even on non-local exit.
(defmacro with-keeping-state ((rnn) &body body)
  `(progn
     ($keep-state! ,rnn T nil)
     (unwind-protect
          (progn ,@body)
       ($keep-state! ,rnn nil nil))))

;; Generic protocol for sequence generation and sequence encoders.
(defgeneric $generate-sequence (rnn encoder seedseq n &optional temperature))
(defgeneric encoder-vocabulary-size (encoder))
(defgeneric encoder-encode (encoder sequences &key type))
(defgeneric encoder-decode (encoder matrices &key type))
(defgeneric encoder-choose (encoder probseqs &optional temperature))

;; Abstract base class for encoders.
(defclass encoder () ())
;; Maps words to integer ids and back, built from a corpus.
(defclass word-encoder (encoder)
  ((word-to-idx :initform #{} :reader encoder-vocabulary-map)  ; word -> id
   (idx-to-word :initform #{})                                 ; id -> word
   (words :initform nil :reader encoder-vocabularies)))        ; words in id order

(defun build-word-encoder (encoder data)
  "(Re)builds ENCODER's vocabulary from DATA, a sequence of words; ids
are assigned in order of first appearance."
  (with-slots (word-to-idx idx-to-word words) encoder
    (setf words nil
          word-to-idx #{}
          idx-to-word #{})
    (let ((tid 0))
      (loop :for word :in (coerce data 'list)
            :do (unless ($ word-to-idx word)
                  (setf ($ word-to-idx word) tid
                        ($ idx-to-word tid) word)
                  (push word words)
                  (incf tid))))
    (setf words ($array (reverse words))))
  encoder)

(defun word-encoder (&optional data)
  "Creates a word-encoder, building its vocabulary from DATA if given."
  (let ((n (make-instance 'word-encoder)))
    (when data (build-word-encoder n data))
    n))

(defmethod encoder-vocabulary-size ((encoder word-encoder))
  (with-slots (idx-to-word) encoder
    ($count idx-to-word)))
;; sentence is the list of strings
(defmethod encoder-encode ((encoder word-encoder) sentences &key (type :index))
  "Encodes SENTENCES (lists of words, all the same length) into a list
of per-time-step tensors of batch size (length SENTENCES).  TYPE :index
yields long tensors of word ids; :1-of-K yields one-hot rows.  Unknown
words fall back to the id of \"<unk>\"."
  (cond ((eq type :index) (let ((ntime ($count ($0 sentences)))
                                (nbatch ($count sentences)))
                            (with-slots (word-to-idx) encoder
                              (loop :for time :from 0 :below ntime
                                    :collect (let ((m (tensor.long (zeros nbatch))))
                                               (loop :for sentence :in sentences
                                                     :for b :from 0
                                                     :for word = ($ sentence time)
                                                     ;; second arg is the default: map
                                                     ;; unknown words to "<unk>"
                                                     :for idx = ($ word-to-idx word
                                                                   ($ word-to-idx "<unk>"))
                                                     :do (setf ($ m b) idx))
                                               m)))))
        ((eq type :1-of-K) (let ((ntime ($count ($0 sentences)))
                                 (nbatch ($count sentences))
                                 (vocab-size (encoder-vocabulary-size encoder)))
                             (with-slots (word-to-idx) encoder
                               (loop :for time :from 0 :below ntime
                                     :collect (let ((m (zeros nbatch vocab-size)))
                                                (loop :for sentence :in sentences
                                                      :for b :from 0
                                                      :for word = ($ sentence time)
                                                      :for idx = ($ word-to-idx word
                                                                    ($ word-to-idx "<unk>"))
                                                      :do (setf ($ m b idx) 1))
                                                m)))))))

(defmethod encoder-decode ((encoder word-encoder) matrices &key (type :index))
  "Inverse of encoder-encode: turns per-time-step tensors back into one
list of words per batch row."
  (cond ((eq type :index) (let ((nbatch ($size ($0 matrices) 0)))
                            (with-slots (idx-to-word) encoder
                              (loop :for b :from 0 :below nbatch
                                    :collect (loop :for m :in matrices
                                                   :for idx = ($ m b)
                                                   :collect ($ idx-to-word idx))))))
        ((eq type :1-of-k) (let ((nbatch ($size ($0 matrices) 0)))
                             (with-slots (idx-to-word) encoder
                               (loop :for b :from 0 :below nbatch
                                     :collect (loop :for m :in matrices
                                                    ;; recover the hot index from the
                                                    ;; nonzero-coordinates tensor
                                                    :for mi = ($nonzero m)
                                                    :for idx = ($ mi b 1)
                                                    :collect ($ idx-to-word idx))))))))
;; Maps characters to integer ids and back, built from a corpus string.
(defclass character-encoder (encoder)
  ((char-to-idx :initform #{} :reader encoder-vocabulary-map)     ; char -> id
   (idx-to-char :initform nil :reader encoder-vocabularies)))     ; id -> char

(defun build-character-encoder (encoder data)
  "(Re)builds ENCODER's vocabulary from the characters of DATA."
  (with-slots (char-to-idx idx-to-char) encoder
    (setf idx-to-char ($array (remove-duplicates (coerce data 'list))))
    (let ((vocab-size ($count idx-to-char)))
      (loop :for i :from 0 :below vocab-size
            :for ch = ($ idx-to-char i)
            :do (setf ($ char-to-idx ch) i))))
  encoder)

(defun character-encoder (&optional data)
  "Creates a character-encoder, building its vocabulary from DATA if given."
  (let ((n (make-instance 'character-encoder)))
    (when data (build-character-encoder n data))
    n))

(defmethod encoder-vocabulary-size ((encoder character-encoder))
  (with-slots (idx-to-char) encoder
    ($count idx-to-char)))

(defmethod encoder-encode ((encoder character-encoder) strings &key (type :index))
  "Encodes STRINGS (all the same length) into a list of per-time-step
tensors of batch size (length STRINGS).  TYPE :index yields long
tensors of character ids; :1-of-K yields one-hot rows.  Unlike the
word encoder there is no unknown-character fallback."
  (cond ((eq type :index) (let ((ntime ($count ($0 strings)))
                                (nbatch ($count strings)))
                            (with-slots (char-to-idx) encoder
                              (loop :for time :from 0 :below ntime
                                    :collect (let ((m (tensor.long (zeros nbatch))))
                                               (loop :for str :in strings
                                                     :for b :from 0
                                                     :for ch = ($ str time)
                                                     :do (setf ($ m b) ($ char-to-idx ch)))
                                               m)))))
        ((eq type :1-of-K) (let ((ntime ($count ($0 strings)))
                                 (nbatch ($count strings))
                                 (vocab-size (encoder-vocabulary-size encoder)))
                             (with-slots (char-to-idx) encoder
                               (loop :for time :from 0 :below ntime
                                     :collect (let ((m (zeros nbatch vocab-size)))
                                                (loop :for str :in strings
                                                      :for b :from 0
                                                      :for ch = ($ str time)
                                                      :do (setf ($ m b ($ char-to-idx ch)) 1))
                                                m)))))))

(defmethod encoder-decode ((encoder character-encoder) matrices &key (type :index))
  "Inverse of encoder-encode: turns per-time-step tensors back into one
string per batch row."
  (cond ((eq type :index) (let ((nbatch ($size ($0 matrices) 0)))
                            (with-slots (idx-to-char) encoder
                              (loop :for b :from 0 :below nbatch
                                    :collect (coerce (loop :for m :in matrices
                                                           :for idx = ($ m b)
                                                           :collect ($ idx-to-char idx))
                                                     'string)))))
        ((eq type :1-of-k) (let ((nbatch ($size ($0 matrices) 0)))
                             (with-slots (idx-to-char) encoder
                               (loop :for b :from 0 :below nbatch
                                     :collect (coerce (loop :for m :in matrices
                                                            ;; recover the hot index from
                                                            ;; the nonzero coordinates
                                                            :for mi = ($nonzero m)
                                                            :for idx = ($ mi b 1)
                                                            :collect ($ idx-to-char idx))
                                                      'string)))))))
(defun $choose (probabilities &optional (temperature 1D0))
  "select one of the index by their given relative probabilities"
  ;; Samples one index per row from logits softened by TEMPERATURE.
  ;; BUG FIX: the original tested (>= temperature 0), so a temperature
  ;; of exactly 0 divided by zero; 0 (and below) now selects the argmax
  ;; (greedy decoding), the conventional meaning of zero temperature.
  (let* ((dprobs (if ($parameterp probabilities) ($data probabilities) probabilities))
         (szprobs ($size dprobs)))
    (if (> temperature 0)
        (let ((nprobs ($div dprobs temperature)))
          (let ((probs ($softmax nprobs)))
            ($reshape! ($multinomial probs 1) (car szprobs))))
        (let ((res ($argmax dprobs 1)))
          ($reshape! res (car szprobs))))))
(defmethod encoder-choose ((encoder character-encoder) probseqs &optional (temperature 1D0))
  ;; sample one character id per step, then decode the ids to strings
  (encoder-decode encoder (mapcar (lambda (probs) ($choose probs temperature)) probseqs)))

(defmethod $generate-sequence ((rnn layer) (encoder character-encoder) seedstr n
                               &optional (temperature 1D0))
  "Generates a string of SEEDSTR plus N sampled characters by feeding
each sampled character back into RNN (made stateful for the duration).
NOTE(review): relies on $set-stateful, defined elsewhere — confirm it
toggles statefulness like $keep-state!."
  (let* ((seedps ($evaluate rnn (encoder-encode encoder (list seedstr))))
         (seedstrs (encoder-choose encoder seedps temperature))
         (laststrs (list (string ($last (car seedstrs)))))  ; last sampled char
         (resultstr (concatenate 'string seedstr (car laststrs))))
    ($set-stateful rnn T)
    (loop :for i :from 0 :below (1- n)
          :for nextseq = (encoder-encode encoder laststrs)
          :for nextoutps = ($evaluate rnn nextseq)
          :for nextoutstrs = (encoder-choose encoder nextoutps temperature)
          :do (progn
                (setf laststrs nextoutstrs)
                (setf resultstr (concatenate 'string resultstr (car nextoutstrs)))))
    ($set-stateful rnn nil)
    resultstr))

(defmethod encoder-choose ((encoder word-encoder) probseqs &optional (temperature 1D0))
  ;; sample one word id per step, then decode the ids to words
  (encoder-decode encoder (mapcar (lambda (probs) ($choose probs temperature)) probseqs)))

(defmethod $generate-sequence ((rnn layer) (encoder word-encoder) seedsentence n
                               &optional (temperature 1D0))
  "Generates SEEDSENTENCE plus N sampled words by feeding each sampled
word back into RNN (made stateful for the duration)."
  (let* ((seedps ($evaluate rnn (encoder-encode encoder (list seedsentence))))
         (seedsentences (encoder-choose encoder seedps temperature))
         (lastsentences (list (last (car seedsentences))))  ; one-word sentence
         (resultsentence (append seedsentence (car lastsentences))))
    ($set-stateful rnn T)
    (loop :for i :from 0 :below (1- n)
          :for nextseq = (encoder-encode encoder lastsentences)
          :for nextoutps = ($evaluate rnn nextseq)
          :for nextouts = (encoder-choose encoder nextoutps temperature)
          :do (progn
                (setf lastsentences nextouts)
                (setf resultsentence (append resultsentence (car nextouts)))))
    ($set-stateful rnn nil)
    resultsentence))
| 48,803
|
Common Lisp
|
.lisp
| 1,133
| 31.842012
| 97
| 0.531178
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
de3b99ed9b1bcd06c8d20f6de948057cbd67631761b2a767cc31dc4ae5bd3095
| 3,173
|
[
-1
] |
3,174
|
binadd.lisp
|
chunsj_TH/examples/binary-add/binadd.lisp
|
(defpackage :binary-add
(:use #:common-lisp
#:mu
#:th))
(in-package :binary-add)
;; number of bits per operand in the toy binary-addition task
(defparameter *binary-dim* 8)
;; cache mapping integers to their fixed-width binary representations
(defparameter *int2binary* #{})
(defun dec->bin (n)
  "Converts a non-negative integer N to a list of binary digits,
most-significant bit first; 0 maps to NIL."
  (if (zerop n)
      nil
      (multiple-value-bind (quotient bit) (floor n 2)
        (append (dec->bin quotient) (list bit)))))
(defun bin->dec (binary) (reduce (lambda (x y) (+ (* x 2) y)) binary))
;; binary to decimal and decimal to binary number
;; functions are for generating data
(prn (dec->bin 100))
(prn (bin->dec (dec->bin 123)))
;; for efficiency we will build map for dec-bin
(let* ((largest-number (round (expt 2 *binary-dim*))))
  (loop :for i :from 0 :below largest-number
        :for bin = (dec->bin i)
        ;; left-pad with zeros to the configured width.  FIX: the pad
        ;; width was hard-coded to 8; it now tracks *binary-dim* so the
        ;; table stays consistent if the bit width is changed.
        :for pad = (loop :for k :from 0 :below (- *binary-dim* ($count bin)) :collect 0)
        :do (setf ($ *int2binary* i) (tensor.byte (append pad bin)))))
;; training hyperparameters
(defparameter *alpha* 0.1)          ; learning rate
(defparameter *input-dim* 2)        ; two input bits per step (one per operand)
(defparameter *hidden-dim* 16)      ; recurrent hidden units
(defparameter *output-dim* 1)       ; one output bit per step
(defparameter *iterations* 10000)
;; weights scaled into [-1, 1) — assumes rnd is uniform on [0, 1); confirm
(defparameter *synapse0* ($parameter ($- ($* 2 (rnd *input-dim* *hidden-dim*)) 1)))
(defparameter *synapse1* ($parameter ($- ($* 2 (rnd *hidden-dim* *output-dim*)) 1)))
(defparameter *synapseh* ($parameter ($- ($* 2 (rnd *hidden-dim* *hidden-dim*)) 1)))
(defun binadd* (at bt &optional (ps (zeros 1 *hidden-dim*)))
  "One RNN step: takes a bit AT and BT from each operand plus the
previous hidden state PS; returns (output-activation hidden-activation)."
  (let* ((x (tensor (list (list at bt))))                  ; 1x2 input row
         (z1 ($add ($mm x *synapse0*) ($mm ps *synapseh*)))  ; hidden pre-activation
         (a1 ($sigmoid z1))                                ; hidden state
         (z2 ($mm a1 *synapse1*))
         (a2 ($sigmoid z2)))                               ; predicted output bit
    (list a2 a1)))
(defun binadd (a b &optional c)
  "Runs the RNN over the bits of A and B (least-significant first).
With target C it also accumulates per-step squared-error losses and the
overall absolute error.  Returns (decimal-result bit-tensor losses
overall-error)."
  (let ((ps (zeros 1 *hidden-dim*))   ; hidden state carried across bits
        (d (zeros *binary-dim*))      ; predicted bits
        (losses nil)
        (overall-error 0))
    ;; forward propagation
    ;; we start with the least significant bit or the right most bit
    (loop :for position :from (1- *binary-dim*) :downto 0
          :for res = (binadd* ($ a position) ($ b position) ps)
          :for y* = ($0 res)
          :for cs = ($1 res)
          :for y = (when c ($transpose (tensor (list (list ($ c position))))))
          :for l2e = (when c ($- y y*))   ; raw error
          :for l = (when c ($expt l2e 2)) ; squared-error loss
          :do (progn
                (setf ps cs)  ; carry hidden state to the next bit
                (when c (push l losses))
                (when c (incf overall-error (abs ($ ($data l2e) 0 0))))
                (setf ($ d position) (round ($ ($data y*) 0 0)))))
    (list (round (bin->dec ($list d))) d losses overall-error)))
;; clear any accumulated gradients before training
(loop :for p :in (list *synapse0* *synapse1* *synapseh*)
      :do ($cg! p))

;; training: sample two random operands small enough that their sum
;; still fits in *binary-dim* bits, run the forward pass (which builds
;; the loss graph), then apply gradient descent
(time
 (loop :for j :from 0 :below *iterations*
       :for half-largest-number = (round (expt 2 (1- *binary-dim*)))
       :for a-int = (random half-largest-number)
       :for a = ($ *int2binary* a-int)
       :for b-int = (random half-largest-number)
       :for b = ($ *int2binary* b-int)
       :for c-int = (+ a-int b-int)
       :for c = ($ *int2binary* c-int)
       :do (let ((prediction (binadd a b c)))
             ($gd! (list *synapse0* *synapse1* *synapseh*) *alpha*)
             ;; progress report every 1000 iterations
             (when (zerop (rem j 1000))
               (prn "ITR:" j "ERR: " ($3 prediction))
               (prn "PRD:" (tensor.byte ($1 prediction)))
               (prn "TRU:" c)
               (prn a-int "+" b-int "=" ($0 prediction) "/" c-int)))))

;; spot-check the trained network on two fixed sums
(prn (+ 11 32) (car (binadd ($ *int2binary* 11) ($ *int2binary* 32))))
(prn (+ 27 98) (car (binadd ($ *int2binary* 27) ($ *int2binary* 98))))
;; expanded code logic without functions
;; you can refer this code to see how the forward/backward propagation through time works
(time
 (loop :for j :from 0 :below *iterations*
       :for half-largest-number = (round (expt 2 (1- *binary-dim*)))
       :for a-int = (random half-largest-number)
       :for a = ($ *int2binary* a-int)
       :for b-int = (random half-largest-number)
       :for b = ($ *int2binary* b-int)
       :for c-int = (+ a-int b-int)
       :for c = ($ *int2binary* c-int)
       :do (let ((d ($zero c))          ; predicted bits
                 (overall-error 0)
                 (losses nil)
                 (ps (zeros 1 *hidden-dim*)))  ; hidden state
             ;; forward propagation
             ;; right most bit is least significant bit
             (loop :for position :from (1- *binary-dim*) :downto 0
                   :for x = (tensor (list (list ($ a position) ($ b position))))
                   :for z1 = ($add ($mm x *synapse0*) ($mm ps *synapseh*))
                   :for a1 = ($sigmoid z1)   ; hidden activation
                   :for z2 = ($mm a1 *synapse1*)
                   :for a2 = ($sigmoid z2)   ; output bit prediction
                   :for y = ($transpose (tensor (list (list ($ c position)))))
                   :for l2e = ($- y a2)
                   :for l = ($expt l2e 2)    ; squared-error loss
                   :do (progn
                         (setf ps a1)        ; carry hidden state forward
                         (push l losses)
                         (incf overall-error (abs ($ ($data l2e) 0 0)))
                         (setf ($ d position) (round ($ ($data a2) 0 0)))))
             ;; backward pass + update via plain gradient descent
             ($gd! (list *synapse0* *synapse1* *synapseh*) *alpha*)
             ;;($rmgd! (list *synapse0* *synapse1* *synapseh*) *alpha*)
             ;;($amgd! (list *synapse0* *synapse1* *synapseh*))
             (when (zerop (rem j 1000))
               (prn "ITR:" j "ERR:" overall-error)
               (prn "PRD:" d)
               (prn "TRU:" c)
               (prn a-int "+" b-int "=" (bin->dec ($list d)) "/" c-int)))))
| 5,283
|
Common Lisp
|
.lisp
| 115
| 36.582609
| 89
| 0.520373
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
dc35f8daec86a591aa58c0c41d6a42f3a8848fe56c0e29c443c1006f76d4381e
| 3,174
|
[
-1
] |
3,175
|
squeezenet11.lisp
|
chunsj_TH/examples/pretrained/squeezenet11.lisp
|
;; Demo: classify sample images with a pretrained SqueezeNet 1.1.
(defpackage :squeezenet11-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.squeezenet11
        #:th.m.imagenet
        #:th.image))

(in-package :squeezenet11-example)

;; load weights - takes some time
(defparameter *squeezenet11-weights* (read-squeezenet11-weights))
(defparameter *squeezenet11-function* (squeezenet11 *squeezenet11-weights*))

;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (x (imagenet-input rgb)))
  (let ((squeezenet11-result (funcall *squeezenet11-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches squeezenet11-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((squeezenet11-result (funcall *squeezenet11-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches squeezenet11-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((squeezenet11-result (funcall *squeezenet11-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches squeezenet11-result))))

;; release the weights and network closure, then force collection
(setf *squeezenet11-weights* nil)
(setf *squeezenet11-function* nil)
(gcf)
| 1,256
|
Common Lisp
|
.lisp
| 27
| 42.296296
| 86
| 0.701554
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
5dc658d1f1dd19403c38d0e24acc4c290c8892d198af748b920dd12681886d8d
| 3,175
|
[
-1
] |
3,176
|
vgg16.lisp
|
chunsj_TH/examples/pretrained/vgg16.lisp
|
;; Demo: classify sample images with a pretrained VGG16.
(defpackage :vgg16-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.vgg16
        #:th.m.imagenet
        #:th.image))

(in-package :vgg16-example)

;; load weights - takes some time
(defparameter *vgg16-weights* (read-vgg16-weights))
(defparameter *vgg16-function* (vgg16 :all *vgg16-weights*))

;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (x (imagenet-input rgb)))
  (let ((vgg16-result (funcall *vgg16-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg16-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((vgg16-result (funcall *vgg16-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg16-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((vgg16-result (funcall *vgg16-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg16-result))))

;; release the weights and network closure, then force collection
(setf *vgg16-weights* nil)
(setf *vgg16-function* nil)
(gcf)
| 1,128
|
Common Lisp
|
.lisp
| 27
| 37.555556
| 86
| 0.66484
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
7eb750aa92c6f212622eedad3957f69e0e9777a35553fcea80e553b522ee598e
| 3,176
|
[
-1
] |
3,177
|
resnet101.lisp
|
chunsj_TH/examples/pretrained/resnet101.lisp
|
;; Demo: classify sample images with a pretrained ResNet-101 and show
;; both the single best class and the top-5 matches.
(defpackage :resnet101-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.resnet101
        #:th.m.imagenet
        #:th.image))

(in-package :resnet101-example)

;; load weights - takes some time
(defparameter *resnet101-weights* (read-resnet101-weights))
(defparameter *resnet101-function* (resnet101 :all *resnet101-weights*))

;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (x (imagenet-input rgb)))
  (let* ((resnet101-result (funcall *resnet101-function* x))
         ;; $max over dim 1 returns (values indices) pair tensors
         (max-val-idx ($max resnet101-result 1))
         (category-val ($ (car max-val-idx) 0 0))
         (category-idx ($ (cadr max-val-idx) 0 0)))
    (prn "SOFTMAX:" category-val)
    (prn "CATEGORY INDEX:" category-idx)
    (prn "CATEGORY DESCRIPTION:" ($ (imagenet-categories) category-idx))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet101-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let* ((resnet101-result (funcall *resnet101-function* x))
         (max-val-idx ($max resnet101-result 1))
         (category-val ($ (car max-val-idx) 0 0))
         (category-idx ($ (cadr max-val-idx) 0 0)))
    (prn "SOFTMAX:" category-val)
    (prn "CATEGORY INDEX:" category-idx)
    (prn "CATEGORY DESCRIPTION:" ($ (imagenet-categories) category-idx))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet101-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let* ((resnet101-result (funcall *resnet101-function* x))
         (max-val-idx ($max resnet101-result 1))
         (category-val ($ (car max-val-idx) 0 0))
         (category-idx ($ (cadr max-val-idx) 0 0)))
    (prn "SOFTMAX:" category-val)
    (prn "CATEGORY INDEX:" category-idx)
    (prn "CATEGORY DESCRIPTION:" ($ (imagenet-categories) category-idx))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet101-result))))

;; release the weights and network closure, then force collection
(setf *resnet101-weights* nil)
(setf *resnet101-function* nil)
(gcf)
| 2,101
|
Common Lisp
|
.lisp
| 45
| 41.155556
| 86
| 0.652683
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
41049daab41fe2f6454f28ce34e2d31c7b25a9da15c1900548d2e5dcd10a1e67
| 3,177
|
[
-1
] |
3,178
|
vgg19.lisp
|
chunsj_TH/examples/pretrained/vgg19.lisp
|
;; Demo: classify sample images with a pretrained VGG19.
(defpackage :vgg19-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.vgg19
        #:th.m.imagenet
        #:th.image))

(in-package :vgg19-example)

;; load weights - takes some time
(defparameter *vgg19-weights* (read-vgg19-weights))
(defparameter *vgg19-function* (vgg19 :all *vgg19-weights*))

;; cat categorizing - input should be 3x224x224 BGR image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (bgr (imagenet-input rgb)))
  (let ((vgg19-result (funcall *vgg19-function* bgr)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg19-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (bgr (imagenet-input rgb)))
  (let ((vgg19-result (funcall *vgg19-function* bgr)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg19-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (bgr (imagenet-input rgb)))
  (let ((vgg19-result (funcall *vgg19-function* bgr)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches vgg19-result))))

;; release the weights and network closure, then force collection
(setf *vgg19-weights* nil)
(setf *vgg19-function* nil)
(gcf)
| 1,140
|
Common Lisp
|
.lisp
| 27
| 38
| 86
| 0.668473
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
765bf038ca7e24757b8c9768e965286859e51780ac8079de9eed09598f8e4a79
| 3,178
|
[
-1
] |
3,179
|
resnet50.lisp
|
chunsj_TH/examples/pretrained/resnet50.lisp
|
;; Demo: classify sample images with a pretrained ResNet-50.
(defpackage :resnet50-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.resnet50
        #:th.m.imagenet
        #:th.image))

(in-package :resnet50-example)

;; load weights - takes some time
(defparameter *resnet50-weights* (read-resnet50-weights))
(defparameter *resnet50-function* (resnet50 :all *resnet50-weights*))

;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (x (imagenet-input rgb)))
  (let* ((resnet50-result (funcall *resnet50-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet50-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let* ((resnet50-result (funcall *resnet50-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet50-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let* ((resnet50-result (funcall *resnet50-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet50-result))))

;; release the weights and network closure, then force collection
(setf *resnet50-weights* nil)
(setf *resnet50-function* nil)
(gcf)
| 1,188
|
Common Lisp
|
.lisp
| 27
| 39.777778
| 86
| 0.679654
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
edfbc15756f77591d980b5b54456f8d4d93bb672ceda4637a738aec07d5ebfd7
| 3,179
|
[
-1
] |
3,180
|
densenet161.lisp
|
chunsj_TH/examples/pretrained/densenet161.lisp
|
;; Demo: classify sample images with a pretrained DenseNet-161.
(defpackage :densenet161-example
  (:use #:common-lisp
        #:mu
        #:th
        #:th.m.densenet161
        #:th.m.imagenet
        #:th.image))

(in-package :densenet161-example)

;; load weights - takes some time
(defparameter *densenet161-weights* (read-densenet161-weights))
(defparameter *densenet161-function* (densenet161 :all *densenet161-weights*))

;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
       (x (imagenet-input rgb)))
  (let ((densenet161-result (funcall *densenet161-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches densenet161-result))))

(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((densenet161-result (funcall *densenet161-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches densenet161-result))))

(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
       (x (imagenet-input rgb)))
  (let ((densenet161-result (funcall *densenet161-function* x)))
    (prn "TOP-5 MATCHES:" (imagenet-top5-matches densenet161-result))))

;; release the weights and network closure, then force collection
(setf *densenet161-weights* nil)
(setf *densenet161-function* nil)
(gcf)
| 1,242
|
Common Lisp
|
.lisp
| 27
| 41.777778
| 86
| 0.696443
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
77f81c238ca86b77248573ff96d305186996d0a2a52ab552146d5d7f23ab2c61
| 3,180
|
[
-1
] |
3,181
|
fcn.lisp
|
chunsj_TH/examples/pretrained/fcn.lisp
|
(defpackage :fcn-models
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.m.imagenet
#:th.m.vgg16
#:th.m.vgg19
#:th.m.resnet50
#:th.m.densenet161
#:th.m.squeezenet11))
(in-package :fcn-models)
(defun cats (f)
(let* ((rgb1 (tensor-from-png-file "data/cat.vgg16.png"))
(rgb2 (tensor-from-jpeg-file "data/cat.vgg16.jpg"))
(x1 (imagenet-input rgb1 t))
(x2 (imagenet-input rgb2 t))
(m1 (funcall f x1))
(m2 (funcall f x2)))
(prn (imagenet-top5-matches m1))
(prn (imagenet-top5-matches m2))))
(let* ((weights (read-vgg16-weights))
(f (vgg16fcn weights)))
(cats f)
(gcf))
(let* ((weights (read-vgg19-weights))
(f (vgg19fcn weights)))
(cats f)
(gcf))
(let* ((weights (read-resnet50-weights))
(f (resnet50fcn weights)))
(cats f)
(gcf))
(let* ((weights (read-densenet161-weights))
(f (densenet161fcn weights)))
(cats f)
(gcf))
(let* ((weights (read-squeezenet11-weights))
(f (squeezenet11fcn weights)))
(cats f)
(gcf))
| 1,095
|
Common Lisp
|
.lisp
| 41
| 21.04878
| 60
| 0.580707
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
ff6b31681cb7ab3abe8813e6eb0fdad195bf1979bdae34e50d8d2a3bc623fbfd
| 3,181
|
[
-1
] |
3,182
|
resnet152.lisp
|
chunsj_TH/examples/pretrained/resnet152.lisp
|
(defpackage :resnet152-example
(:use #:common-lisp
#:mu
#:th
#:th.m.resnet152
#:th.m.imagenet
#:th.image))
(in-package :resnet152-example)
;; load weights - takes some time
(defparameter *resnet152-weights* (read-resnet152-weights))
(defparameter *resnet152-function* (resnet152 :all *resnet152-weights*))
;; cat categorizing - input should be 3x224x224 RGB image
(let* ((rgb (tensor-from-png-file "data/cat.vgg16.png"))
(x (imagenet-input rgb)))
(let* ((resnet152-result (funcall *resnet152-function* x)))
(prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet152-result))))
(let* ((rgb (tensor-from-jpeg-file "data/cat.vgg16.jpg" :resize-dimension '(224 224)))
(x (imagenet-input rgb)))
(let* ((resnet152-result (funcall *resnet152-function* x)))
(prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet152-result))))
(let* ((rgb (tensor-from-jpeg-file "data/dog.vgg16.jpg" :resize-dimension '(224 224)))
(x (imagenet-input rgb)))
(let* ((resnet152-result (funcall *resnet152-function* x)))
(prn "TOP-5 MATCHES:" (imagenet-top5-matches resnet152-result))))
(setf *resnet152-weights* nil)
(setf *resnet152-function* nil)
(gcf)
| 1,207
|
Common Lisp
|
.lisp
| 27
| 40.481481
| 86
| 0.684838
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
746df07d65bbab658de80fdb5a8bfa290ae45fb6ceddb9b53de26e97b3c4ce40
| 3,182
|
[
-1
] |
3,183
|
rbm.lisp
|
chunsj_TH/examples/etc/rbm.lisp
|
;; from
;; https://github.com/odie2630463/Restricted-Boltzmann-Machines-in-pytorch
(defpackage :rbm
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :rbm)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *batch-size* 60)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(defparameter *n-vis* 784)
(defparameter *n-hin* 500)
(defparameter *rbm* (parameters))
(defparameter *w* ($push *rbm* ($* 1E-2 (rndn *n-vis* *n-hin*))))
(defparameter *vb* ($push *rbm* (zeros *n-vis*)))
(defparameter *hb* ($push *rbm* (zeros *n-hin*)))
(defparameter *k* 1)
(defun sample-from-p (p)
($relu ($sign ($- p (apply #'rnd ($size p))))))
(defun v-to-h (v)
(let* ((ah ($affine v *w* *hb*))
(ph ($sigmoid ah)))
ph))
(defun h-to-v (h)
(let* ((av ($affine h ($transpose *w*) *vb*))
(pv ($sigmoid av)))
pv))
(defun run (v)
(let* ((vh (v-to-h v))
(h (sample-from-p vh))
(vr nil))
(loop :for i :from 0 :below *k*
:for pv = (h-to-v h)
:for cv = (sample-from-p pv)
:for ph = (v-to-h cv)
:for ch = (sample-from-p ph)
:do (setf h ch
vr cv))
vr))
(defun free-energy (v)
(let* ((vbias ($mv v *vb*))
(wxb ($affine v *w* *hb*))
(hidden ($sum ($log ($+ ($exp wxb) 1)) 1)))
($mean ($- ($neg hidden) vbias))))
(defun opt! () ($gd! *rbm* 0.1))
(defun mean (vs) (* 1D0 (/ (reduce #'+ vs) ($count vs))))
(defparameter *epoch* 10)
($cg! *rbm*)
(loop :for epoch :from 1 :to *epoch*
:for loss = nil
:do (progn
($cg! *rbm*)
(loop :for input :in *mnist-train-image-batches*
:for sample = ($bernoulli input input)
:for v = sample
:for v1 = (run v)
:for l = ($- (free-energy v) (free-energy v1))
:do (progn
(push ($data l) loss)
(opt!)))
(prn epoch (mean loss))))
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
(defun outpng (data fname &optional (w 28) (h 28))
(let ((img (opticl:make-8-bit-gray-image w h))
(d ($reshape data w h)))
(loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img i j) (round (* 255 ($ d i j)))))))
(opticl:write-png-file (format nil "~A/~A" *output* fname) img)))
;; randomly selects an input and emits corresponding outputs
(let* ((input (car *mnist-train-image-batches*))
(sample ($bernoulli input input))
(v sample)
(v1 (run v))
(idx (random ($size input 0))))
(outpng ($index input 0 idx) "input.png")
(outpng ($index v 0 idx) "outv.png")
(outpng ($index ($data v1) 0 idx) "outv1.png"))
;; clear resources
(setf *mnist* nil)
(setf *mnist-train-image-batches* nil)
(gcf)
| 3,402
|
Common Lisp
|
.lisp
| 94
| 29.329787
| 89
| 0.542935
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
536ab7af33069e06138f611fa1e1ae672e89fe0ee0ac5fc5c40a3e0c1913b932
| 3,183
|
[
-1
] |
3,184
|
sentiment.lisp
|
chunsj_TH/examples/etc/sentiment.lisp
|
(defpackage :sentiment-example
(:use #:common-lisp
#:mu
#:th
#:th.db.imdb))
(in-package :sentiment-example)
;; read imdb review data (from udacity course)
(defparameter *imdb* (read-imdb-data2))
;; each line of imdb reviews data is a review corresponding to each label in labels.
;; we need to read each review and convert them as a unique collection of words
(defun process-review (review)
(remove-duplicates (->> (split #\space review)
(mapcar (lambda (w)
(cl-ppcre:regex-replace-all
"[^a-z0-9A-Z]"
(string-downcase w)
"")))
(remove-if-not (lambda (w) (> ($count w) 0))))
:test #'equal))
(defparameter *reviews* (->> ($ *imdb* :reviews)
(mapcar #'process-review)))
(defparameter *labels* (->> ($ *imdb* :labels)
(mapcar (lambda (s) (if (equal s "positive") 1 0)))))
;; we need to build word-to-index map
(defparameter *words* (-> (remove-duplicates (apply #'$concat *reviews*) :test #'equal)
(coerce 'vector)))
(defparameter *w2i* (let ((h (make-hash-table :test 'equal :size ($count *words*))))
(loop :for i :from 0 :below ($count *words*)
:for w = ($ *words* i)
:do (setf ($ h w) i))
h))
;; our real dataset will be the indices corresponding to each words
(defun review-to-indices (review-words)
(sort (remove-duplicates (->> review-words
(mapcar (lambda (w) ($ *w2i* w)))
(remove-if (lambda (w) (null w))))
:test #'equal)
#'<))
;; prepare dataset for training
(defparameter *train-dataset* (mapcar #'review-to-indices
(subseq *reviews* 0 (- ($count *reviews*) 1000))))
(defparameter *train-targets* (tensor (subseq *labels* 0 (- ($count *labels*) 1000))))
;; for testing
(defparameter *test-dataset* (mapcar #'review-to-indices
(subseq *reviews* (- ($count *reviews*) 1000))))
(defparameter *test-targets* (tensor (subseq *labels* (- ($count *labels*) 1000))))
;; build network
(defparameter *alpha* 0.01)
(defparameter *iterations* 2)
(defparameter *hidden-size* 100)
(defparameter *w01* ($parameter ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1)))
(defparameter *w12* ($parameter ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1)))
;; for easier reset
(defun reset-weights ()
(setf *w01* ($parameter ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1)))
(setf *w12* ($parameter ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1))))
;; prediction
(defun predict-sentiment (x)
(-> x
($wimb *w01*)
($sigmoid)
($dot *w12*)
($sigmoid)))
;; test performance
(defun print-test-perf ()
(let ((total 0)
(correct 0))
(loop :for i :from 0 :below (min 1000 ($count *test-dataset*))
:for x = ($ *test-dataset* i)
:for y = ($ *test-targets* i)
:do (let ((s ($data (predict-sentiment x))))
(incf total)
(when (< (abs (- s y)) 0.5)
(incf correct))))
(prn "=>" total correct)))
(gcf)
;; train using autograd, however, this is very slow compared to direct implementation
(reset-weights)
(time
(loop :for iter :from 1 :to *iterations*
:do (let ((total 0)
(correct 0))
(loop :for i :from 0 :below ($count *train-dataset*)
:for x :in *train-dataset*
:for y = ($ *train-targets* i)
:for y* = (predict-sentiment x)
:for d = ($sub y* y)
:for er = ($dot d d)
:do (progn
($gs! er 1)
($adgd! (list *w01* *w12*))
(incf total)
(when (< (abs ($data d)) 0.5)
(incf correct))
(when (zerop (rem i 100))
(prn iter total correct))))
(when (zerop (rem iter 1))
(print-test-perf)))))
;; direct implementation without using autodiff, faster than above
;; (XXX, there're differences in backpropagation in this code and the book has no explanation)
(defparameter *w01* ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1))
(defparameter *w12* ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1))
(defun reset-weights ()
(setf *w01* ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1))
(setf *w12* ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1)))
;; prediction utility function
(defun predict-sentiment (x)
(let* ((w01 ($index *w01* 0 x))
(l1 (-> ($sum w01 0)
($sigmoid!)))
(l2 (-> ($dot l1 *w12*)
($sigmoid!))))
l2))
;; print test stats
(defun print-test-perf ()
(let ((total 0)
(correct 0))
(loop :for i :from 0 :below (min 1000 ($count *test-dataset*))
:for x = ($ *test-dataset* i)
:for y = ($ *test-targets* i)
:do (let ((s (predict-sentiment x)))
(incf total)
(when (< (abs (- s y)) 0.5)
(incf correct))))
(prn "=>" total correct)))
(defun train (&optional (niter *iterations*))
(loop :for iter :from 1 :to niter
:do (let ((total 0)
(correct 0))
(loop :for i :from 0 :below ($count *train-dataset*)
:for x :in *train-dataset*
:for y = ($ *train-targets* i)
:for w01 = ($index *w01* 0 x)
:for l1 = (-> ($sum w01 0)
($sigmoid))
:for l2 = (-> ($dot l1 *w12*)
($sigmoid))
:for dl2 = ($sub l2 y)
:for dl1 = ($* dl2 ($transpose *w12*))
:do (let ((d1 ($mul! dl1 *alpha*))
(d2 ($mul! l1 (* dl2 *alpha*))))
(setf ($index *w01* 0 x)
($sub! w01 ($expand! d1 ($size w01))))
($sub! *w12* d2)
(incf total)
(when (< (abs dl2) 0.5)
(incf correct))))
(when (zerop (rem iter 1))
(prn iter total correct)
(print-test-perf)))))
;; execute training
(reset-weights)
(train)
;; personal test to check the network really works
(let* ((my-review "this so called franchise movie of avengers is great master piece. i've enjoyed it very much and my kids love this one as well. though my wife generally does not like this kind of genre, she said this one is better than others.")
(review (process-review my-review))
(x (review-to-indices review)))
(print x)
(print (predict-sentiment x)))
(let* ((my-review "this movie is just a political propaganda, it has neither entertainment or message. i just regret my spending of precious time on this one.")
(review (process-review my-review))
(x (review-to-indices review)))
(print x)
(print (predict-sentiment x)))
;; what hidden layer learns
(defun similar (word)
(let ((target-index ($ *w2i* word)))
(when target-index
(let ((weight-target ($ *w01* target-index))
(scores nil))
(loop :for w :across *words*
:for weight = ($ *w01* ($ *w2i* w))
:for difference = ($sub weight weight-target)
:for wdiff = ($dot difference difference)
:do (let ((score (sqrt (if ($parameterp wdiff) ($data wdiff) wdiff))))
(push (cons w score) scores)))
(subseq (sort scores (lambda (a b) (< (cdr a) (cdr b)))) 0 (min 10 ($count scores)))))))
(prn (similar "beautiful"))
(prn (similar "terrible"))
| 8,083
|
Common Lisp
|
.lisp
| 179
| 33.312849
| 247
| 0.50146
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
9fe546e14c2e6dda10f50be10afcd3ab4d893924e5be4771b7d134549fc310fa
| 3,184
|
[
-1
] |
3,185
|
hmm.lisp
|
chunsj_TH/examples/etc/hmm.lisp
|
(defpackage :hmm-simple
(:use #:common-lisp
#:mu
#:th))
(in-package :hmm-simple)
(defclass hmm ()
((n :accessor state-counts)
(p0 :accessor initial-state-probabilities)
(tm :accessor transition-matrix)
(em :accessor emission-matrix)))
(defun get-emission (hmm iobs)
(let ((nr ($size (emission-matrix hmm) 0)))
($ (emission-matrix hmm) (list 0 nr) (list iobs 1))))
(defun forward-init (hmm iobs)
(let ((pobs (get-emission hmm iobs)))
($* (initial-state-probabilities hmm) pobs)))
(defun forward-step (hmm iobs fwd)
(let ((transitions ($@ fwd ($transpose (get-emission hmm iobs)))))
(let ((weighted-transitions ($* transitions (transition-matrix hmm))))
(let ((nfwd ($sum weighted-transitions 0)))
(apply #'$reshape nfwd ($size fwd))))))
(defun decode-step (hmm iobs viterbi)
(let ((transitions ($@ viterbi ($transpose (get-emission hmm iobs)))))
(let ((weighted-transitions ($* transitions (transition-matrix hmm))))
(let ((nviterbi (car ($max weighted-transitions 0))))
(apply #'$reshape nviterbi ($size viterbi))))))
(defun backpt-step (hmm viterbi)
(let ((back-transitions ($@ viterbi (ones 1 (state-counts hmm)))))
(let ((weighted-back-transitions ($* back-transitions (transition-matrix hmm))))
(let ((rmax ($max weighted-back-transitions 0)))
(cadr rmax)))))
(defun forward-algorithm (hmm observations)
(let ((fwd (forward-init hmm (car observations))))
(loop :for iobs :in (cdr observations)
:for nfwd = (forward-step hmm iobs fwd)
:do (setf fwd nfwd))
($sum fwd)))
(defun viterbi-decode (hmm observations)
(let ((viterbi (forward-init hmm (car observations)))
(backpts (tensor.long ($* (ones (state-counts hmm) ($count observations)) -1))))
(loop :for iobs :in (cdr observations)
:for i :from 1
:for nviterbi = (decode-step hmm iobs viterbi)
:for backpt = (backpt-step hmm viterbi)
:do (setf viterbi nviterbi
($ backpts (list 0 ($size backpts 0)) (list i 1)) backpt))
(let ((tokens (list ($ (cadr ($max viterbi 0)) 0 0))))
(loop :for i :from (1- ($count observations)) :downto 1
:for lt = (car tokens)
:for bt = ($ backpts lt i)
:do (push bt tokens))
tokens)))
(defparameter *hmm* (make-instance 'hmm))
(defparameter *initial-state-probabilities* (tensor '((0.6) (0.4))))
(defparameter *transition-matrix* (tensor '((0.7 0.3) (0.4 0.6))))
(defparameter *emission-matrix* (tensor '((0.1 0.4 0.5) (0.6 0.3 0.1))))
(defparameter *observations* '(0 1 1 2 1))
(setf (state-counts *hmm*) ($count *initial-state-probabilities*))
(setf (initial-state-probabilities *hmm*) *initial-state-probabilities*)
(setf (transition-matrix *hmm*) *transition-matrix*)
(setf (emission-matrix *hmm*) *emission-matrix*)
(forward-algorithm *hmm* *observations*)
(viterbi-decode *hmm* *observations*)
| 2,953
|
Common Lisp
|
.lisp
| 63
| 41.587302
| 88
| 0.643379
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
2741504b13d97381f31f403d0c26ade5589ba3cdc30461af24292453d2be8b66
| 3,185
|
[
-1
] |
3,186
|
chal.lisp
|
chunsj_TH/examples/pp/chal.lisp
|
(defpackage :challenger-accident
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :challenger-accident)
(defparameter *data* (->> (slurp "./data/challenger.csv")
(cdr)
(mapcar (lambda (line)
(let ((ss (split #\, line)))
(list (parse-integer ($1 ss))
(parse-integer ($2 ss) :junk-allowed T)))))
(filter (lambda (rec)
(and (car rec) (cadr rec))))))
(defparameter *temperature* (tensor (mapcar #'car *data*)))
(defparameter *failure* (tensor (mapcar #'cadr *data*)))
(defun p (temperature alpha beta)
($sigmoid ($neg ($add ($mul temperature beta) alpha))))
(defun posterior (alpha beta)
(let ((prior-alpha (score/normal alpha 0 1000.0))
(prior-beta (score/normal beta 0 1000.0)))
(when (and prior-alpha prior-beta)
(let ((l (score/bernoulli *failure* (p *temperature* alpha beta))))
(when l
($+ prior-alpha prior-beta l))))))
(let ((traces (mcmc/mh '(0.0 0.0) #'posterior :type :sc)))
(prn traces))
;; though with above posterior function, my code emits proper results.
;; however, the book, Bayesian Methods for Hackers uses fitting first.
;; and with thr fitting result as the starting point, samples again.
;; MAP - to fit alpha beta properly
(prn (map/fit #'posterior '(0.0 0.0)))
(defun posterior (alpha beta)
(let ((prior-alpha (score/normal alpha -15.05 100.0))
(prior-beta (score/normal beta 0.23 1.0)))
(when (and prior-alpha prior-beta)
(let ((l (score/bernoulli *failure* (p *temperature* alpha beta))))
(when l
($+ prior-alpha prior-beta l))))))
(let ((traces (mcmc/mh '(0.0 0.0) #'posterior :type :sc)))
(prn traces))
| 1,881
|
Common Lisp
|
.lisp
| 41
| 36.341463
| 87
| 0.565574
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
d018e465c4359dd321ae5548984b524c5ed792a44bdcf7ff508ae5f2664b5663
| 3,186
|
[
-1
] |
3,187
|
em-exam.lisp
|
chunsj_TH/examples/pp/em-exam.lisp
|
(defpackage :em-example
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :em-example)
(defparameter *ts* nil)
(defun p (p1 p2 &optional (m 0.5))
(if (< (random 1.0) m)
(max p1 p2)
(min p1 p2)))
;;
;; PROBLEM 1.
;;
;; we have a fair coin. flip it and if it shows head, then sample from a binomial
;; distribution with p = p1 = 0.8, if tail, sample from another binomial distribution
;; with p = p2 = 0.45.
;; the sampling shows the data of 5 9 8 4 7.
;;
;; if we do not know p1 and p2, then how can we estimate them from the data?
;; this is the problem for explaining EM algorithm, but here, i'd like to estimate
;; them using MCMC. (to know whether it is possible)
;;
(defparameter *data* (tensor '(5 9 8 4 7)))
(defun posterior (p1 p2)
(let ((prior-p1 (score/uniform p1 0.0 1.0))
(prior-p2 (score/uniform p2 0.0 1.0)))
(when (and (> p1 p2) prior-p1 prior-p2)
(let ((l (loop :for i :from 0 :below ($count *data*)
:for p = (p p1 p2)
:summing (score/binomial ($ *data* i) p 10))))
($+ prior-p1 prior-p2 l)))))
(let ((traces (mcmc/mh '(0.51 0.5) #'posterior)))
(setf *ts* traces)
(loop :for trace :in traces
:do (prn trace (trace/hpd trace))))
;;
;; PROBLEM 2.
;;
;; now the coin is not fair, its bias is m = 0.6, which is another unknown.
;; estimate p1, p2 and m.
(defparameter *data2* (tensor '(10 4 3 7 8)))
(defun posterior2 (p1 p2 m)
(let ((prior-p1 (score/uniform p1 0.0 1.0))
(prior-p2 (score/uniform p2 0.0 1.0))
(prior-m (score/uniform m 0.0 1.0)))
(when (and (> p1 p2) prior-p1 prior-p2 prior-m)
(let ((l (loop :for i :from 0 :below ($count *data2*)
:for p = (p p1 p2 m)
:summing (score/binomial ($ *data2* i) p 10))))
($+ prior-p1 prior-p2 prior-m l)))))
(let ((traces (mcmc/mh '(0.51 0.5 0.5) #'posterior2)))
(setf *ts* traces)
(loop :for trace :in traces
:do (prn trace (trace/hpd trace))))
(trace/values ($2 *ts*))
| 2,055
|
Common Lisp
|
.lisp
| 56
| 31.696429
| 85
| 0.585808
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
8450141f0629ec6fff67918854eed7d5e919e265ea9c17d651fbb6731e5be11e
| 3,187
|
[
-1
] |
3,188
|
mixture.lisp
|
chunsj_TH/examples/pp/mixture.lisp
|
(defpackage :mixture
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :mixture)
(defparameter *data* (->> (slurp "data/mixture.csv")
(mapcar (lambda (f) (parse-float f)))
(tensor)))
(defun posterior (p s0 s1 c0 c1)
(let ((p-prior (score/uniform p 0.0 1.0))
(s0-prior (score/uniform s0 0.0 100.0))
(s1-prior (score/uniform s1 0.0 100.0))
(c0-prior (score/gaussian c0 120.0 10.0))
(c1-prior (score/gaussian c1 190.0 10.0)))
(when (and p-prior s0-prior s1-prior c0-prior c1-prior)
(let ((assignments (sample/categorical (tensor (list p (- 1 p))) ($count *data*))))
(let ((sd (tensor (loop :for i :from 0 :below ($count assignments)
:collect (if (zerop ($ assignments i))
s0
s1))))
(mean (tensor (loop :for i :from 0 :below ($count assignments)
:collect (if (zerop ($ assignments i))
c0
c1)))))
(let ((likelihood (score/normal *data* mean sd)))
(when likelihood
($+ p-prior s0-prior s1-prior c0-prior c1-prior likelihood))))))))
(let ((traces (mcmc/mh '(0.5 50.0 50.0 120.0 190.0) #'posterior)))
(loop :for trace :in traces
:do (prn trace (trace/hpd trace))))
(map/fit #'posterior '(0.5 50.0 50.0 120.0 190.0))
| 1,555
|
Common Lisp
|
.lisp
| 32
| 33.6875
| 89
| 0.480237
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
c6421b24307ed0e8953392318eb32fcd5427b3015c16732848674af9c5924ca9
| 3,188
|
[
-1
] |
3,189
|
bin.lisp
|
chunsj_TH/examples/pp/bin.lisp
|
(defpackage :infer-binomial
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :infer-binomial)
(defvar *flips* (tensor '(1 1 1 1 1 1 1 1 1 1 1 0 0 0)))
(defun binomial-posterior (theta)
(let ((prior-theta (score/beta theta 1 1)))
(when prior-theta
(let ((likelihood-flips (score/bernoulli *flips* theta)))
(when likelihood-flips
($+ prior-theta likelihood-flips))))))
(let ((traces (mcmc/mh '(0.5) #'binomial-posterior)))
(prn traces))
| 500
|
Common Lisp
|
.lisp
| 15
| 28.2
| 63
| 0.615385
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
630b62cc3ed898b2cea254f064fad40de53e64752c30e7f72a91348d22ef0d78
| 3,189
|
[
-1
] |
3,190
|
pp-sms.lisp
|
chunsj_TH/examples/pp/pp-sms.lisp
|
(defpackage :pp-sms
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :pp-sms)
(defparameter *sms* (->> (slurp "./data/sms.txt")
(mapcar #'parse-float)
(mapcar #'round)
(tensor)))
(defparameter *mean* ($mean *sms*))
(defparameter *rate* (/ 1D0 *mean*))
(defun sms-posterior (switch-point early-mean late-mean)
(let ((data *sms*))
(let ((prior-switch-point (score/discrete-uniform switch-point 1 (- ($count data) 2)))
(prior-early-mean (score/exponential early-mean *rate*))
(prior-late-mean (score/exponential late-mean *rate*)))
(when (and prior-switch-point
prior-early-mean
prior-late-mean)
(let ((rate (-> ($one data)
($fill! early-mean))))
(setf ($slice rate switch-point) late-mean)
(let ((likelihood-mean (score/poisson data rate)))
(when likelihood-mean
($+ prior-switch-point
prior-early-mean
prior-late-mean
likelihood-mean))))))))
(defun sms-posterior2 (switch-point delta early-mean mid-mean late-mean)
(let ((data *sms*)
(n ($count *sms*))
(sw2 (+ switch-point delta)))
(when (< sw2 (- n 2))
(let ((prior-switch-point1 (score/discrete-uniform switch-point 20 60))
(prior-switch-point2 (score/discrete-uniform delta 2 (- n sw2 2)))
(prior-early-mean (score/exponential early-mean *rate*))
(prior-mid-mean (score/exponential mid-mean *rate*))
(prior-late-mean (score/exponential late-mean *rate*)))
(when (and prior-switch-point1
prior-switch-point2
prior-early-mean
prior-mid-mean
prior-late-mean)
(let ((rate (-> ($one data)
($fill! early-mean))))
(setf ($slice rate switch-point sw2) mid-mean)
(setf ($slice rate sw2) late-mean)
(let ((likelihood-mean (score/poisson data rate)))
(when likelihood-mean
($+ prior-switch-point1
prior-switch-point2
prior-early-mean
prior-mid-mean
prior-late-mean
likelihood-mean)))))))))
(prn (map/fit #'sms-posterior '(37 20.0 20.0)))
(time
(let ((traces (mcmc/mh '(37 20.0 20.0) #'sms-posterior)))
(loop :for trc :in traces
:for lbl :in '(:switch-point :early-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 20.0 20.0) #'sms-posterior :type :ae)))
(loop :for trc :in traces
:for lbl :in '(:switch-point :early-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 20.0 20.0) #'sms-posterior :type :sc)))
(loop :for trc :in traces
:for lbl :in '(:switch-point :early-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 20.0 20.0) #'sms-posterior :type :em)))
(loop :for trc :in traces
:for lbl :in '(:switch-point :early-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(prn (map/fit #'sms-posterior2 '(37 10 20.0 20.0 20.0)))
(time
(let ((traces (mcmc/mh '(37 10 20.0 20.0 20.0) #'sms-posterior2)))
(loop :for trc :in traces
:for lbl :in '(:switch-point1 :switch-point2 :early-mean :mid-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 10 20.0 20.0 20.0) #'sms-posterior2 :type :ae)))
(loop :for trc :in traces
:for lbl :in '(:switch-point1 :switch-point2 :early-mean :mid-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 10 20.0 20.0 20.0) #'sms-posterior2 :type :sc)))
(loop :for trc :in traces
:for lbl :in '(:switch-point1 :switch-point2 :early-mean :mid-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
(time
(let ((traces (mcmc/mh '(37 10 20.0 20.0 20.0) #'sms-posterior2 :type :em)))
(loop :for trc :in traces
:for lbl :in '(:switch-point1 :switch-point2 :early-mean :mid-mean :late-mean)
:do (prn lbl trc (trace/hpd trc) (trace/act trc)))))
| 4,460
|
Common Lisp
|
.lisp
| 98
| 35.816327
| 90
| 0.559006
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
d23411be4540bd1d402ae4229382dd0d30da55e12db4d9526eaf0af180538993
| 3,190
|
[
-1
] |
3,191
|
pp-disaster.lisp
|
chunsj_TH/examples/pp/pp-disaster.lisp
|
(defpackage :pp-disaster
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :pp-disaster)
;; mining disaster problem
(defparameter *disasters* (tensor
'(4 5 4 0 1 4 3 4 0 6 3 3 4 0 2 6
3 3 5 4 5 3 1 4 4 1 5 5 3 4 2 5
2 2 3 4 2 1 3 2 2 1 1 1 1 3 0 0
1 0 1 1 0 0 3 1 0 3 2 2 0 1 1 1
0 1 0 1 0 0 0 2 1 0 0 0 1 1 0 2
3 3 1 1 2 1 1 1 1 2 4 2 0 0 1 4
0 0 0 1 0 0 0 0 0 1 0 0 1 0 1)))
(defparameter *mean* ($mean *disasters*))
(defparameter *rate* (/ 1D0 *mean*))
(defun disaster-posterior (switch-point early-mean late-mean)
(let ((prior-switch-point (score/discrete-uniform switch-point 1 (- ($count *disasters*) 2)))
(prior-early-mean (score/exponential early-mean *rate*))
(prior-late-mean (score/exponential late-mean *rate*)))
(when (and prior-switch-point
prior-early-mean
prior-late-mean)
(let ((disasters-early ($slice *disasters* 0 switch-point))
(disasters-late ($slice *disasters* switch-point)))
(let ((likelihood-early-mean (score/poisson disasters-early early-mean))
(likelihood-late-mean (score/poisson disasters-late late-mean)))
(when (and likelihood-early-mean
likelihood-late-mean)
($+ prior-switch-point
prior-early-mean
prior-late-mean
likelihood-early-mean
likelihood-late-mean)))))))
(defparameter *ts* nil)
(time
(let ((traces (mcmc/mh '(50 2.0 2.0) #'disaster-posterior)))
(setf *ts* traces)
(loop :for trc :in traces
:for lbl :in '(:switch-point :early-mean :late-mean)
:do (prn lbl trc))))
;; fitting
(prn (map/fit #'disaster-posterior '(50 2.0 2.0)))
;; checking autocorrelation
(prn ($slice (trace/acr ($0 *ts*)) 0 20))
;; thinning
(let ((nt (trace/thin ($0 *ts*) 13)))
(prn ($slice (trace/acr nt) 0 10)))
| 2,091
|
Common Lisp
|
.lisp
| 49
| 32.265306
| 95
| 0.54179
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
33e3f6b67218cf6b0b5c3004d0f90fa201c0d35de2a69d44fc22d984574890ce
| 3,191
|
[
-1
] |
3,192
|
vi.lisp
|
chunsj_TH/examples/pp/vi.lisp
|
;; https://www.ritchievink.com/blog/2019/09/16/variational-inference-from-scratch/
(defpackage :vi-learn
(:use #:common-lisp
#:mu
#:th
#:th.layers))
(in-package :vi-learn)
(defun generate-dataset (&optional (n 150))
(let ((xmin -20)
(xmax 60)
(w0 0.125)
(b0 5))
(labels ((s (x)
(let ((g ($div ($sub x xmin) (- xmax xmin))))
($mul 3 ($add 0.25 ($square g))))))
(let* ((x ($add ($mul (sample/uniform 0 1 n) (- xmax xmin))
xmin))
(eps ($mul (sample/normal 0 1 n) (s x)))
(y ($add eps ($add ($* w0 x ($add 1 ($sin x))) b0)))
(y ($div ($sub y ($mean y)) ($sd y)))
(indices (sort (loop :for i :from 0 :below n :collect i)
(lambda (a b) (< ($ x a) ($ x b))))))
(list (tensor (loop :for i :in indices
:collect ($ x i)))
(tensor (loop :for i :in indices
:collect ($ y i))))))))
(defparameter *dataset* (generate-dataset))
(defparameter *x* ($reshape ($0 *dataset*) 150 1))
(defparameter *y* ($reshape ($1 *dataset*) 150 1))
;; maximum likelihood estimation
(defparameter *mle* (sequential-layer
(affine-layer 1 20 :activation :relu)
(affine-layer 20 1 :activation :nil)))
;; what we get here is the best model parameter of *mle* assuming gaussian likelihood(mse).
;; y ~ N(g_theta(x), sigma^2)
;; theta_mle = argmax_theta PI P(y_i | theta)
;; g_theta is *mle*.
(loop :repeat 200
:for ypred = ($execute *mle* *x*)
:for loss = ($mse ypred *y*)
:do ($amgd! *mle*))
(defparameter *qmu* (sequential-layer
(affine-layer 1 20 :activation :relu)
(affine-layer 20 10 :activation :relu)
(affine-layer 10 1 :activation :nil)))
(defparameter *qlv* (sequential-layer
(affine-layer 1 20 :activation :relu)
(affine-layer 20 10 :activation :relu)
(affine-layer 10 1 :activation :nil)))
(defparameter *vi* (list *qmu* *qlv*))
(defun reparameterize (mu lv)
(let ((s ($add ($exp ($mul 0.5 lv)) 1E-5)))
($add mu ($mul s (apply #'$reshape (sample/normal 0 1($count s)) ($size s))))))
(defun vi (x &key (trainp T))
(let ((mu ($execute *qmu* x :trainp trainp))
(lv ($execute *qlv* x :trainp trainp)))
(list (reparameterize mu lv)
mu
lv)))
(defun ll-gaussian (y mu lv)
(let ((s ($exp ($mul 0.5 lv))))
($sub ($mul -0.5 ($log ($* 2 pi ($square s))))
($mul ($div 1 ($mul 2 ($square s)))
($square ($sub y mu))))))
(defun elbo (ypred y mu lv)
(let ((ll (ll-gaussian y mu lv))
(lp (ll-gaussian ypred ($zero mu) ($log ($one lv))))
(lpq (ll-gaussian ypred mu lv)))
($mean ($+ ll lp ($neg lpq)))))
(defun loss-analytic (ypred y mu lv)
(let ((reconstruction-error ($sum ($mul 0.5 ($square ($sub y ypred)))))
(kld ($mul -0.5 ($sum ($+ 1 lv ($neg ($square mu)) ($neg ($exp lv)))))))
($sum ($add reconstruction-error kld))))
($cg! *vi*)
(loop :repeat 1500
:for (ypred mu lv) = (vi *x*)
:for loss = ($neg (elbo ypred *y* mu lv))
:do ($amgd! *vi*))
($cg! *vi*)
(loop :repeat 1500
:for (ypred mu lv) = (vi *x*)
:for loss = (loss-analytic ypred *y* mu lv)
:do ($amgd! *vi*))
(let* ((ys (-> (loop :repeat 1000
:for (y m lv) = (vi *x* :trainp nil)
:collect y)
($catn 1)
($sort 1 nil)
(car)))
(nr ($size ys 0))
(nc ($size ys 1))
(i/5 (ceiling (* 0.05 nc)))
(i/50 (ceiling (* 0.5 nc)))
(i/95 (min nc (ceiling (* 0.95 nc))))
(q1 (tensor nr))
(mu (tensor nr))
(q2 (tensor nr)))
(loop :for i :from 0 :below nr
:do (setf ($ q1 i) ($ ys i i/5)
($ mu i) ($ ys i i/50)
($ q2 i) ($ ys i i/95)))
(prn ($mean ($square ($sub *y* mu)))))
(defclass variational-affine-layer (layer)
((wmu :initform nil)
(wp :initform nil)
(bmu :initform nil)
(bp :initform nil)
(a :initform nil)
(kld :initform nil)))
(defun variational-affine-layer (input-size output-size &key (activation :sigmoid) (biasp t))
(let ((n (make-instance 'variational-affine-layer)))
(with-slots (wmu wp bmu bp a) n
(setf a (th.layers::afn activation))
(when biasp
(setf bmu ($parameter (zeros output-size))
bp ($parameter (zeros output-size))))
(setf wmu ($parameter ($normal (tensor input-size output-size) 0 0.001))
wp ($parameter ($normal (tensor input-size output-size) -2.5 0.001))))
n))
(defmethod $train-parameters ((l variational-affine-layer))
(with-slots (wmu wp bmu bp) l
(if bmu
(list wmu wp bmu bp)
(list wmu wp))))
(defmethod $parameters ((l variational-affine-layer))
($train-parameters l))
(defun variational-affine-layer-reparameterize (mu p &key (trainp t))
  "Reparameterization trick: sample mu + eps * softplus(p), eps ~ N(0,1).
When TRAINP is nil, MU and P are detached via $data so no gradient flows;
eps is always drawn over detached data."
  (let* ((mu* (if trainp mu ($data mu)))
         (p* (if trainp p ($data p)))
         (s ($log ($add 1 ($exp p*))))
         (eps ($normal ($data p) 0 1)))
    ($add mu* ($mul eps s))))
;; Sample a concrete weight matrix from the layer's weight distribution.
(defun variational-affine-layer-w (l &key (trainp t))
  (with-slots (wmu wp) l
    (variational-affine-layer-reparameterize wmu wp :trainp trainp)))
;; Sample a bias vector, or nil when the layer was built without bias.
(defun variational-affine-layer-b (l &key (trainp t))
  (with-slots (bmu bp) l
    (when bmu
      (variational-affine-layer-reparameterize bmu bp :trainp trainp))))
;; KL contribution of a layer to the variational objective;
;; non-variational layers contribute nothing.
(defgeneric layer-kl-divergence (layer))
(defmethod layer-kl-divergence ((l layer)) 0)
(defun variational-affine-layer-kl-divergence (z mu p &optional (ps 1))
  "Single-sample KL estimate log q(Z | MU, P) - log p(Z) for sampled
weights Z, with prior N(0, PS). P is the pre-softplus spread parameter."
  (labels ((ll-normal (y mu p)
             ;; elementwise Gaussian log-density with sd = softplus(p)
             (let ((s ($log ($add 1 ($exp p)))))
               ($sub ($mul -0.5 ($log ($* 2 pi ($square s))))
                     ($mul ($div 1 ($mul 2 ($square s)))
                           ($square ($sub y mu)))))))
    (let ((log-prior (score/normal z 0 ps))
          (log-pq ($sum (ll-normal z mu p))))
      ($sub log-pq log-prior))))
(defmethod $execute ((l variational-affine-layer) x &key (trainp t))
  "Forward pass: sample weights (and bias when present), apply the affine
map and activation. During training the batch-size-normalized KL estimate
is cached in the KLD slot for later collection by layer-kl-divergence."
  (with-slots (wmu wp bmu bp a kld) l
    (let ((w (variational-affine-layer-w l :trainp trainp))
          (b (variational-affine-layer-b l :trainp trainp)))
      (when trainp
        (if b
            ;; with bias: sum both KL contributions before normalizing
            (setf kld ($div ($add (variational-affine-layer-kl-divergence w
                                                                          wmu
                                                                          wp)
                                  (variational-affine-layer-kl-divergence b
                                                                          bmu
                                                                          bp))
                            ($size x 0)))
            (setf kld ($div (variational-affine-layer-kl-divergence w
                                                                    wmu
                                                                    wp)
                            ($size x 0)))))
      (if a
          (funcall a ($affine x w b))
          ($affine x w b)))))
;; Return the KL estimate cached by the most recent training forward pass.
(defmethod layer-kl-divergence ((l variational-affine-layer))
  (with-slots (kld) l
    kld))
(defmethod layer-kl-divergence ((ls list))
  "Sum of the KL contributions of every layer in LS."
  (reduce (lambda (acc l) ($add acc (layer-kl-divergence l)))
          ls
          :initial-value 0))
;; Small Bayesian regression network: 1 -> 20 -> 20 -> 1.
;; NOTE(review): the output layer passes :activation :nil (a keyword),
;; not nil; verify th.layers::afn maps :nil to identity as intended.
(defparameter *bayesnn* (sequential-layer
                         (variational-affine-layer 1 20 :activation :relu)
                         (variational-affine-layer 20 20 :activation :relu)
                         (variational-affine-layer 20 1 :activation :nil)))
(defun bnn-reconstruction-error (ypred y)
  "Negative Gaussian log-likelihood of Y under N(YPRED, sd) with fixed sd 0.1."
  (let ((s ($mul 0.1 ($one y))))
    ($sum ($neg ($sub ($mul -0.5 ($log ($* 2 pi ($square s))))
                      ($mul ($div 1 ($mul 2 ($square s)))
                            ($square ($sub y ypred))))))))
(defun bnn-loss (nn y ypred)
  "Negative log-likelihood of the data plus the network's KL penalty."
  ($add (bnn-reconstruction-error ypred y)
        (layer-kl-divergence nn)))
;; Train the Bayesian network; print the loss every 1000 steps.
($cg! *bayesnn*)
(loop :repeat 20000
      :for i :from 1
      :for ypred = ($execute *bayesnn* *x*)
      :for loss = (bnn-loss *bayesnn* *y* ypred)
      :do (progn
            (when (zerop (rem i 1000)) (prn loss))
            ($amgd! *bayesnn*)))
;; Posterior-predictive check: 1000 sampled predictions, sorted per row;
;; report MSE of the 5%/50%/95% quantile curves against the targets.
(let* ((ys (-> (loop :repeat 1000
                     :for y = ($evaluate *bayesnn* *x*)
                     :collect y)
               ($catn 1)
               ($sort 1 nil)
               (car)))
       (nr ($size ys 0))
       (nc ($size ys 1))
       ;; NOTE(review): quantile ranks used as 0-based column indices;
       ;; i/95 can reach nc -- confirm bounds.
       (i/5 (ceiling (* 0.05 nc)))
       (i/50 (ceiling (* 0.5 nc)))
       (i/95 (min nc (ceiling (* 0.95 nc))))
       (q1 (tensor nr))
       (mu (tensor nr))
       (q2 (tensor nr)))
  (loop :for i :from 0 :below nr
        :do (setf ($ q1 i) ($ ys i i/5)
                  ($ mu i) ($ ys i i/50)
                  ($ q2 i) ($ ys i i/95)))
  (prn ($mean ($square ($sub *y* q1)))
       ($mean ($square ($sub *y* mu)))
       ($mean ($square ($sub *y* q2)))))
| 9,229
|
Common Lisp
|
.lisp
| 221
| 31.167421
| 93
| 0.496879
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
b34fa5f1d90aeaf6f26fcdf9e23360c159c35651ec53722841dcc4f15c936fc9
| 3,192
|
[
-1
] |
3,193
|
simple-mh.lisp
|
chunsj_TH/examples/pp/simple-mh.lisp
|
(defpackage :simple-mh
(:use #:common-lisp
#:mu
#:th
#:mplot))
(in-package :simple-mh)
;; PROBLEM DESCRIPTION
;;
;; estimate standard deviation of population using partial observations.
;; of course, this case should have analytic solution but i like to apply
;; markov chain monte caro, or metropolis-hastings algorithm.
;; N samples from N(10, 3): the "true" data-generating model
(defun model1 (n) ($normal! (tensor n) 10 3))
;; population from N(10, 3) distribution
(defparameter *population* (model1 30000))
;; select random 10000 samples from population (with replacement)
(defparameter *observation* (tensor (loop :repeat 10000
                                          :collect ($ *population* (random 30000)))))
;; we assume that we know the mean
(defparameter *mu-obs* ($mean *observation*))
;; proposal or transition: the mean component is kept fixed; a log-normal
;; step keeps the proposed standard deviation positive
(defun transition-model (theta)
  (let ((mean (car theta))
        (sd (cadr theta)))
    (list mean (exp (random/normal (log sd) 0.01)))))
;; flat prior: (log 1) = 0 for every theta; redundant here but keeps the
;; sampler's interface uniform
(defun log-prior (theta)
  (declare (ignore theta))
  (log 1))
;; for numerical stability, we'll use log-likelihood values
;; our sample model is normal so this is log-likelihood for normal density
(defun log-likelihood-normal (theta data)
  "Log-likelihood of DATA under N(mean, sd), with THETA = (mean sd)."
  (let ((mean (car theta))
        (sd (cadr theta))
        (n ($count data)))
    (- (* (/ n -2) (log (* 2 pi sd sd)))
       (/ ($sum ($square ($- data mean))) (* 2 sd sd)))))
;; Metropolis acceptance rule on log-scores: an improvement is always
;; accepted; otherwise accept with probability exp(newlike - like).
(defun acceptance (like newlike)
  (let ((diff (- newlike like)))
    (or (> diff 0)
        (< (log (random 1D0)) diff))))
;; metropolis hastings algorithm
(defun metropolis-hastings (lfn log-prior transition theta0 iterations data acceptance-rule)
  "Generic Metropolis-Hastings sampler run for ITERATIONS steps from THETA0.
LFN is the log-likelihood (theta data), LOG-PRIOR the log prior of theta,
TRANSITION the proposal function, and ACCEPTANCE-RULE decides between the
current and proposed log posterior scores.
Returns a plist (:accepted ... :rejected ...) of proposals in order.
Improvement over the original: the current theta's score is cached and
refreshed only when a proposal is accepted, halving likelihood evaluations;
results are unchanged because LFN and LOG-PRIOR are deterministic here."
  (let* ((theta theta0)
         (theta-score ($+ (funcall lfn theta data) (funcall log-prior theta)))
         (accepted '())
         (rejected '()))
    (loop :repeat iterations
          :for theta-new = (funcall transition theta)
          :for new-score = ($+ (funcall lfn theta-new data)
                               (funcall log-prior theta-new))
          :do (if (funcall acceptance-rule theta-score new-score)
                  (progn
                    (setf theta theta-new
                          theta-score new-score)
                    (push theta-new accepted))
                  (push theta-new rejected)))
    (prn "ACCEPTED/REJECTED:" ($count accepted) "/" ($count rejected))
    (list :accepted (reverse accepted)
          :rejected (reverse rejected))))
;; Readers for the result plist produced by metropolis-hastings.
(defun simulation/accepted (simulation)
  "Accepted proposals of SIMULATION, in order."
  (getf simulation :accepted))
(defun simulation/rejected (simulation)
  "Rejected proposals of SIMULATION, in order."
  (getf simulation :rejected))
;; generate simulation data: start at the observed mean with sd = 0.1
;; and run 50000 MH steps
(defparameter *simulation* (time
                            (metropolis-hastings #'log-likelihood-normal
                                                 #'log-prior
                                                 #'transition-model
                                                 (list *mu-obs* 0.1)
                                                 50000
                                                 *observation*
                                                 #'acceptance)))
(defun mean (vs) (/ (reduce #'+ vs) ($count vs)))
;; compare estimation vs sample standard deviation, discarding the first
;; 25% of accepted samples as burn-in
(let* ((accepted (simulation/accepted *simulation*))
       (n ($count accepted))
       (sn (round (* n 0.25)))
       (thetas (subseq accepted sn))
       (esd (mean (mapcar #'cadr thetas))))
  (list esd ($sd *observation*)))
;; trace of accepted theta-sd over the whole run, C-c C-i to view
(mplot:plot-points (loop :for theta :in (simulation/accepted *simulation*)
                         :for i :from 0
                         :collect (list i (cadr theta))))
;; check burn-in: first 1000 accepted proposals
(mplot:plot-points (loop :for theta :in (simulation/accepted *simulation*)
                         :for i :from 0 :below 1000
                         :collect (list i (cadr theta))))
;; last 5000 values, zoomed around the expected sd
(mplot:plot-points (loop :for theta :in (last (simulation/accepted *simulation*) 5000)
                         :for i :from 0
                         :collect (list i (cadr theta)))
                   :yrange '(2.5 3.5))
| 4,195
|
Common Lisp
|
.lisp
| 94
| 34.223404
| 92
| 0.575031
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
bd7b6df138477f9b5c6da5ee74d077eb35b13b3da2b4444bdc5bbf389a88487e
| 3,193
|
[
-1
] |
3,194
|
linreg.lisp
|
chunsj_TH/examples/pp/linreg.lisp
|
(defpackage :linear-regression
(:use #:common-lisp
#:mu
#:th
#:th.pp))
(in-package :linear-regression)
;; synthetic data: y = 2x + N(0, 1) noise, x in 1..200
(defparameter *xs* (tensor (loop :for i :from 1 :to 200 :collect i)))
(defparameter *ys* ($+ ($normal (tensor ($count *xs*)) 0 1) ($* 2 *xs*)))
(defun lr-posterior (b0 b1 s)
  "Log posterior of the linear model y = B0 + B1*x with noise sd exp(S).
Returns nil when any prior score or the likelihood is nil (out of support).
Fix over the original: drops the redundant single-argument $+ wrapper that
was applied around the already-summed scores."
  (let ((prior-b0 (score/normal b0 0 1))
        (prior-b1 (score/normal b1 1 1))
        (prior-s (score/normal s 0 1)))
    (when (and prior-b0 prior-b1 prior-s)
      (let ((ll (score/gaussian *ys* ($add b0 ($mul b1 *xs*)) ($exp s))))
        (when ll
          ($+ prior-b0 prior-b1 prior-s ll))))))
;; sample the posterior with Metropolis-Hastings and show the traces
(let ((traces (mcmc/mh '(0.0 1.0 0.0) #'lr-posterior)))
  (prn traces))
| 688
|
Common Lisp
|
.lisp
| 18
| 32.888889
| 73
| 0.564565
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
f1eca80d5ef930d40b74b22775bab94eea0d8b736b19bf5b10199e4a2eec75f7
| 3,194
|
[
-1
] |
3,195
|
xor.lisp
|
chunsj_TH/examples/simple/xor.lisp
|
(defpackage :xor-example
(:use #:common-lisp
#:mu
#:th))
(in-package :xor-example)
;; because there's no complete neural network library without xor example
;; direct, without using ad.
;; forward pass of one layer: sigmoid(input . weight)
(defun fwd (input weight) ($sigmoid! ($@ input weight)))
;; backprop delta through the sigmoid derivative: delta * out * (1 - out)
(defun dwb (delta output) ($* delta output ($- 1 output)))
;; manual backprop with plain tensors (no autograd); note y and Y name
;; the same symbol under the default readtable
(time
 (let* ((X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
        (Y (tensor '((0) (1) (1) (0))))
        (w1 (rndn 3 3))
        (w2 (rndn 3 1))
        (lr 1))
   (loop :for i :from 0 :below 1000
         :do (let* ((l1 (fwd X w1))
                    (l2 (fwd l1 w2))
                    (l2d (dwb ($- l2 y) l2))
                    (l1d (dwb ($@ l2d ($transpose w2)) l1))
                    (dw2 ($@ ($transpose l1) l2d))
                    (dw1 ($@ ($transpose X) l1d)))
               ($sub! w1 ($* lr dw1))
               ($sub! w2 ($* lr dw2))))
   (prn (fwd (fwd X w1) w2))))
;; using ad or autograd: parameters, backward via $gs!, plain $gd! steps
(time
 (let* ((w1 ($parameter (rndn 3 3)))
        (w2 ($parameter (rndn 3 1)))
        (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
        (Y (tensor '(0 1 1 0)))
        (lr 1))
   (loop :for i :from 0 :below 1000
         :do (let* ((l1 ($sigmoid ($mm X w1)))
                    (l2 ($sigmoid ($mm l1 w2)))
                    (d ($sub l2 Y))
                    (out ($dot d d)))
               ($gs! out 1)
               ($gd! w1 lr)
               ($gd! w2 lr)))
   (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2)))))
;; same network with Xavier-normal initialized weights
(let* ((w1 ($parameter ($xaviern! (tensor 3 3))))
       (w2 ($parameter ($xaviern! (tensor 3 1))))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($gd! w1 lr)
              ($gd! w2 lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; same, creating the parameters via the vxavier helper
(let* ((w1 (vxavier '(3 3)))
       (w2 (vxavier '(3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($gd! w1 lr)
              ($gd! w2 lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; $mgd! optimizer variant (presumably momentum GD -- confirm in TH)
(let* ((w1 ($parameter (rndn 3 3)))
       (w2 ($parameter (rndn 3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($mgd! w1 lr)
              ($mgd! w2 lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; $agd! optimizer variant (presumably adagrad-style -- confirm in TH)
(let* ((w1 ($parameter (rndn 3 3)))
       (w2 ($parameter (rndn 3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($agd! w1 lr)
              ($agd! w2 lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; $rpgd! variant with a pair of etas (presumably Rprop-style -- confirm)
(time
 (let* ((w1 ($parameter (rndn 3 3)))
        (w2 ($parameter (rndn 3 1)))
        (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
        (Y (tensor '(0 1 1 0)))
        (lr 1E-2)
        (etas '(0.5 1.2)))
   (loop :for i :from 0 :below 1000
         :do (let* ((l1 ($sigmoid ($mm X w1)))
                    (l2 ($sigmoid ($mm l1 w2)))
                    (d ($sub l2 Y))
                    (out ($dot d d)))
               ($gs! out 1)
               ($rpgd! w1 lr etas)
               ($rpgd! w2 lr etas)))
   (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2)))))
;; parameters container: register weights with $push once, then update the
;; whole group with a single $amgd! call
(let* ((ps (parameters))
       (w1 ($push ps (rndn 3 3)))
       (w2 ($push ps (rndn 3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 0.01))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($amgd! ps lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; $rmgd! optimizer variant at lr 0.1 (presumably RMSprop-style -- confirm)
(let* ((w1 ($parameter (rndn 3 3)))
       (w2 ($parameter (rndn 3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0)))
       (lr 0.1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($rmgd! w1 lr)
              ($rmgd! w2 lr)))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
;; $adgd! variant; note it takes the parameter list and no learning rate
(let* ((w1 ($parameter (rndn 3 3)))
       (w2 ($parameter (rndn 3 1)))
       (X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '(0 1 1 0))))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 ($sigmoid ($mm X w1)))
                   (l2 ($sigmoid ($mm l1 w2)))
                   (d ($sub l2 Y))
                   (out ($dot d d)))
              ($gs! out 1)
              ($adgd! (list w1 w2))))
  (prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
| 5,587
|
Common Lisp
|
.lisp
| 153
| 25.921569
| 73
| 0.394392
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
4d1bca886b8618d2be3b2abf6f30617736f93f3e359aac8df4b3fbfb2f58b216
| 3,195
|
[
-1
] |
3,196
|
catsdogs.lisp
|
chunsj_TH/examples/simple/catsdogs.lisp
|
;; note that this examples is for testing cnn on color images.
;; to get better result, you have to define better network and more data.
;; this example, as is provided, will have 31~40% of error rates.
(defpackage :cats-and-dogs
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.image
#:th.db.cats-and-dogs))
(in-package :cats-and-dogs)
;; batching and dataset-size parameters
(defparameter *batch-size* 10)
(defparameter *batch-count* 50)
(defparameter *test-size* 100)
(defparameter *img-size* 64)
(defparameter *data-index* 10)
;; training batches; chunk files 1..*data-index* hold 1000 images each,
;; cats/dogs interleaved by the reader
(defparameter *train-data*
  (let* ((n *data-index*)
         (rng (loop :for i :from 1 :to n :collect i))
         (txs (read-cats-and-dogs-data :indices rng)))
    (when (< (* *batch-count* *batch-size*) (* n 1000))
      (loop :for bidx :from 0 :below *batch-count*
            :for sidx = (* bidx *batch-size*)
            :for eidx = (+ sidx *batch-size*)
            :for indices = (loop :for i :from sidx :below eidx :collect i)
            :collect ($index txs 0 indices)))))
;; labels: 1 at even positions (cats, per the reader's interleaving), 0 at odd
(defparameter *train-labels*
  (let* ((n *data-index*)
         (txs (zeros (* n 1000))))
    (loop :for i :from 0 :below (* n 1000)
          :do (setf ($ txs i) (if (zerop (rem i 2)) 1 0)))
    (when (< (* *batch-count* *batch-size*) (* n 1000))
      (loop :for bidx :from 0 :below *batch-count*
            :for sidx = (* bidx *batch-size*)
            :for eidx = (+ sidx *batch-size*)
            :for indices = (loop :for i :from sidx :below eidx :collect i)
            :collect ($index txs 0 indices)))))
;; held-out set from chunk 25 with matching interleaved labels
(defparameter *test-data* ($index (read-cats-and-dogs-data :indices '(25))
                                  0
                                  (loop :for i :from 0 :below *test-size* :collect i)))
(defparameter *test-labels* (tensor (loop :for i :from 0 :below ($size *test-data* 0)
                                          :collect (if (zerop (rem i 2)) 1 0))))
;; png dump helpers for eyeballing images
(defparameter *output-directory* ($concat (namestring (user-homedir-pathname))
                                          "Desktop"))
(defun write-rgb-png-file (tensor filename)
  (write-tensor-png-file tensor (format nil "~A/~A" *output-directory* filename)))
(defun write-gray-png-file (tensor filename &optional (channel 0))
  (write-tensor-png-file ($ tensor channel) (format nil "~A/~A" *output-directory* filename)))
;; small CNN: two conv/lrelu + maxpool stages, dropout, two affine layers,
;; sigmoid output for binary cat-vs-dog classification
(defparameter *network* (sequential-layer
                         (convolution-2d-layer 3 32 3 3
                                               :activation :lrelu
                                               :weight-initializer :random-normal
                                               :weight-initialization '(0 0.01))
                         (maxpool-2d-layer 2 2)
                         (convolution-2d-layer 32 32 3 3
                                               :activation :lrelu
                                               :weight-initializer :random-normal
                                               :weight-initialization '(0 0.01))
                         (maxpool-2d-layer 2 2)
                         (flatten-layer)
                         ;; dropout only active when trainp is true
                         (functional-layer (lambda (x &key (trainp t)) ($dropout x trainp 0.4)))
                         (affine-layer (* 32 58 58) 128
                                       :activation :lrelu
                                       :weight-initializer :random-normal
                                       :weight-initialization '(0 0.01))
                         (affine-layer 128 1
                                       :activation :sigmoid
                                       :weight-initializer :random-normal
                                       :weight-initialization '(0 0.01))))
;; reset weights and collect garbage before training
($reset! *network*)
(gcf)
(defun opt! () ($amgd! *network* 1E-4))
(defparameter *epoch* 60) ;; at least 120
(defparameter *train-size* ($count *train-data*))
;; train on BCE loss; every 5th epoch report test error at threshold 0.5
(time
 (loop :for epoch :from 1 :to *epoch*
       :do (progn
             (loop :for data :in (subseq *train-data* 0 *train-size*)
                   :for labels :in (subseq *train-labels* 0 *train-size*)
                   :for bidx :from 1
                   :do (let* ((y* ($execute *network* data))
                              (loss ($bce y* labels)))
                         (prn epoch "|" bidx ($data loss))
                         (opt!)))
             (when (zerop (rem epoch 5))
               (let* ((res ($evaluate *network* *test-data*))
                      (fres (tensor.float ($ge res 0.5)))
                      (d ($- fres *test-labels*)))
                 (prn "TEST ERROR:" (/ ($dot d d) *test-size*)))))))
;; for testing: shrink the run to a single epoch and batch
(setf *epoch* 1)
(setf *train-size* 1)
;; train check: error on one random training batch
(let* ((idx (random *train-size*))
       (data (nth idx *train-data*))
       (lbl (nth idx *train-labels*))
       (y ($evaluate *network* data))
       (res (tensor.float ($ge y 0.5)))
       (d ($- res lbl)))
  (prn "TRAIN IDX:" idx "ERROR:" (/ ($dot d d) *batch-size*))
  (gcf))
;; test check: error on the held-out set
(let* ((res ($evaluate *network* *test-data*))
       (fres (tensor.float ($ge res 0.5)))
       (d ($- fres *test-labels*)))
  (prn "TEST ERROR:" (/ ($dot d d) *test-size*))
  (gcf))
| 5,139
|
Common Lisp
|
.lisp
| 107
| 33.971963
| 96
| 0.495014
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
fd0d7568e440fb38a899a8e644e5ad6851869a7d3d9f4d04711c10dbad24d0a2
| 3,196
|
[
-1
] |
3,197
|
mnist.lisp
|
chunsj_TH/examples/simple/mnist.lisp
|
(defpackage :mnist-example
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :mnist-example)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
;; utility functions for easier, systematic building convolution data
;; NOTE(review): callers pass (fn nc height width) while the parameters
;; are named (fn nc kw kh); harmless only because the filters are square.
(defun mkfilter (fn nc kw kh) (tensor fn nc kw kh))
(defun mkfbias (fn) (tensor fn))
;; network parameters
(defparameter *filter-number* 30)
(defparameter *channel-number* 1)
(defparameter *filter-width* 5)
(defparameter *filter-height* 5)
(defparameter *pool-width* 2)
(defparameter *pool-height* 2)
(defparameter *pool-stride-width* 2)
(defparameter *pool-stride-height* 2)
;; 28x28 input -> 5x5 conv -> 24x24 -> 2x2/2 maxpool -> 12x12
(defparameter *pool-out-width* 12)
(defparameter *pool-out-height* 12)
(defparameter *l2-output* 100)
(defparameter *l3-output* 10)
;; convolution kernel (small uniform init) and zero bias
(defparameter *k* (-> (mkfilter *filter-number* *channel-number*
                                *filter-height* *filter-width*)
                      ($uniform! 0 0.01)
                      ($parameter)))
(defparameter *kb* (-> (mkfbias *filter-number*)
                       ($zero!)
                       ($parameter)))
;; fully-connected weights, scaled-down random init; zero biases
(defparameter *w2* (-> (rnd (* *filter-number* *pool-out-width* *pool-out-height*)
                            *l2-output*)
                       ($mul! 0.01)
                       ($parameter)))
(defparameter *b2* (-> (zeros *l2-output*)
                       ($parameter)))
(defparameter *w3* (-> (rnd *l2-output* *l3-output*)
                       ($mul! 0.01)
                       ($parameter)))
(defparameter *b3* (-> (zeros *l3-output*)
                       ($parameter)))
;; reading/writing network weights - this example comes from dlfs follow-ups
(defun mnist-write-weight-to (w fname)
  "Write the tensor behind parameter W to FNAME in binary format."
  (let ((f (file.disk fname "w")))
    (setf ($fbinaryp f) t)
    ($fwrite ($data w) f)
    ($fclose f)))
(defun mnist-cnn-write-weights ()
  (mnist-write-weight-to *k* "examples/weights/mnist/mnist-cnn-k.dat")
  (mnist-write-weight-to *kb* "examples/weights/mnist/mnist-cnn-kb.dat")
  (mnist-write-weight-to *w2* "examples/weights/mnist/mnist-cnn-w2.dat")
  (mnist-write-weight-to *b2* "examples/weights/mnist/mnist-cnn-b2.dat")
  (mnist-write-weight-to *w3* "examples/weights/mnist/mnist-cnn-w3.dat")
  (mnist-write-weight-to *b3* "examples/weights/mnist/mnist-cnn-b3.dat"))
(defun mnist-read-weight-from (w fname)
  "Read binary FNAME into the tensor behind parameter W."
  (let ((f (file.disk fname "r")))
    (setf ($fbinaryp f) t)
    ($fread ($data w) f)
    ($fclose f)))
(defun mnist-cnn-read-weights ()
  (mnist-read-weight-from *k* "examples/weights/mnist/mnist-cnn-k.dat")
  (mnist-read-weight-from *kb* "examples/weights/mnist/mnist-cnn-kb.dat")
  (mnist-read-weight-from *w2* "examples/weights/mnist/mnist-cnn-w2.dat")
  (mnist-read-weight-from *b2* "examples/weights/mnist/mnist-cnn-b2.dat")
  (mnist-read-weight-from *w3* "examples/weights/mnist/mnist-cnn-w3.dat")
  (mnist-read-weight-from *b3* "examples/weights/mnist/mnist-cnn-b3.dat"))
;; x should have been reshaped (batch, channel, 28, 28) before entering
(defun mnist-predict (x)
  "Conv -> relu -> maxpool -> flatten -> affine/relu -> affine -> softmax."
  (-> x
      ($conv2d *k* *kb*)
      ($relu)
      ($maxpool2d *pool-width* *pool-height* *pool-stride-width* *pool-stride-height*)
      ($reshape ($size x 0) (* *filter-number* *pool-out-width* *pool-out-height*))
      ($xwpb *w2* *b2*)
      ($relu)
      ($xwpb *w3* *b3*)
      ($softmax)))
(defparameter *batch-size* 500)
(defparameter *batch-count* (/ ($size ($ *mnist* :train-images) 0) *batch-size*))
;; training data - uses contiguous pre-sliced batches for performance
(defparameter *mnist-train-image-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect ($contiguous! ($index ($ *mnist* :train-images) 0 rng))))
(defparameter *mnist-train-label-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect ($contiguous! ($index ($ *mnist* :train-labels) 0 rng))))
;; clear gradients on all parameters before training
(loop :for p :in (list *k* *kb* *w2* *b2* *w3* *b3*)
      :do (th::$cg! p))
(defparameter *epoch* 30)
(gcf)
;; the actual training: cross-entropy loss, $adgd! updates per batch
(time
 (loop :for epoch :from 1 :to *epoch*
       :do (loop :for i :from 0 :below *batch-count*
                 :for xi = ($ *mnist-train-image-batches* i)
                 :for x = (-> xi
                              ($reshape ($size xi 0) *channel-number* 28 28))
                 :for y = (-> ($ *mnist-train-label-batches* i))
                 :for y* = (mnist-predict x)
                 :for loss = ($cee y* y)
                 :do (progn
                       (prn (format nil "[~A|~A]: ~A" (1+ i) epoch ($data loss)))
                       ($adgd! (list *k* *kb* *w2* *b2* *w3* *b3*))))))
;; test stats
(defun mnist-test-stat (&optional verbose)
  "Count test-set misclassifications: a row is an error unless all 10
rounded one-hot positions match the label row. With VERBOSE, also print
the index of each misclassified row.
Fix over the original: the numeric comparison uses = instead of EQ, whose
behavior on numbers is unspecified by the Common Lisp standard."
  (let* ((xt ($ *mnist* :test-images))
         (yt (-> ($ *mnist* :test-labels)
                 (tensor.byte)))
         (yt* (-> ($reshape xt ($size xt 0) *channel-number* 28 28)
                  (mnist-predict)
                  ($data)
                  ($round)
                  (tensor.byte)))
         ;; row error iff fewer than 10 positions agree
         (errors ($ne ($sum ($eq yt* yt) 1)
                      (-> (tensor.byte ($size yt 0) 1)
                          ($fill! 10)))))
    (when verbose (loop :for i :from 0 :below ($size errors 0)
                        :do (when (= 1 ($ errors i 0))
                              (prn i))))
    ($sum errors)))
;; prn test stats after training
(prn (mnist-test-stat))
;; writing/reading; reading overwrites the in-memory weights
;;(mnist-cnn-write-weights)
(mnist-cnn-read-weights)
| 5,702
|
Common Lisp
|
.lisp
| 133
| 34.789474
| 87
| 0.574338
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
c7f7f14f89cd7af0971a4af8f89996b4fd4ce4a288a8749800bea00650e5be78
| 3,197
|
[
-1
] |
3,198
|
layer.lisp
|
chunsj_TH/examples/simple/layer.lisp
|
(defpackage :layer-example
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist
#:th.layers))
(in-package :layer-example)
;; this example will use layers to re-create the mnist classification
;; example using 2D convolutional network.
;; personally, i do not like this kind of approach/design of layers,
;; however, i cannot find better way yet.
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
;; prepare data
(defparameter *batch-size* 500)
(defparameter *batch-count* (/ ($size ($ *mnist* :train-images) 0) *batch-size*))
(defparameter *channel-number* 1)
(defparameter *image-width* 28)
(defparameter *image-height* 28)
;; training data - uses batches for performance, reshaped to (N C H W)
(defparameter *mnist-train-image-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect (-> ($contiguous! ($index ($ *mnist* :train-images) 0 rng))
                     ($reshape *batch-size* *channel-number* *image-height* *image-width*))))
(defparameter *mnist-train-label-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect ($contiguous! ($index ($ *mnist* :train-labels) 0 rng))))
;; NOTE(review): test images are reshaped width-before-height, unlike the
;; train reshape above (height-before-width); harmless only because 28 = 28.
(defparameter *mnist-test-images*
  (let ((xt ($ *mnist* :test-images)))
    ($reshape xt ($size xt 0) *channel-number* *image-width* *image-height*)))
(defparameter *mnist-test-labels* ($ *mnist* :test-labels))
;; data is set up so free the original data
(setf *mnist* nil)
;; network parameters - copied from mnist example
(defparameter *filter-number* 30)
(defparameter *filter-width* 5)
(defparameter *filter-height* 5)
(defparameter *pool-width* 2)
(defparameter *pool-height* 2)
(defparameter *pool-stride-width* 2)
(defparameter *pool-stride-height* 2)
(defparameter *pool-out-width* 12)
(defparameter *pool-out-height* 12)
(defparameter *l2-output* 100)
(defparameter *l3-output* 10)
;; conv/selu -> maxpool -> flatten -> affine/selu -> affine/softmax
(defparameter *network* (sequential-layer
                         (convolution-2d-layer *channel-number*
                                               *filter-number*
                                               *filter-width*
                                               *filter-height*
                                               :activation :selu
                                               :batch-normalization-p nil)
                         (maxpool-2d-layer *pool-width* *pool-height*
                                           :stride-width *pool-stride-width*
                                           :stride-height *pool-stride-height*)
                         (flatten-layer)
                         (affine-layer (* *filter-number* *pool-out-width* *pool-out-height*)
                                       *l2-output*
                                       :activation :selu
                                       :batch-normalization-p nil)
                         (affine-layer *l2-output* *l3-output*
                                       :activation :softmax)))
(defun mnist-predict (x &optional (trainp t)) ($execute *network* x :trainp trainp))
(defun mnist-test-stat (&optional verbose)
  "Count test-set misclassifications: a row errs unless all 10 rounded
one-hot positions match the label row. With VERBOSE, print each bad index.
Fix over the original: the numeric comparison uses = instead of EQ, whose
behavior on numbers is unspecified by the Common Lisp standard."
  (let* ((yt (-> *mnist-test-labels*
                 (tensor.byte)))
         (yt* (-> (mnist-predict *mnist-test-images* nil)
                  ($round)
                  (tensor.byte)))
         ;; row error iff fewer than 10 positions agree
         (errors ($ne ($sum ($eq yt* yt) 1)
                      (-> (tensor.byte ($size yt 0) 1)
                          ($fill! 10)))))
    (when verbose (loop :for i :from 0 :below ($size errors 0)
                        :do (when (= 1 ($ errors i 0))
                              (prn i))))
    ($sum errors)))
;; load trained weights and evaluate
($load-weights "./examples/weights/layers-mnist" *network*)
(mnist-test-stat)
;; if you want to train again, run following code
(defparameter *epoch* 30)
($reset! *network*)
(time
 (loop :for epoch :from 1 :to *epoch*
       :do (loop :for i :from 0 :below *batch-count*
                 :for x = ($ *mnist-train-image-batches* i)
                 :for y = ($ *mnist-train-label-batches* i)
                 :for y* = (mnist-predict x)
                 :for loss = ($cee y* y)
                 :do (progn
                       ;; log every 10th batch
                       (when (zerop (rem i 10))
                         (prn (format nil "[~A|~A]: ~A" (1+ i) epoch ($data loss))))
                       ($adgd! *network*)))))
;; save your new weights
($save-weights "./examples/weights/layers-mnist" *network*)
| 4,755
|
Common Lisp
|
.lisp
| 102
| 34.696078
| 93
| 0.543042
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
405067ce11d6f1ca381a677ef267f204fab1805027eefc46c914c117db6fd2f8
| 3,198
|
[
-1
] |
3,199
|
bn.lisp
|
chunsj_TH/examples/simple/bn.lisp
|
(defpackage :bn
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.db.mnist
#:th.db.fashion))
(in-package :bn)
;; use one of following (as written, the second defparameter wins)
(defparameter *mnist* (read-mnist-data))
(defparameter *mnist* (read-fashion-data))
;; first 1000 training examples, split into 10 batches of 100
(defparameter *data-size* 1000)
(defparameter *x-train* ($index ($ *mnist* :train-images) 0 (xrange 0 *data-size*)))
(defparameter *y-train* ($index ($ *mnist* :train-labels) 0 (xrange 0 *data-size*)))
(defparameter *batch-size* 100)
(defparameter *batch-count* (/ *data-size* *batch-size*))
(defparameter *x-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect ($contiguous! ($index *x-train* 0 rng))))
(defparameter *y-batches*
  (loop :for i :from 0 :below *batch-count*
        :for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
                         :collect k)
        :collect ($contiguous! ($index *y-train* 0 rng))))
;; layer sizes: 784 (28x28 flattened) -> 100 -> 100 -> 10
(defparameter *input-size* 784)
(defparameter *weight-size* 100)
(defparameter *output-size* 10)
;; net01: relu activations, he-normal init, no batch norm (baseline)
(defparameter *net01* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :relu
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :relu
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
;; net02: same as net01 plus batch normalization on the hidden layers
(defparameter *net02* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :relu
                                     :weight-initializer :he-normal
                                     :batch-normalization-p t)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :relu
                                     :weight-initializer :he-normal
                                     :batch-normalization-p t)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
;; net03: selu activations with he-normal init
(defparameter *net03* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :selu
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :selu
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
;; net04: selu activations with matching selu-normal init
(defparameter *net04* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :selu
                                     :weight-initializer :selu-normal)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :selu
                                     :weight-initializer :selu-normal)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
;; net05: swish activations
(defparameter *net05* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :swish
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :swish
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
;; net06: mish activations
(defparameter *net06* (sequential-layer
                       (affine-layer *input-size* *weight-size*
                                     :activation :mish
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *weight-size*
                                     :activation :mish
                                     :weight-initializer :he-normal)
                       (affine-layer *weight-size* *output-size*
                                     :activation :softmax
                                     :weight-initializer :he-normal)))
(defparameter *epochs* 500)
;; Train NET for *epochs* epochs over all batches with plain $gd!;
;; records the first-batch loss every 50 epochs (most recent first).
(defun train (net)
  (let ((losses nil))
    (loop :for epoch :from 1 :to *epochs*
          :do (loop :for xb :in *x-batches*
                    :for yb :in *y-batches*
                    :for i :from 0
                    :for y* = ($execute net xb)
                    :for l = ($cee y* yb)
                    :do (progn
                          ($gd! net)
                          (when (and (zerop (rem epoch 50))
                                     (zerop i))
                            (let ((lv ($data l)))
                              (push lv losses)
                              (prn (format nil "[~A] ~2,5E" epoch lv)))))))
    losses))
;; run the comparison; each holds loss snapshots (most recent epoch first)
(defparameter *losses01* (train *net01*))
(defparameter *losses02* (train *net02*))
(defparameter *losses03* (train *net03*))
(defparameter *losses04* (train *net04*))
(defparameter *losses05* (train *net05*))
(defparameter *losses06* (train *net06*))
| 5,904
|
Common Lisp
|
.lisp
| 114
| 30.95614
| 87
| 0.450987
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
143b6b13736444a66f5876ae678725e31f2ddd2ed14fa0e107a179852aa01333
| 3,199
|
[
-1
] |
3,200
|
ch3.lisp
|
chunsj_TH/examples/books/dlfs2/ch3.lisp
|
(defpackage :dlfs-ch3
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.text))
(in-package :dlfs-ch3)
;; compare a one-hot matrix product with the embedding helpers $wimb/$wemb
(let* ((c (tensor '((1 0 0 0 0 0 1))))
       (c2 ($select ($nonzero c) 1 1))
       (w (rndn 7 3)))
  (prn ($@ c w))
  (prn ($wimb c2 w))
  (prn ($wemb c w)))
;; tiny CBOW step by hand: average two context embeddings, project, softmax
(let ((c0 (tensor '((1 0 0 0 0 0 0))))
      (c1 (tensor '((0 0 1 0 0 0 0))))
      (win (rndn 7 3))
      (wout (rndn 3 7)))
  (let* ((h0 ($wemb c0 win))
         (h1 ($wemb c1 win))
         (h ($* 0.5 ($+ h0 h1)))
         (s ($wemb h wout)))
    (prn ($softmax s))))
(defun preprocess (text)
  "Lowercase TEXT and build the corpus index sequence plus word<->index maps."
  (let* ((lowered (string-downcase text))
         (wm (make-word-maps (th.text::collect-words (list lowered))))
         (corpus (make-corpus wm lowered)))
    (list :corpus corpus
          :vocab-size (getf wm :vocab-size)
          :word-to-index (getf wm :word-to-index)
          :index-to-word (getf wm :index-to-word))))
;; tiny corpus used throughout the chapter
(defparameter *text* "You say goodbye and I say hello.")
(defparameter *data* (preprocess *text*))
;; peek at the preprocessed corpus and vocabulary
(let ((data *data*))
  (prn (getf data :corpus))
  (prn (getf data :index-to-word)))
(defun create-contexts-target (corpus &key (window-size 1))
  "Build (context, target) pairs from CORPUS with the given WINDOW-SIZE.
Targets are the positions with a full window on both sides; each context
row holds the surrounding indices, excluding the target itself.
Fix over the original: the window-offset check uses ZEROP instead of EQ on
a number, whose behavior is unspecified by the Common Lisp standard."
  (let ((target (subseq corpus window-size (- ($count corpus) window-size)))
        (contexts '()))
    (loop :for idx :from window-size :below (- ($count corpus) window-size)
          :do (let ((cs '()))
                (loop :for i :from (- window-size) :below (1+ window-size)
                      :do (unless (zerop i)
                            (push ($ corpus (+ idx i)) cs)))
                (push (reverse cs) contexts)))
    (list :contexts (tensor.long (reverse contexts))
          :target (tensor (coerce target 'list)))))
;; inspect the generated contexts/targets
(let ((ct (create-contexts-target (getf *data* :corpus))))
  (prn (getf *data* :corpus))
  (prn (getf ct :contexts))
  (prn (getf ct :target))
  (prn ($ (getf ct :contexts) 0)))
(defparameter *ct* (create-contexts-target (getf *data* :corpus)))
(defun convert-one-hot (x sz)
  "One-hot encode the index tensor X into SZ classes.
1D input (n) -> (n sz); 2D input (n m) -> (n m sz); any other rank errors.
Fix over the original: rank comparisons use = instead of EQ, whose behavior
on numbers is unspecified by the Common Lisp standard."
  (cond ((= ($ndim x) 1)
         (let ((r (zeros ($size x 0) sz)))
           (loop :for i :from 0 :below ($size x 0)
                 :for v = ($ x i)
                 :do (setf ($ r i (round v)) 1))
           r))
        ((= ($ndim x) 2)
         (let ((r (zeros ($size x 0) ($size x 1) sz)))
           (loop :for i :from 0 :below ($size x 0)
                 :for v = ($ x i)
                 :do (loop :for j :from 0 :below ($size v 0)
                           :for vv = ($ v j)
                           :do (setf ($ r i j (round vv)) 1)))
           r))
        (T (error "cannot convert tensor of ~A dimension" ($ndim x)))))
;; sanity-check one-hot conversion on targets and contexts
(let ((ct *ct*))
  (prn (getf ct :target))
  (prn (convert-one-hot (getf ct :target) (getf *data* :vocab-size)))
  (prn (getf ct :contexts))
  (prn (convert-one-hot (getf ct :contexts) (getf *data* :vocab-size))))
(prn ($squeeze ($index (convert-one-hot (getf *ct* :contexts) (getf *data* :vocab-size)) 1 '(1))))
;; CBOW parameters: input and output embedding matrices
(defparameter *hidden-size* 5)
(defparameter *win* ($parameter (rndn (getf *data* :vocab-size) *hidden-size*)))
(defparameter *wout* ($parameter (rndn *hidden-size* (getf *data* :vocab-size))))
;; average the two context embeddings, project to vocabulary scores
(defun forward (contexts)
  (let ((h0 ($@ ($squeeze ($index contexts 1 '(0))) *win*))
        (h1 ($@ ($squeeze ($index contexts 1 '(1))) *win*)))
    ($@ ($* 0.5 ($+ h0 h1)) *wout*)))
;; cross-entropy of the softmaxed scores against one-hot targets
(defun loss (h target) ($cee ($softmax h) target))
;; train CBOW for 1000 epochs, printing the loss and stepping with $amgd!
($cg! (list *win* *wout*))
(let ((contexts (convert-one-hot (getf *ct* :contexts) (getf *data* :vocab-size)))
      (target (convert-one-hot (getf *ct* :target) (getf *data* :vocab-size))))
  (loop :for epoch :from 0 :below 1000
        :do (let ((loss (loss (forward contexts) target)))
              (prn loss)
              ($amgd! (list *win* *wout*)))))
(gcf)
;; dump the learned word vectors per vocabulary word
(loop :for wid :being :the :hash-keys :of (getf *data* :index-to-word)
      :for word = ($ (getf *data* :index-to-word) wid)
      :do (format T "~A~%~A~%" word ($ ($data *win*) wid)))
| 3,929
|
Common Lisp
|
.lisp
| 93
| 34.903226
| 98
| 0.532617
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
cfd0dcbfe86d35d08a3b01236a407c889f21dbe76efae0ba1073c8084a1affb6
| 3,200
|
[
-1
] |
3,201
|
ch7.lisp
|
chunsj_TH/examples/books/dlfs2/ch7.lisp
|
(defpackage :dlfs2-ch7
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.layers
#:th.text))
(in-package :dlfs2-ch7)
;; data for the chapter 7 example
;;
;; number addition problems
(defparameter *data* (addition))
(defparameter *data-length* ($count *data*))
(defparameter *encoder* (character-encoder "0123456789 _+="))
;; train and test datasets
(defparameter *train-input-data* (mapcar (lambda (s) (subseq s 0 7)) (subseq *data* 0 40000)))
(defparameter *train-target-data* (mapcar (lambda (s) (subseq s 8)) (subseq *data* 0 40000)))
(defparameter *test-input-data* (mapcar (lambda (s) (subseq s 0 7)) (subseq *data* 40000)))
(defparameter *test-target-data* (mapcar (lambda (s) (subseq s 8)) (subseq *data* 40000)))
;; network parameters
(defparameter *batch-size* 100)
(defparameter *hidden-size* 128)
(defparameter *wvec-size* 16)
;; preparing datasets - an helper function
(defun build-batches (data n)
(loop :for tail :on data :by (lambda (l) (nthcdr n l))
:collect (encoder-encode *encoder* (subseq tail 0 (min ($count tail) n)))))
;; for real training
(defparameter *train-xs-batches* (build-batches *train-input-data* *batch-size*))
(defparameter *train-ys-batches* (build-batches *train-target-data* *batch-size*))
;; for overfitting - to check implementation
(defparameter *overfit-xs-batches* (subseq (build-batches *train-input-data* 12) 0 1))
(defparameter *overfit-ys-batches* (subseq (build-batches *train-target-data* 12) 0 1))
;; helper functions for the seq2seq model
;; mostly generation, execution(for training) and evaluation(for running)
;; generate a string using the seed string
(defun generate-string (rnn encoder seedstr n &optional (temperature 1D0))
($generate-sequence rnn encoder seedstr n temperature))
(defun encoder-state (encoder-rnn) ($cell-state ($ encoder-rnn 1)))
(defun update-decoder-state! (decoder-rnn h) ($update-cell-state! ($ decoder-rnn 1) h))
;; execution function for training
(defun execute-seq2seq (encoder-rnn decoder-rnn encoder xs ts)
($execute encoder-rnn xs)
(let ((h0 (encoder-state encoder-rnn)))
(update-decoder-state! decoder-rnn h0)
(with-keeping-state (decoder-rnn)
(let* ((batch-size ($size (car xs) 0))
(ys (append (encoder-encode encoder (loop :repeat batch-size :collect "_"))
(butlast ts)))
(yts ($execute decoder-rnn ys)))
yts))))
;; loss function using cross entropy
(defun loss-seq2seq (encoder-rnn decoder-rnn encoder xs ts &optional verbose)
(let* ((ys (execute-seq2seq encoder-rnn decoder-rnn encoder xs ts))
(losses (mapcar (lambda (y c) ($cec y c)) ys ts))
(loss ($div (apply #'$+ losses) ($count losses))))
(when verbose
(prn "TS" (encoder-decode encoder ts))
(prn "YS" (encoder-choose encoder ys -1)))
loss))
;; generate using decoder
(defun generate-decoder (decoder-rnn encoder h xs0 n)
(let ((sampled '())
(xts xs0)
(batch-size ($size (car xs0) 0)))
(update-decoder-state! decoder-rnn h)
(with-keeping-state (decoder-rnn)
(loop :for i :from 0 :below n
:do (let* ((yts ($evaluate decoder-rnn xts))
(rts (encoder-choose encoder yts -1)))
(push rts sampled)
(setf xts (encoder-encode encoder rts)))))
(let ((res (reverse sampled))
(results (make-list batch-size)))
(loop :for r :in res
:do (loop :for v :in r
:for i :from 0
:do (push v ($ results i))))
(mapcar (lambda (rs) (apply #'concatenate 'string (reverse rs))) results))))
;; running the model
(defun evaluate-seq2seq (encoder-rnn decoder-rnn encoder xs &optional (n 4))
($evaluate encoder-rnn xs)
(generate-decoder decoder-rnn encoder (encoder-state encoder-rnn)
(encoder-encode encoder (loop :repeat ($size (car xs) 0) :collect "_"))
n))
;; compare the results - between the generated one and the truth
(defun matches-score (encoder ts ys)
(let ((tss (->> ts
(encoder-decode encoder)
(mapcar (lambda (s) (parse-integer s)))))
(yss (->> ys
(mapcar (lambda (s)
(handler-case (parse-integer s)
(error (c)
(declare (ignore c))
-1)))))))
(let ((matches (mapcar (lambda (tn yn) (if (eq tn yn) 0 1)) tss yss)))
(* 1D0 (/ (reduce #'+ matches) ($count matches))))))
(defun gd! (encoder-rnn decoder-rnn fn lr)
(funcall fn decoder-rnn lr)
(funcall fn encoder-rnn lr))
;; train seq2seq network
(defun train-seq2seq (encoder-rnn decoder-rnn encoder xss tss epochs pstep fn lr)
(let ((sz ($count xss)))
(block train
(loop :for epoch :from 0 :below epochs
:do (loop :for xs0 :in xss
:for ts :in tss
:for idx :from 0
:for iter = (+ idx (* epoch sz))
:for xs = (reverse xs0)
:do (let ((loss (loss-seq2seq encoder-rnn decoder-rnn encoder xs ts)))
(gd! encoder-rnn decoder-rnn fn lr)
(when (zerop (rem iter pstep))
(let* ((lv ($data loss))
(ys (evaluate-seq2seq encoder-rnn decoder-rnn encoder xs))
(score (matches-score encoder ts ys)))
(prn iter lv score)
(prn "TS" (encoder-decode encoder ts))
(prn "YS" ys)
(when (< score 1E-2) (return-from train))))))))))
;; model
(defparameter *encoder-rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (affine-cell vsize *wvec-size*
:activation :nil
:biasp nil))
(recurrent-layer (rnn-cell *wvec-size* *hidden-size*)))))
(defparameter *decoder-rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (affine-cell vsize *wvec-size*
:activation :nil
:biasp nil))
(recurrent-layer (rnn-cell *wvec-size* *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize
:activation :nil)))))
($reset! *encoder-rnn*)
($reset! *decoder-rnn*)
;; overfitting for checking implementation
(time (train-seq2seq *encoder-rnn* *decoder-rnn* *encoder*
*overfit-xs-batches* *overfit-ys-batches*
5000 100
#'$adgd!
1))
(prn (car *overfit-xs-batches*))
(prn (encoder-decode *encoder* ($0 *overfit-ys-batches*)))
(prn (evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *overfit-xs-batches*)))
;; real training
(time (train-seq2seq *encoder-rnn* *decoder-rnn* *encoder*
*train-xs-batches* *train-ys-batches*
30 100
#'$adgd!
1))
(matches-score *encoder* ($0 *train-ys-batches*)
(evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *train-xs-batches*)))
(prn (encoder-decode *encoder* ($0 *train-ys-batches*)))
(prn (evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *train-xs-batches*)))
| 7,877
|
Common Lisp
|
.lisp
| 157
| 38.019108
| 96
| 0.557837
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
0922c1a5c8eaf3c2eaf8b338c9b25e56741d72dc550a25e1074796ac271623de
| 3,201
|
[
-1
] |
3,202
|
ch8.lisp
|
chunsj_TH/examples/books/dlfs2/ch8.lisp
|
(defpackage :dlfs2-ch8
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.layers
#:th.text))
(in-package :dlfs2-ch8)
;; data for the chapter 8 example
;;
;; number addition problems
(defparameter *data* (date-data))
(defparameter *data-length* ($count *data*))
(defparameter *encoder* (character-encoder (concatenate 'string "0123456789"
"abcdefghijklmnopqrstuvwxyz"
"ABCDEFGHIJKLMNOPQRSTUVWXYZ"
" _-,/")))
;; train and test datasets
(defparameter *train-input-data* (mapcar (lambda (s) (subseq s 0 29)) (subseq *data* 0 40000)))
(defparameter *train-target-data* (mapcar (lambda (s) (subseq s 30)) (subseq *data* 0 40000)))
(defparameter *test-input-data* (mapcar (lambda (s) (subseq s 0 29)) (subseq *data* 40000)))
(defparameter *test-target-data* (mapcar (lambda (s) (subseq s 30)) (subseq *data* 40000)))
(defparameter *bs* ($ (car (encoder-encode *encoder* '("_"))) 0))
;; network parameters
(defparameter *batch-size* 100)
(defparameter *hidden-size* 256)
(defparameter *wvec-size* 16)
;; preparing datasets - an helper function
(defun build-batches (data n)
(loop :for tail :on data :by (lambda (l) (nthcdr n l))
:collect (encoder-encode *encoder* (subseq tail 0 (min ($count tail) n)))))
;; for real training
(defparameter *train-xs-batches* (build-batches *train-input-data* *batch-size*))
(defparameter *train-ys-batches* (build-batches *train-target-data* *batch-size*))
;; for overfitting - to check implementation
(defparameter *overfit-xs-batches* (subseq (build-batches *train-input-data* 5) 0 1))
(defparameter *overfit-ys-batches* (subseq (build-batches *train-target-data* 5) 0 1))
;; encoder decoder network connection managements
(defun update-decoder-state! (decoder-rnn h) ($update-cell-state! ($ decoder-rnn 1) h))
(defun update-attention-memory! (decoder-rnn hs)
($set-memory! ($ ($ ($cell ($ decoder-rnn 2)) 0) 0) (concat-sequence hs)))
;; execution function for training
(defun execute-seq2seq (encoder-rnn decoder-rnn xs ts)
(let* ((hs ($execute encoder-rnn xs))
(h0 ($cell-state ($cell ($ encoder-rnn 1)))))
(update-decoder-state! decoder-rnn h0)
(update-attention-memory! decoder-rnn hs)
(with-keeping-state (decoder-rnn)
(let* ((batch-size ($size (car xs) 0))
(ys (append (list ($fill! (tensor.long batch-size) *bs*))
(butlast ts)))
(yts ($execute decoder-rnn ys)))
yts))))
;; loss function using cross entropy
(defun loss-seq2seq (encoder-rnn decoder-rnn xs ts)
(let* ((ys (execute-seq2seq encoder-rnn decoder-rnn xs ts))
(losses (mapcar (lambda (y c) ($cec y c)) ys ts)))
($div (apply #'$+ losses) ($count losses))))
;; generate using decoder
(defun generate-decoder (decoder-rnn encoder h0 hs xs0 n)
(let ((sampled '())
(xts xs0)
(batch-size ($size (car xs0) 0)))
(update-decoder-state! decoder-rnn h0)
(update-attention-memory! decoder-rnn hs)
(with-keeping-state (decoder-rnn)
(loop :for i :from 0 :below n
:do (let* ((yts ($evaluate decoder-rnn xts))
(rts (encoder-choose encoder yts -1)))
(push rts sampled)
(setf xts (encoder-encode encoder rts)))))
(let ((res (reverse sampled))
(results (make-list batch-size)))
(loop :for r :in res
:do (loop :for v :in r
:for i :from 0
:do (push v ($ results i))))
(mapcar (lambda (rs) (apply #'concatenate 'string (reverse rs))) results))))
;; running the model
(defun evaluate-seq2seq (encoder-rnn decoder-rnn encoder xs &optional (n 10))
(let ((hs ($evaluate encoder-rnn xs))
(h0 ($cell-state ($cell ($ encoder-rnn 1)))))
(generate-decoder decoder-rnn encoder h0 hs
(list ($fill! (tensor.long ($size (car xs) 0)) *bs*))
n)))
;; compare the results - between the generated one and the truth
(defun matches-score (encoder ts ys)
(let ((tss (encoder-decode encoder ts))
(yss ys))
(let ((matches (mapcar (lambda (tn yn) (if (string-equal tn yn) 0 1)) tss yss)))
(* 1D0 (/ (reduce #'+ matches) ($count matches))))))
(defun gd! (encoder-rnn decoder-rnn fn lr)
(funcall fn decoder-rnn lr)
(funcall fn encoder-rnn lr))
;; train seq2seq network
(defun train-seq2seq (encoder-rnn decoder-rnn encoder xss tss epochs pstep fn lr)
(let ((sz ($count xss)))
(block train
(loop :for epoch :from 0 :below epochs
:do (loop :for xsi :in xss
:for ts :in tss
:for idx :from 0
:for iter = (+ idx (* epoch sz))
:for xs = (reverse xsi)
:do (let ((loss (loss-seq2seq encoder-rnn decoder-rnn xs ts)))
(gd! encoder-rnn decoder-rnn fn lr)
(when (zerop (rem iter pstep))
(let* ((lv ($data loss))
(ys (evaluate-seq2seq encoder-rnn decoder-rnn encoder xs))
(score (matches-score encoder ts ys)))
(prn iter lv score)
(prn "XS" (encoder-decode encoder xs))
(prn "TS" (encoder-decode encoder ts))
(prn "YS" ys)
(prn "==")
(when (< score 1E-2) (return-from train))))))))))
;; model
(defparameter *encoder-rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (affine-cell vsize *wvec-size*
:activation :nil
:biasp nil))
(recurrent-layer (lstm-cell *wvec-size* *hidden-size*)))))
(defparameter *decoder-rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (affine-cell vsize *wvec-size*
:activation :nil
:biasp nil))
(recurrent-layer (lstm-cell *wvec-size* *hidden-size*))
(recurrent-layer
(sequential-layer
(parallel-layer (attention-cell)
(functional-layer
(lambda (q &key (trainp t))
(declare (ignore trainp))
q)))
(functional-layer
(lambda (c q &key (trainp t))
(declare (ignore trainp))
($cat q c 1)))))
(recurrent-layer (affine-cell (* 2 *hidden-size*) vsize
:activation :nil)))))
($reset! *encoder-rnn*)
($reset! *decoder-rnn*)
;; overfitting for checking implementation
(time (train-seq2seq *encoder-rnn* *decoder-rnn* *encoder*
*overfit-xs-batches* *overfit-ys-batches*
500 100
#'$adgd!
1))
(prn (car *overfit-xs-batches*))
(prn (encoder-decode *encoder* ($0 *overfit-ys-batches*)))
(prn (evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *overfit-xs-batches*)))
;; real training
(time (train-seq2seq *encoder-rnn* *decoder-rnn* *encoder*
*train-xs-batches* *train-ys-batches*
10 100
#'$adgd!
1))
(matches-score *encoder* ($0 *train-ys-batches*)
(evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *train-xs-batches*)))
(prn (encoder-decode *encoder* ($0 *train-ys-batches*)))
(prn (evaluate-seq2seq *encoder-rnn* *decoder-rnn* *encoder* ($0 *train-xs-batches*)))
| 8,449
|
Common Lisp
|
.lisp
| 163
| 37.067485
| 96
| 0.521244
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
84428149b8e3f81dd90d174e3af337edc2c6eb09ebc9a220afec68473c2c26ba
| 3,202
|
[
-1
] |
3,203
|
ch6.lisp
|
chunsj_TH/examples/books/dlfs2/ch6.lisp
|
(defpackage :dlfs2-ch6
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.layers
#:th.text))
(in-package :dlfs2-ch6)
(defparameter *simple-data* '("you" "say" "goodbye" "I" "say" "hello" "."))
(defparameter *encoder* (word-encoder *simple-data*))
(prn (encoder-encode *encoder* '(("hello" "goodbye"))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* '(("hello" "goodbye")))))
(defparameter *xs-data* '(("you" "say" "goodbye" "I" "say" "hello")
("say" "goodbye" "I" "say" "hello" ".")
("goodbye" "I" "say" "hello" "." "you")
("I" "say" "hello" "." "you" "say")))
(defparameter *ys-data* '(("say" "goodbye" "I" "say" "hello" ".")
("goodbye" "I" "say" "hello" "." "you")
("I" "say" "hello" "." "you" "say")
("say" "hello" "." "you" "say" "goodbye")))
(defparameter *xs* (encoder-encode *encoder* *xs-data*))
(defparameter *ys* (encoder-encode *encoder* *ys-data*))
(defparameter *hidden-size* 50)
(defparameter *rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (lstm-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
;; reset network
($reset! *rnn*)
;; train network
(time
(let* ((epochs 1000)
(print-step 100))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn* *xs*))
(losses (mapcar (lambda (y c) ($cec y c)) outputs *ys*))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn*)))))
($keep-state! *rnn* nil)
(prn ($generate-sequence *rnn* *encoder* '("you" "say") 10))
;; more complex data
(defparameter *data* (loop :for line :in (ptb :train)
:append (->> (strim line)
(split #\space))))
(defparameter *encoder* (word-encoder *data*))
;; simple encoding tests
(prn (encoder-encode *encoder* '(("hello" "world"))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* '(("this" "world")))))
;; encoding tests from encoded vocabularies
(prn (encoder-vocabularies *encoder*))
(prn (subseq *data* 27 127))
(prn (encoder-encode *encoder* (list (subseq *data* 27 127))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* (list (subseq *data* 27 127)))))
(defparameter *xs-data* (loop :for i :from 0 :below 10
:for idx = (+ i 27)
:collect (subseq *data* idx (+ idx 40))))
(defparameter *ys-data* (loop :for i :from 0 :below 10
:for idx = (+ i 28)
:collect (subseq *data* idx (+ idx 40))))
(defparameter *xs* (encoder-encode *encoder* *xs-data*))
(defparameter *ys* (encoder-encode *encoder* *ys-data*))
(defparameter *hidden-size* 100)
(defparameter *rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (lstm-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
;; reset network
($reset! *rnn*)
;; train network
(time
(let* ((epochs 1000)
(print-step 10))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn* *xs*))
(losses (mapcar (lambda (y c) ($cec y c)) outputs *ys*))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn*)))))
($keep-state! *rnn* nil)
(prn ($generate-sequence *rnn* *encoder* '("N" "years" "old" "will") 20))
;; XXX maybe adding more lstm layers could be possible.
;; dropout-cell, shared-affine-cell could be applied if any.
| 4,082
|
Common Lisp
|
.lisp
| 85
| 38.105882
| 94
| 0.538481
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
27785258238dee83ad439ed53d9214b770db224e98db12daed6c505e0e487ad1
| 3,203
|
[
-1
] |
3,204
|
ch4.lisp
|
chunsj_TH/examples/books/dlfs2/ch4.lisp
|
(defpackage :dlfs2-ch4
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.text))
(in-package :dlfs2-ch4)
;; choice
(prn (loop :for i :from 0 :below 20 :collect ($choice '(:a :b :c) '(0.1 0.2 0.7))))
(defun wimb (x w)
(cond ((atom (car x)) ($wimb x w))
((listp (car x)) (apply #'$cat (append (loop :for idcs :in x :collect ($wimb idcs w))
(list 0))))
(T (error "cannot compute embedding for dim > 2"))))
(let* ((vs (loop :for i :from 0 :below 21 :collect i))
(w (-> (tensor vs)
($reshape! 7 3))))
(prn ($ w 2))
(prn ($ w 5))
(prn ($index w 0 '(1 0 3 0)))
(prn ($wimb '(1 0 3 0) w))
(prn (wimb '(1 0 3 0) w))
(prn (wimb '((1 0 3 0) (1 0 3 0)) w)))
(prn ($cat (tensor '((1 2 3))) (tensor '((4 5 6))) 0))
(defun embed (x h w) ($reshape ($sum ($* ($index w 0 x) h) 1) ($size h 1)))
(let ((w (-> (tensor (loop :for i :from 0 :below 21 :collect i))
($reshape! 7 3)))
(idx '(0 3 1))
(h (tensor '((0 1 2)
(3 4 5)
(6 7 8)))))
(prn ($index w 0 idx))
(prn ($* ($index w 0 idx) h))
(prn (embed idx h w)))
;; XXX to be implemented in th
($onehot tensor num-classes)
(let ((w (-> (tensor (loop :for i :from 0 :below 21 :collect i))
($reshape! 7 3)))
(index '(0 3 1))
(index2 '((0 3 1) (0 3 1))))
(prn ($index w 0 index))
(prn ($reshape (tensor.long index2) 6))
(prn ($view ($index w 0 ($reshape (tensor.long index2) 6)) 2 3 3))
(prn ($embedding (tensor.long index2) w))
(prn ($embedding (tensor.long index2) ($parameter w))))
(gcf)
| 1,660
|
Common Lisp
|
.lisp
| 46
| 29.73913
| 93
| 0.493142
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
a2e77a8270c630e6638c6e0fdabd7b37106b6d1029be9991fbfd5367759628d4
| 3,204
|
[
-1
] |
3,205
|
ch5.lisp
|
chunsj_TH/examples/books/dlfs2/ch5.lisp
|
(defpackage :dlfs2-ch5
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.layers
#:th.text))
(in-package :dlfs2-ch5)
(defparameter *simple-data* '("you" "say" "goodbye" "I" "say" "hello" "."))
(defparameter *encoder* (word-encoder *simple-data*))
(prn (encoder-encode *encoder* '(("hello" "goodbye"))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* '(("hello" "goodbye")))))
(defparameter *xs-data* '(("you" "say" "goodbye" "I" "say" "hello")
("say" "goodbye" "I" "say" "hello" ".")
("goodbye" "I" "say" "hello" "." "you")
("I" "say" "hello" "." "you" "say")))
(defparameter *ys-data* '(("say" "goodbye" "I" "say" "hello" ".")
("goodbye" "I" "say" "hello" "." "you")
("I" "say" "hello" "." "you" "say")
("say" "hello" "." "you" "say" "goodbye")))
(defparameter *xs* (encoder-encode *encoder* *xs-data*))
(defparameter *ys* (encoder-encode *encoder* *ys-data*))
(defparameter *hidden-size* 50)
(defparameter *rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (rnn-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
;; reset network
($reset! *rnn*)
;; train network
(time
(let* ((epochs 1000)
(print-step 50))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn* *xs*))
(losses (mapcar (lambda (y c) ($cec y c)) outputs *ys*))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn*)))))
($keep-state! *rnn* nil)
(prn ($generate-sequence *rnn* *encoder* '("you" "say") 10))
;; more complex data
(defparameter *data* (loop :for line :in (ptb :train)
:append (->> (strim line)
(split #\space))))
(defparameter *encoder* (word-encoder *data*))
;; simple encoding tests
(prn (encoder-encode *encoder* '(("hello" "world"))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* '(("this" "world")))))
;; encoding tests from encoded vocabularies
(prn (encoder-vocabularies *encoder*))
(prn (subseq *data* 27 127))
(prn (encoder-encode *encoder* (list (subseq *data* 27 127))))
(prn (encoder-decode *encoder* (encoder-encode *encoder* (list (subseq *data* 27 127)))))
(defparameter *xs-data* (loop :for i :from 0 :below 10
:for idx = (+ i 27)
:collect (subseq *data* idx (+ idx 40))))
(defparameter *ys-data* (loop :for i :from 0 :below 10
:for idx = (+ i 28)
:collect (subseq *data* idx (+ idx 40))))
(defparameter *xs* (encoder-encode *encoder* *xs-data*))
(defparameter *ys* (encoder-encode *encoder* *ys-data*))
(defparameter *hidden-size* 100)
(defparameter *rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (rnn-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
;; reset network
($reset! *rnn*)
;; train network
(time
(let* ((epochs 1000)
(print-step 10))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn* *xs*))
(losses (mapcar (lambda (y c) ($cec y c)) outputs *ys*))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn*)))))
($keep-state! *rnn* nil)
(prn ($generate-sequence *rnn* *encoder* '("N" "years" "old" "will") 20))
| 3,961
|
Common Lisp
|
.lisp
| 83
| 37.60241
| 94
| 0.531104
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
1fbe5284af2f9d327820b726a57b08b8366429bc8c5750d50802a987e22b5dc5
| 3,205
|
[
-1
] |
3,206
|
ch06.lisp
|
chunsj_TH/examples/books/gdl/ch06.lisp
|
(defpackage :gdl-ch06
(:use #:common-lisp
#:mu
#:th))
(in-package :gdl-ch06)
(defparameter *streetlights* (tensor '((1 0 1)
(0 1 1)
(0 0 1)
(1 1 1)
(0 1 1)
(1 0 1))))
(defparameter *walk-vs-stop* (tensor '(0 1 0 1 1 0)))
(defparameter *weights* (tensor '(0.5 0.48 -0.7)))
(defparameter *alpha* 0.1)
;; learning for single data
(let ((input ($ *streetlights* 0))
(goal-prediction ($ *walk-vs-stop* 0)))
(loop :for i :from 1 :to 20
:for prediction = ($dot input *weights*)
:for err = (expt (- prediction goal-prediction) 2)
:for delta = (- prediction goal-prediction)
:do (let ((nw ($- *weights* ($* *alpha* ($* input delta)))))
(setf *weights* nw)
(prn (list err prediction goal-prediction)))))
;; learning for every data - reset *weights*
(defparameter *weights* (tensor '(0.5 0.48 -0.7)))
(defparameter *alpha* 0.1)
(loop :for n :from 1 :to 50
:for all-err = 0
:do (progn
(loop :for i :from 0 :below ($size *streetlights* 0)
:for input = ($ *streetlights* i)
:for goal-prediction = ($ *walk-vs-stop* i)
:for prediction = ($dot input *weights*)
:for err = (expt (- prediction goal-prediction) 2)
:for delta = (- prediction goal-prediction)
:do (progn
(setf all-err (+ all-err err))
(setf *weights* ($- *weights* ($* *alpha* ($* input delta))))))
(prn (list n all-err))))
;; test learned weights
(prn ($mv *streetlights* *weights*))
;; first deep neural network
(setf ($seed th::*generator*) 101)
(defparameter *alpha* 0.2)
(defparameter *hidden-size* 4)
(defparameter *streetlights* (tensor '((1 0 1)
(0 1 1)
(0 0 1)
(1 1 1))))
(defparameter *walk-vs-stop* ($transpose (tensor '((1 1 0 0)))))
(defparameter *weights-0-1* ($- ($* 2 (rnd 3 *hidden-size*)) 1))
(defparameter *weights-1-2* ($- ($* 2 (rnd *hidden-size* 1)) 1))
(defun relu (x) ($* (tensor ($gt x 0)) x))
(defun drelu (output) (tensor ($gt output 0)))
(loop :for n :from 1 :to 60
:for layer-2-error = 0
:do (progn
(loop :for i :from 0 :below ($size *streetlights* 0)
:for layer-0 = ($index *streetlights* 0 (list i))
:for layer-1 = (relu ($mm layer-0 *weights-0-1*))
:for layer-2 = ($mm layer-1 *weights-1-2*)
:for y = ($index *walk-vs-stop* 0 (list i))
:for err = ($sum ($expt ($sub layer-2 y) 2))
:for layer-2-delta = ($sub layer-2 y)
:for layer-1-delta = ($* ($mm layer-2-delta ($transpose *weights-1-2*))
(drelu layer-1))
:do (let ((dweights-1-2 ($* *alpha* ($mm ($transpose layer-1)
layer-2-delta)))
(dweights-0-1 ($* *alpha* ($mm ($transpose layer-0)
layer-1-delta))))
(setf layer-2-error (+ layer-2-error err))
(setf *weights-1-2* ($- *weights-1-2* dweights-1-2))
(setf *weights-0-1* ($- *weights-0-1* dweights-0-1))))
(prn layer-2-error)))
;; result
(prn (-> ($@ *streetlights* *weights-0-1*)
(relu)
($@ *weights-1-2*)))
| 3,753
|
Common Lisp
|
.lisp
| 79
| 33.050633
| 89
| 0.457923
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
7dc8758cac33c03b7dff59eb1dd9761c12a23f52d0607a0230db830740a52e21
| 3,206
|
[
-1
] |
3,207
|
ch11.lisp
|
chunsj_TH/examples/books/gdl/ch11.lisp
|
(ql:quickload :cl-ppcre)
(defpackage :gdl-ch11
(:use #:common-lisp
#:mu
#:th
#:th.db.imdb))
(in-package :gdl-ch11)
;; onehots
(defparameter *onehots* #{})
(setf ($ *onehots* "cat") (tensor '(1 0 0 0)))
(setf ($ *onehots* "the") (tensor '(0 1 0 0)))
(setf ($ *onehots* "dog") (tensor '(0 0 1 0)))
(setf ($ *onehots* "sat") (tensor '(0 0 0 1)))
(defun word2hot (w) ($ *onehots* w))
(let ((sentence '("the" "cat" "sat")))
(prn (reduce #'$+ (mapcar #'word2hot sentence))))
;; to implement efficient embedding layer, we need row/column selection
;; which is possible by using $index function
(let ((w (tensor '((1 2 3) (2 3 4) (3 4 5) (4 5 6) (5 6 7) (6 7 8) (7 8 9)))))
(prn ($index w 0 (tensor.long '(0 1 4))))
(prn ($sum ($index w 0 (tensor.long '(0 1 4))) 0))
(prn w))
;; compare multiplication and embedding layer shortcut (conceptually)
(let ((x (tensor '((1 1 0 1))))
(w (tensor '((1 2 3) (2 3 4) (3 4 5) (4 5 6)))))
(prn (time ($mm x w)))
(prn (time ($sum ($index w 0 '(0 1 3)) 0)))
(prn ($index ($nonzero x) 1 '(1)))
(prn (time ($sum ($index w 0 ($reshape ($index ($nonzero x) 1 '(1)) 3)) 0))))
(defun process-review (review)
(remove-duplicates (->> (remove-duplicates (split #\space review) :test #'equal)
(mapcar (lambda (w)
(cl-ppcre:regex-replace-all
"[^a-z0-9A-Z]"
(string-downcase w)
"")))
(remove-if-not (lambda (w) (> ($count w) 0))))
:test #'equal))
(defparameter *imdb* (read-imdb-data2))
(defparameter *reviews* (mapcar #'process-review ($ *imdb* :reviews)))
(defparameter *labels* ($ *imdb* :labels))
(defparameter *train-reviews* (subseq *reviews* 0 24000))
(defparameter *train-labels* (subseq *labels* 0 24000))
(defparameter *test-reviews* (subseq *reviews* 24000))
(defparameter *test-labels* (subseq *labels* 24000))
(defparameter *words* (remove-duplicates (->> *reviews*
(apply #'$concat))
:test #'equal))
(defparameter *w2i* (let ((h (make-hash-table :test 'equal :size ($count *words*))))
(loop :for w :in *words*
:for i :from 0
:do (setf ($ h w) i))
h))
(defun review-to-indices (review-words)
(sort (remove-duplicates (->> review-words
(mapcar (lambda (w) ($ *w2i* w)))
(remove-if (lambda (w) (null w))))
:test #'equal)
#'<))
(defparameter *input-dataset* (mapcar #'review-to-indices *train-reviews*))
(defparameter *target-dataset* (tensor (mapcar (lambda (s) (if (equal s "positive") 1 0))
*train-labels*)))
(prn ($index *target-dataset* 0 '(0 1 2 3 4)))
;; now we have indices of words as input
(prn ($count *words*)) ;; this is conceptually real input size
;; instead of large matrix multiplication, we can use selection+sum
(let ((w (rnd ($count *words*) 100)))
(prn (time ($sum ($index w 0 ($0 *input-dataset*)) 0))))
;; for auto backpropagation support
(let ((w ($parameter (rnd ($count *words*) 100))))
(prn (time ($sum ($index w 0 ($0 *input-dataset*)) 0))))
(defparameter *alpha* 0.01)
(defparameter *iterations* 2)
(defparameter *hidden-size* 100)
(defparameter *w01* ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1))
(defparameter *w12* ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1))
(defun predict-sentiment (x)
(let* ((w01 ($index *w01* 0 x))
(l1 (-> ($sum w01 0)
($sigmoid!)))
(l2 (-> ($dot l1 *w12*)
($sigmoid!))))
l2))
(defparameter *test-dataset* (mapcar #'review-to-indices *test-reviews*))
(defparameter *test-target* (tensor (mapcar (lambda (s) (if (equal s "positive") 1 0))
*test-labels*)))
(defun prn-test-perf ()
(let ((total 0)
(correct 0))
(loop :for i :from 0 :below (min 1000 ($count *test-dataset*))
:for x = ($ *test-dataset* i)
:for y = ($ *test-target* i)
:do (let ((s (predict-sentiment x)))
(incf total)
(when (< (abs (- s y)) 0.5)
(incf correct))))
(prn "=>" total correct)))
(time
(loop :for iter :from 1 :to *iterations*
:do (let ((total 0)
(correct 0))
(loop :for i :from 0 :below ($count *input-dataset*)
:for x = ($ *input-dataset* i)
:for y = ($ *target-dataset* i)
:for w01 = ($index *w01* 0 x)
:for l1 = (-> ($sum w01 0)
($sigmoid))
:for l2 = (-> ($dot l1 *w12*)
($sigmoid))
:for dl2 = ($sub l2 y)
:for dl1 = ($* dl2 ($transpose *w12*))
:do (let ((d1 ($mul! dl1 *alpha*))
(d2 ($mul! l1 (* dl2 *alpha*))))
(setf ($index *w01* 0 x)
($sub! w01 ($expand! d1 ($size w01))))
($sub! *w12* d2)
(incf total)
(when (< (abs dl2) 0.5)
(incf correct))))
(when (zerop (rem iter 1))
(prn iter total correct)
(prn-test-perf)))))
(prn (predict-sentiment ($ *input-dataset* 10)))
(prn (predict-sentiment ($ *input-dataset* 2345)))
(let* ((review ($0 *test-reviews*))
(sentiment ($0 *test-labels*))
(input (review-to-indices review)))
(prn review)
(prn sentiment)
(prn input)
(prn (predict-sentiment input)))
(prn-test-perf)
;; wow, this really works
(let* ((my-review "this so called franchise movie of avengers is great master piece. i've enjoyed it very much and my kids love this one as well. though my wife generally does not like this kind of genre, she said this one is better than others.")
(review (process-review my-review))
(x (review-to-indices review)))
(prn x)
(prn (predict-sentiment x)))
(let* ((my-review "this movie is just a political propaganda, it has neither entertainment or message. i just regret my spending of precious time on this one.")
(review (process-review my-review))
(x (review-to-indices review)))
(prn x)
(prn (predict-sentiment x)))
;; What hidden layer learns
(defun similar (word)
(let ((target-index ($ *w2i* word)))
(when target-index
(let ((weight-target ($ *w01* target-index))
(scores nil))
(loop :for w :in *words*
:for weight = ($ *w01* ($ *w2i* w))
:for difference = ($sub weight weight-target)
:for wdiff = ($dot difference difference)
:do (let ((score (sqrt wdiff)))
(push (cons w score) scores)))
(subseq (sort scores (lambda (a b) (< (cdr a) (cdr b)))) 0 (min 10 ($count scores)))))))
(prn (similar "beautiful"))
(prn (similar "terrible"))
| 7,242
|
Common Lisp
|
.lisp
| 158
| 35.35443
| 247
| 0.516088
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
23928c518d0ce7f1fd910ff5b64a4d5c1de4c05fe41552e88a786a4e673b756d
| 3,207
|
[
-1
] |
3,208
|
ch03.lisp
|
chunsj_TH/examples/books/gdl/ch03.lisp
|
(defpackage :gdl-ch03
(:use #:common-lisp
#:mu
#:th))
(in-package :gdl-ch03)
;; simple neural network
(defun neural-network (input weight)
(* input weight))
(defparameter *number-of-toes* '(8.5 9.5 10 9))
(let ((weight 0.1))
(prn (neural-network ($ *number-of-toes* 0) weight)))
;; multiple inputs
(defun wsum (input weights)
(->> (mapcar #'* input weights)
(reduce #'+)))
(defun neural-network (input weights)
(wsum input weights))
(defparameter *ntoes* '(8.5 9.5 9.9 9.0))
(defparameter *wlrec* '(0.65 0.8 0.8 0.9))
(defparameter *nfans* '(1.2 1.3 0.5 1.0))
(let ((input (list ($ *ntoes* 0)
($ *wlrec* 0)
($ *nfans* 0)))
(weight '(0.1 0.2 0)))
(prn (neural-network input weight)))
;; multiple output
(defun neural-network (input weights)
(mapcar (lambda (w) (* input w)) weights))
;; predict three outputs from the first win/loss record entry
(let ((weight '(0.3 0.2 0.9)))
  (prn (neural-network ($ *wlrec* 0) weight)))
;; multiple input & multiple output
;; prediction is now a matrix-vector product of the weight matrix and
;; the input vector
(defun neural-network (input weight)
  ($mv weight input))
(let ((weight (tensor '((0.1 0.1 -0.3)
                        (0.1 0.2 0.0)
                        (0.0 1.3 0.1))))
      (input (tensor (list ($ *ntoes* 0)
                           ($ *wlrec* 0)
                           ($ *nfans* 0)))))
  (prn (neural-network input weight)))
;; stacked!
;; two linear layers chained: input -> hidden (ih) -> prediction (hp)
(defun neural-network (input ih hp)
  (->> input
       ($mv ih)
       ($mv hp)))
(let ((input-to-hidden (tensor '((0.1 0.2 -0.1)
                                 (-0.1 0.1 0.9)
                                 (0.1 0.4 0.1))))
      (hidden-to-prediction (tensor '((0.3 1.1 -0.3)
                                      (0.1 0.2 0.0)
                                      (0.0 1.3 1.1))))
      (input (tensor (list ($ *ntoes* 0)
                           ($ *wlrec* 0)
                           ($ *nfans* 0)))))
  (prn (neural-network input input-to-hidden hidden-to-prediction)))
| 1,932
|
Common Lisp
|
.lisp
| 55
| 26.218182
| 68
| 0.495974
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
0c9a145e5c9732d98855e6c0b2f83899d99115f3dbd9d5f6602d0d5d10ce9d9a
| 3,208
|
[
-1
] |
3,209
|
ch11-2.lisp
|
chunsj_TH/examples/books/gdl/ch11-2.lisp
|
(ql:quickload :cl-ppcre)
(defpackage :gdl-ch11-2
(:use #:common-lisp
#:mu
#:th
#:th.db.imdb))
(in-package :gdl-ch11-2)
;; Tokenize a raw review: split on spaces, lowercase each token, strip
;; every character outside [a-z0-9A-Z], drop empty strings, and keep
;; each surviving word only once.
(defun process-review (review)
  (let ((cleaned (loop :for token :in (split #\space review)
                       :for word = (cl-ppcre:regex-replace-all
                                    "[^a-z0-9A-Z]"
                                    (string-downcase token)
                                    "")
                       :when (> ($count word) 0)
                         :collect word)))
    (remove-duplicates cleaned :test #'equal)))
;; Load the IMDB dataset and preprocess each review into a word list.
(defparameter *imdb* (read-imdb-data2))
(defparameter *reviews* (->> ($ *imdb* :reviews)
                             (mapcar #'process-review)))
;; labels: "positive" -> 1, anything else -> 0
(defparameter *labels* (->> ($ *imdb* :labels)
                            (mapcar (lambda (s) (if (equal s "positive") 1 0)))))
;; vocabulary: all distinct words across every review
(defparameter *words* (remove-duplicates (apply #'$concat *reviews*) :test #'equal))
;; word -> vocabulary-index lookup table
(defparameter *w2i* (let ((h (make-hash-table :test 'equal :size ($count *words*))))
                      (loop :for w :in *words*
                            :for i :from 0
                            :do (setf ($ h w) i))
                      h))
;; Map a review's words to sorted, de-duplicated vocabulary indices;
;; words missing from *w2i* (nil lookups) are dropped.
(defun review-to-indices (review-words)
  (sort (remove-duplicates (->> review-words
                                (mapcar (lambda (w) ($ *w2i* w)))
                                (remove-if (lambda (w) (null w))))
                           :test #'equal)
        #'<))
;; train dataset - all but the last 1000 reviews
(defparameter *train-dataset* (mapcar #'review-to-indices
                                      (subseq *reviews* 0 (- ($count *reviews*) 1000))))
(defparameter *train-targets* (tensor (subseq *labels* 0 (- ($count *labels*) 1000))))
;; test dataset - the last 1000 reviews
(defparameter *test-dataset* (mapcar #'review-to-indices
                                     (subseq *reviews* (- ($count *reviews*) 1000))))
(defparameter *test-targets* (tensor (subseq *labels* (- ($count *labels*) 1000))))
;; neural network - hyperparameters and the two weight matrices,
;; initialized uniformly in [-0.1, 0.1)
(defparameter *alpha* 0.01)
(defparameter *iterations* 2)
(defparameter *hidden-size* 100)
(defparameter *w01* ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1))
(defparameter *w12* ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1))
;; re-randomize both weight matrices (same scheme as above)
(defun reset-weights ()
  (setf *w01* ($- ($* 0.2 (rnd ($count *words*) *hidden-size*)) 0.1))
  (setf *w12* ($- ($* 0.2 (rnd *hidden-size* 1)) 0.1)))
;; prediction utility function
;; x is a list of word indices; the hidden layer sums the selected
;; embedding rows, then both layers apply a sigmoid.
(defun predict-sentiment (x)
  (let* ((w01 ($index *w01* 0 x))
         (l1 (-> ($sum w01 0)
                 ($sigmoid!)))
         (l2 (-> ($dot l1 *w12*)
                 ($sigmoid!))))
    l2))
;; prn test stats
;; Scores up to 1000 test reviews and prints total/correct counts; a
;; prediction is counted correct when within 0.5 of the 0/1 target.
(defun prn-test-perf ()
  (let ((total 0)
        (correct 0))
    (loop :for i :from 0 :below (min 1000 ($count *test-dataset*))
          :for x = ($ *test-dataset* i)
          :for y = ($ *test-targets* i)
          :do (let ((s (predict-sentiment x)))
                (incf total)
                (when (< (abs (- s y)) 0.5)
                  (incf correct))))
    (prn "=>" total correct)))
;; Train the sentiment network with plain SGD, updating in place only
;; the *w01* rows selected by each review's word indices plus *w12*.
(defun train (&optional (niter *iterations*))
  (loop :for iter :from 1 :to niter
        :do (let ((total 0)
                  (correct 0))
              (loop :for i :from 0 :below ($count *train-dataset*)
                    :for x = ($ *train-dataset* i)
                    :for y = ($ *train-targets* i)
                    ;; forward pass
                    :for w01 = ($index *w01* 0 x)
                    :for l1 = (-> ($sum w01 0)
                                  ($sigmoid))
                    :for l2 = (-> ($dot l1 *w12*)
                                  ($sigmoid))
                    ;; backward pass: dl2 is the output delta
                    :for dl2 = ($sub l2 y)
                    :for dl1 = ($* dl2 ($transpose *w12*))
                    :do (let ((d1 ($mul! dl1 *alpha*))
                              (d2 ($mul! l1 (* dl2 *alpha*))))
                          ;; the same delta row is broadcast across the
                          ;; selected embedding rows
                          (setf ($index *w01* 0 x)
                                ($sub! w01 ($expand! d1 ($size w01))))
                          ($sub! *w12* d2)
                          (incf total)
                          (when (< (abs dl2) 0.5)
                            (incf correct))))
              (when (zerop (rem iter 1))
                (prn iter total correct)
                (prn-test-perf)))))
;; execute training
(reset-weights)
(time (train))
(gcf)
;; personal test to check the network really works
(let* ((my-review "this so called franchise movie of avengers is great master piece. i've enjoyed it very much and my kids love this one as well. though my wife generally does not like this kind of genre, she said this one is better than others.")
       (review (process-review my-review))
       (x (review-to-indices review)))
  (prn x)
  (prn (predict-sentiment x)))
(let* ((my-review "this movie is just a political propaganda, it has neither entertainment or message. i just regret my spending of precious time on this one.")
       (review (process-review my-review))
       (x (review-to-indices review)))
  (prn x)
  (prn (predict-sentiment x)))
;; what hidden layer learns
;; Ten words whose *w01* row is closest (euclidean) to WORD's row;
;; returns nil when WORD is not in the vocabulary.
(defun similar (word)
  (let ((target-index ($ *w2i* word)))
    (when target-index
      (let ((weight-target ($ *w01* target-index))
            (scores nil))
        (loop :for w :in *words*
              :for weight = ($ *w01* ($ *w2i* w))
              :for difference = ($sub weight weight-target)
              :for wdiff = ($dot difference difference)
              :do (let ((score (sqrt wdiff)))
                    (push (cons w score) scores)))
        ;; smallest distances first; WORD itself ranks first (distance 0)
        (subseq (sort scores (lambda (a b) (< (cdr a) (cdr b)))) 0 (min 10 ($count scores)))))))
(prn (similar "beautiful"))
(prn (similar "terrible"))
;; Tokenize a review keeping duplicates and original word order:
;; lowercase, strip non-alphanumeric characters, drop empty tokens.
(defun tokenize-review (review)
  (loop :for token :in (split #\space review)
        :for word = (cl-ppcre:regex-replace-all
                     "[^a-z0-9A-Z]"
                     (string-downcase token)
                     "")
        :when (> ($count word) 0)
          :collect word))
;; Word2vec-style data preparation.
(defparameter *review-tokens* (mapcar #'tokenize-review ($ *imdb* :reviews)))
;; vocabulary as a vector sorted by descending word frequency
(defparameter *vocab* (let ((counts #{}))
                        (loop :for sentence :in *review-tokens*
                              :do (loop :for word :in sentence
                                        :do (let ((pcnt ($ counts word 0)))
                                              (setf ($ counts word) (1+ pcnt)))))
                        (let ((cnts (loop :for w :in (hash-table-keys counts)
                                          :collect (cons w ($ counts w)))))
                          (coerce (->> (sort cnts (lambda (p1 p2) (> (cdr p1) (cdr p2))))
                                       (mapcar #'car))
                                  'vector))))
;; word -> vocabulary-index table
(defparameter *w2i* #{})
(loop :for i :from 0 :below ($count *vocab*) :do (setf ($ *w2i* ($ *vocab* i)) i))
;; *concatenated*: all word indices in corpus order (negative-sampling pool);
;; *input-dataset*: one index list per review
(defparameter *concatenated* nil)
(defparameter *input-dataset* nil)
(loop :for sentence :in *review-tokens*
      :do (let ((sentence-indices nil))
            (loop :for word :in sentence
                  :do (let ((wi ($ *w2i* word)))
                        (push wi sentence-indices)
                        (push wi *concatenated*)))
            (push (reverse sentence-indices) *input-dataset*)))
(setf *concatenated* (coerce (reverse *concatenated*) 'vector))
(setf *input-dataset* (reverse *input-dataset*))
;; shuffle input-dataset
(let ((indices (tensor.int (rndperm ($count *input-dataset*)))))
  (setf *input-dataset* (loop :for i :in ($list indices)
                              :collect ($ *input-dataset* i))))
(setf *input-dataset* (coerce *input-dataset* 'vector))
;; hyperparameters: *window* context words each side, *negative*
;; negative samples per target
(defparameter *alpha* 0.05)
(defparameter *iterations* 2)
(defparameter *hidden-size* 50)
(defparameter *window* 2)
(defparameter *negative* 5)
(defparameter *w01* ($* ($- (rnd ($count *vocab*) *hidden-size*) 0.5) 0.2))
(defparameter *w12* ($- ($* 0.2 (rnd ($count *vocab*) *hidden-size*)) 0.1))
(defun reset-weights ()
  (setf *w01* ($* ($- (rnd ($count *vocab*) *hidden-size*) 0.5) 0.2))
  (setf *w12* ($- ($* 0.2 (rnd ($count *vocab*) *hidden-size*)) 0.1)))
;; target vector for negative sampling: 1 for the true word, 0 for the
;; *negative* sampled words
(defparameter *layer-2-target* (zeros (1+ *negative*)))
(setf ($ *layer-2-target* 0) 1)
;; Ten vocabulary words with the smallest euclidean distance between
;; their *w01* embedding row and WORD's row.
(defun similar (word)
  (let ((target-index ($ *w2i* word)))
    (when target-index
      (let ((weight-target ($ *w01* target-index))
            (scores nil))
        (loop :for i :from 0 :below ($count *vocab*)
              :for w = ($ *vocab* i)
              :for index = ($ *w2i* w)
              :for weight = ($ *w01* index)
              :for difference = ($sub weight weight-target)
              :for wdiff = ($dot difference difference)
              :do (let ((score (sqrt wdiff)))
                    (push (cons w score) scores)))
        (subseq (sort scores (lambda (a b) (< (cdr a) (cdr b)))) 0 (min 10 ($count scores)))))))
;; TARGET plus NEGATIVE word indices drawn uniformly from the corpus
;; (frequent words are sampled more often since *concatenated* keeps
;; duplicates)
(defun negsample (target &optional (negative *negative*))
  (let ((rns (loop :for k :from 0 :below negative :collect (random ($count *concatenated*)))))
    (append (list target) (mapcar (lambda (i) ($ *concatenated* i)) rns))))
;; context window for position I of REVIEW: up to *window* indices on
;; each side, excluding position I itself
(defun mkctx (review i)
  (let ((left (subseq review (max 0 (- i *window*)) i))
        (right (subseq review (1+ i) (min ($count review) (+ 1 i *window*)))))
    (append left right)))
;; CBOW-style word2vec training with negative sampling: the hidden
;; activation is the mean of the context-word embedding rows; the
;; output layer scores the true target against negative samples.
(defun train (&optional (iterations *iterations*))
  (loop :for niter :from 0 :below iterations
        :for nci = ($count *input-dataset*)
        :do (loop :for nreview :from 0 :below nci
                  :for review = ($ *input-dataset* nreview)
                  :for nr = ($count review)
                  :do (progn
                        (loop :for i :from 0 :below nr
                              :for targetw = ($ review i)
                              :for x = (mkctx review i)
                              :for sample = (negsample targetw)
                              :for w01 = ($index *w01* 0 x)
                              :for l1 = ($resize! ($mean w01 0) (list 1 *hidden-size*))
                              :for w2s = ($index *w12* 0 sample)
                              :for l2 = ($sigmoid ($mm l1 ($transpose w2s)))
                              :for dl2 = ($sub l2 *layer-2-target*)
                              :for dl1 = ($mm dl2 w2s)
                              :do (let ((dw1 ($mul! dl1 *alpha*))
                                        ;; outer product of output delta and
                                        ;; hidden activation
                                        (dw2 ($mul! ($vv ($resize! dl2 (list (1+ *negative*)))
                                                         ($resize! l1 (list *hidden-size*)))
                                                    *alpha*)))
                                    (setf ($index *w01* 0 x) ($sub w01 ($expand! dw1 ($size w01))))
                                    (setf ($index *w12* 0 sample) ($sub w2s dw2))))
                        ;; progress: nearest neighbors of "terrible" improve
                        ;; as embeddings train
                        (when (zerop (rem nreview 200))
                          (prn niter nreview (similar "terrible")))))))
(reset-weights)
(time (train))
(gcf)
(prn (similar "terrible"))
(prn (similar "king"))
(prn (similar "queen"))
;; Word analogy query: sum the rows for POSITIVES, subtract those for
;; NEGATIVES, then rank the vocabulary by distance to that query vector.
;; NOTE(review): "normed-weights" multiplies each row by its SQUARED
;; norm rather than dividing by its norm — this mirrors the book's
;; reference code, but confirm it is the intended scaling.
(defun analogy (positives negatives)
  (let* ((norms (-> ($sum ($mul *w01* *w01*) 1)
                    ($resize! (list ($size *w01* 0) 1))))
         (normed-weights ($mul ($expand! norms ($size *w01*)) *w01*))
         (query-vector (zeros ($size *w01* 1)))
         (scores nil))
    (loop :for word :in positives
          :do ($add! query-vector ($ normed-weights ($ *w2i* word))))
    (loop :for word :in negatives
          :do ($sub! query-vector ($ normed-weights ($ *w2i* word))))
    (loop :for i :from 0 :below ($count *vocab*)
          :for w = ($ *vocab* i)
          :for index = ($ *w2i* w)
          :for weight = ($ *w01* index)
          :for difference = ($sub weight query-vector)
          :for wdiff = ($dot difference difference)
          :for score = (sqrt wdiff)
          :do (push (cons w score) scores))
    ;; subseq from 1: drop the single best match (usually a query word)
    (-> (sort scores (lambda (a b) (< (cdr a) (cdr b))))
        (subseq 1 (min 10 ($count scores))))))
(prn (analogy '("terrible" "good") '("bad")))
(prn (analogy '("elizabeth" "he") '("she")))
(prn (analogy '("king" "woman") '("man")))
| 11,959
|
Common Lisp
|
.lisp
| 249
| 35.144578
| 247
| 0.487234
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
91fb3b753f09865d6067210f98961568d62bfe9a6b8d92f5f8a7d12a5f35db39
| 3,209
|
[
-1
] |
3,210
|
ch08.lisp
|
chunsj_TH/examples/books/gdl/ch08.lisp
|
(defpackage :gdl-ch08
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :gdl-ch08)
;; use smaller dataset for speed
;; Load the small MNIST tensors from disk and return a hash table keyed
;; by :train-images / :train-labels / :test-images / :test-labels.
;; Improvements over the original: the four identical read sequences are
;; factored into one helper, and the file handle is closed even when
;; $fread signals an error (the original leaked it in that case).
(defun smaller-mnist-data ()
  (flet ((read-tensor (path)
           ;; read one serialized tensor from PATH, always closing the file
           (let ((ts (tensor))
                 (f (file.disk path "r")))
             (unwind-protect ($fread ts f)
               ($fclose f))
             ts)))
    #{:train-images (read-tensor "./gdl/mnist/mnist-small-train-images.tensor")
      :train-labels (read-tensor "./gdl/mnist/mnist-small-train-labels.tensor")
      :test-images (read-tensor "./gdl/mnist/mnist-small-test-images.tensor")
      :test-labels (read-tensor "./gdl/mnist/mnist-small-test-labels.tensor")}))
(defparameter *mnist* (smaller-mnist-data))
;; network shape: 784 inputs -> 40 hidden (relu) -> 10 softmax outputs
(defparameter *pixels-per-image* 784)
(defparameter *hidden-size* 40)
(defparameter *num-labels* 10)
(defparameter *alpha* 0.005)
(defparameter *iterations* 300)
;; autograd parameters, initialized uniformly in [-0.1, 0.1)
(defparameter *w01* ($parameter ($- ($* 0.2 (rnd *pixels-per-image* *hidden-size*)) 0.1)))
(defparameter *w12* ($parameter ($- ($* 0.2 (rnd *hidden-size* *num-labels*)) 0.1)))
;; forward pass: affine -> relu -> affine -> softmax
(defun mnist-predict (x)
  (-> x
      ($@ *w01*)
      ($relu)
      ($@ *w12*)
      ($softmax)))
;; cross-entropy loss between prediction y* and one-hot target y
(defun mnist-loss (y* y) ($cee y* y))
;; test functions
(prn (mnist-predict ($index ($ *mnist* :train-images) 0 '(0))))
(prn (mnist-loss (mnist-predict ($index ($ *mnist* :train-images) 0 '(0)))
                 ($index ($ *mnist* :train-labels) 0 '(0))))
(defparameter *mnist-train-images* ($ *mnist* :train-images))
(defparameter *mnist-train-labels* ($ *mnist* :train-labels))
;; indices of the maximum entries of x along DIMENSION (an argmax)
(defun amax (x &optional (dimension 0))
  (let ((vals (tensor))
        (indices (tensor.long)))
    ($max! vals indices x dimension)
    indices))
;; Train for 100 epochs with per-sample updates ($gs! backprops the
;; loss, $gd! applies a gradient-descent step); after each epoch print
;; the full-train loss and misclassification count.
(loop :for n :from 1 :to 100
      :do (let ((ndata ($size *mnist-train-images* 0)))
            (loop :for i :from 0 :below ndata
                  :for x = ($index *mnist-train-images* 0 (list i))
                  :for y = ($index *mnist-train-labels* 0 (list i))
                  :for y* = (mnist-predict x)
                  :for l = (mnist-loss y* y)
                  :do (progn
                        ($gs! l)
                        ($gd! (list *w01* *w12*) *alpha*)))
            (when (zerop (rem n 1))
              (let* ((indices (loop :for k :from 0 :below ndata :collect k))
                     (predictions (mnist-predict ($index *mnist-train-images* 0 indices)))
                     (truevals ($index *mnist-train-labels* 0 indices)))
                (prn n ($data (mnist-loss predictions truevals)))
                (prn "missed:" ($sum ($ne (amax ($data predictions) 1) (amax truevals 1))))))))
;; misclassification count on the first 100 test images
(let* ((indices (loop :for k :from 0 :below 100 :collect k))
       (ps (mnist-predict ($index ($ *mnist* :test-images) 0 indices)))
       (cs ($index ($ *mnist* :test-labels) 0 indices)))
  (prn ($sum ($ne (amax ($data ps) 1) (amax cs 1)))))
;; with dropout
;; Same network plus dropout (rate 0.2) on the hidden layer; TRAINP
;; controls whether dropout is active.
(defun mnist-predict-do (x &optional trainp)
  (-> x
      ($@ *w01*)
      ($relu)
      ($dropout trainp 0.2)
      ($@ *w12*)
      ($softmax)))
(prn (mnist-predict-do ($index ($ *mnist* :train-images) 0 '(0)) t))
(prn (mnist-loss (mnist-predict-do ($index ($ *mnist* :train-images) 0 '(0)) t)
                 ($index ($ *mnist* :train-labels) 0 '(0))))
;; fresh weights before retraining with dropout
(defparameter *w01* ($parameter ($- ($* 0.2 (rnd *pixels-per-image* *hidden-size*)) 0.1)))
(defparameter *w12* ($parameter ($- ($* 0.2 (rnd *hidden-size* *num-labels*)) 0.1)))
;; training: dropout on (trainp = t); evaluation below: dropout off
(loop :for n :from 1 :to 100
      :do (let ((ndata ($size *mnist-train-images* 0)))
            (loop :for i :from 0 :below ndata
                  :for x = ($index *mnist-train-images* 0 (list i))
                  :for y = ($index *mnist-train-labels* 0 (list i))
                  :for y* = (mnist-predict-do x t)
                  :for l = (mnist-loss y* y)
                  :do (progn
                        ($gs! l)
                        ($gd! (list *w01* *w12*) *alpha*)))
            (when (zerop (rem n 1))
              (let* ((indices (loop :for k :from 0 :below ndata :collect k))
                     (predictions (mnist-predict-do ($index *mnist-train-images* 0 indices)))
                     (truevals ($index *mnist-train-labels* 0 indices)))
                (prn n ($data (mnist-loss predictions truevals)))
                (prn "missed:" ($sum ($ne (amax ($data predictions) 1) (amax truevals 1))))))))
(let* ((indices (loop :for k :from 0 :below 100 :collect k))
       (ps (mnist-predict-do ($index ($ *mnist* :test-images) 0 indices)))
       (cs ($index ($ *mnist* :test-labels) 0 indices)))
  (prn ($sum ($ne (amax ($data ps) 1) (amax cs 1)))))
| 4,852
|
Common Lisp
|
.lisp
| 107
| 36.775701
| 95
| 0.544743
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
31ae672842b40cb88f16b9b750729a5294f8d3ffd120614b366a6e0dcf627cf2
| 3,210
|
[
-1
] |
3,211
|
ch04.lisp
|
chunsj_TH/examples/books/gdl/ch04.lisp
|
(defpackage :gdl-ch04
(:use #:common-lisp
#:mu
#:th))
(in-package :gdl-ch04)
;; error
;; squared error of a single prediction against the goal
(let ((knob-weight 0.5)
      (input 0.5)
      (goal-pred 0.8))
  (let* ((pred (* input knob-weight))
         (err (expt (- pred goal-pred) 2)))
    (prn err)))
;; simplest form of neural learning
;; one hot-and-cold step: probe weight +/- lr and move toward the
;; direction with lower error
(let ((weight 0.1)
      (lr 0.01)
      (number-of-toes '(8.5))
      (win-or-lose-binary '(1)))
  (defun neural-network (input weight)
    (* input weight))
  (let ((input ($ number-of-toes 0))
        (y ($ win-or-lose-binary 0)))
    (let* ((pred (neural-network input weight))
           (err (expt (- pred y) 2)))
      (let* ((p-up (neural-network input (+ weight lr)))
             (e-up (expt (- p-up y) 2))
             (p-dn (neural-network input (- weight lr)))
             (e-dn (expt (- p-dn y) 2)))
        ;; only update when either probe beats the current error
        (if (or (> err e-dn) (> err e-up))
            (if (< e-dn e-up)
                (setf weight (- weight lr))
                (setf weight (+ weight lr))))
        (prn weight)))))
;; hot and cold learning
;; Iterated probing: each step nudges *weight* by *step-amount* toward
;; whichever probe (up or down) reduces the error.
(defparameter *weight* 0.5)
(defparameter *input* 0.5)
(defparameter *goal-prediction* 0.8)
(defparameter *step-amount* 0.001)
(loop :for i :from 0 :below 1101
      :for prediction = (* *input* *weight*)
      :for err = (expt (- prediction *goal-prediction*) 2)
      :do (let* ((up-prediction (* *input* (+ *weight* *step-amount*)))
                 (dn-prediction (* *input* (- *weight* *step-amount*)))
                 (up-error (expt (- up-prediction *goal-prediction*) 2))
                 (dn-error (expt (- dn-prediction *goal-prediction*) 2)))
            (prn (list err prediction))
            (when (< dn-error err)
              (setf *weight* (- *weight* *step-amount*)))
            (when (< up-error err)
              (setf *weight* (+ *weight* *step-amount*)))))
;; gradient descent
;; Proper gradient descent: weight-delta = delta * input is the
;; derivative of the squared error w.r.t. the weight (up to a factor
;; of 2), subtracted each step.
(defparameter *weight* 0.0)
(defparameter *input* 0.5)
(defparameter *goal-prediction* 0.8)
(loop :for i :from 0 :below 40
      :for pred = (* *input* *weight*)
      :for err = (expt (- pred *goal-prediction*) 2)
      :for delta = (- pred *goal-prediction*)
      :for weight-delta = (* delta *input*)
      :do (let ((new-weight (- *weight* weight-delta)))
            (setf *weight* new-weight)
            (prn (list err pred))))
;; chapter 05 is so tedious...
| 2,310
|
Common Lisp
|
.lisp
| 62
| 29.887097
| 73
| 0.53908
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
a8cbfd629e496567c67f1ba98b07f768bdbc3eabd96c415c1c0aa0476dd478af
| 3,211
|
[
-1
] |
3,212
|
ch06.lisp
|
chunsj_TH/examples/books/gdrl/ch06.lisp
|
(defpackage :gdrl-ch06
(:use #:common-lisp
#:mu
#:th
#:th.env)
(:import-from #:th.env.examples))
(in-package :gdrl-ch06)
;; Exponentially decaying schedule from V0 down to MINV over the first
;; (max-steps * decay-ratio) steps, then held flat at the final value.
;; The log-spaced curve is reversed and rescaled into [minv, v0].
;; NOTE(review): the division by (log log-base 10) changes the curve's
;; base — presumably intentional base conversion; confirm against the
;; reference implementation.
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         (vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                 ($list)
                 (reverse)
                 (tensor)))
         (minvs ($min vs))
         (maxvs ($max vs))
         (rngv (- maxvs minvs))
         (vs ($/ ($- vs minvs) rngv))
         (vs ($+ minv ($* vs (- v0 minv)))))
    ($cat vs ($fill! (tensor rem-steps) ($last vs)))))
;; list of discount factors gamma^0, gamma^1, ..., gamma^(max-steps - 1)
(defun discounts (gamma max-steps)
  (let ((powers '()))
    (dotimes (i max-steps (nreverse powers))
      (push (expt gamma i) powers))))
;; Roll out one episode with the epsilon-greedy SELECT-ACTION and
;; collect (state action reward next-state terminalp) tuples.  If the
;; episode exceeds MAX-STEPS the partial trajectory is discarded and
;; the outer loop resets the environment and retries.
(defun generate-trajectory (env Q select-action epsilon &key (max-steps 200))
  (let ((done nil)
        (trajectory '()))
    (loop :while (not done)
          :for state = (env/reset! env)
          :do (loop :for e :from 0 :to max-steps
                    :while (not done)
                    :do (let* ((action (funcall select-action Q state epsilon))
                               (tx (env/step! env action))
                               (next-state (transition/next-state tx))
                               (reward (transition/reward tx))
                               (terminalp (transition/terminalp tx))
                               (experience (list state action reward next-state terminalp)))
                          (push experience trajectory)
                          (setf done terminalp
                                state next-state)
                          (when (>= e max-steps)
                            (setf trajectory '())))))
    (reverse trajectory)))
;; positional accessors for experience tuples
(defun experience/state (record) ($ record 0))
(defun experience/action (record) ($ record 1))
(defun experience/reward (record) ($ record 2))
(defun experience/next-state (record) ($ record 3))
(defun experience/terminalp (record) ($ record 4))
;; Monte-Carlo control with decaying alpha/epsilon schedules.  For each
;; episode the return G of each (state, action) visit is computed from
;; the discounted tail of the trajectory; FIRST-VISIT-P restricts the
;; update to the first visit per episode.  Returns (Q v policy-fn
;; Q-track pi-track).
(defun mc-control (env &key (gamma 1D0)
                         (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                         (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                         (nepisodes 3000)
                         (max-steps 200)
                         (first-visit-p T))
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (discounts (discounts gamma max-steps))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         ;; epsilon-greedy action selection over Q's row for STATE
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for e :from 0 :below nepisodes
          :for eps = ($ epsilons e)
          :for trajectory = (generate-trajectory env Q select-action eps :max-steps max-steps)
          :for visited = (zeros ns na)
          :do (progn
                (loop :for strj :on trajectory
                      :for experience = (car strj)
                      :for state = (experience/state experience)
                      :for action = (experience/action experience)
                      :for reward = (experience/reward experience)
                      :do (unless (and first-visit-p (> ($ visited state action) 0))
                            ;; G: discounted sum of rewards over the tail strj
                            (let* ((g (->> (mapcar (lambda (d e) (* d (experience/reward e)))
                                                   discounts strj)
                                           (reduce #'+)))
                                   (mc-err (- g ($ Q state action))))
                              (setf ($ visited state action) 1)
                              (incf ($ Q state action) (* ($ alphas e) mc-err)))))
                (setf ($ Q-track e) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Baseline: exact optimum of slippery-walk-seven via value iteration.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 0.99D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
;; first-visit MC control on the same environment
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (mc-control env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
;; every-visit MC control for comparison
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (mc-control env :gamma 0.99D0 :nepisodes 3000 :first-visit-p nil))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
;; SARSA: on-policy TD control.  The TD target uses the action actually
;; selected in the next state; terminal transitions zero the bootstrap
;; term.  Returns (Q v policy-fn Q-track pi-track).
(defun sarsa (env &key (gamma 1D0)
                    (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                    (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                    (nepisodes 3000))
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         ;; epsilon-greedy action selection
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons e)
          :for action = (funcall select-action Q state eps)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (next-action (funcall select-action Q next-state eps))
                                 (td-target (+ reward (* gamma ($ Q next-state next-action)
                                                         (if terminalp 0 1))))
                                 (td-error (- td-target ($ Q state action))))
                            (incf ($ Q state action) (* ($ alphas e) td-error))
                            (setf done terminalp
                                  state next-state
                                  action next-action)))
                (setf ($ Q-track e) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Compare SARSA's result against the value-iteration optimum.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 0.99D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (sarsa env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
;; Q-learning: off-policy TD control.  Unlike SARSA, the TD target
;; bootstraps from the greedy (max) action value of the next state.
;; Returns (Q v policy-fn Q-track pi-track).
(defun q-learning (env &key (gamma 1D0)
                         (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                         (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                         (nepisodes 3000))
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         ;; epsilon-greedy action selection
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons e)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((action (funcall select-action Q state eps))
                                 (tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (td-target (+ reward (* gamma ($max ($ Q next-state))
                                                         (if terminalp 0 1))))
                                 (td-error (- td-target ($ Q state action))))
                            (incf ($ Q state action) (* ($ alphas e) td-error))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ Q-track e) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Compare Q-learning's result against the value-iteration optimum.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 0.99D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (q-learning env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
;; Double Q-learning: keeps two estimates Q1/Q2 to reduce maximization
;; bias.  Actions are chosen from the average of both; on each step a
;; coin flip decides which estimate is updated, selecting the argmax
;; with one and evaluating it with the other.  Returns (Q v policy-fn
;; Q-track pi-track) built from the averaged estimate.
(defun double-q-learning (env &key (gamma 1D0)
                                (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                                (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                                (nepisodes 3000))
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q1 (zeros ns na))
         (Q2 (zeros ns na))
         (Q1-track (zeros nepisodes ns na))
         (Q2-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         ;; epsilon-greedy action selection
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons e)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((action (funcall select-action ($/ ($+ Q1 Q2) 2) state eps))
                                 (tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (fac (if terminalp 0 1)))
                            (if (zerop (random 2))
                                ;; update Q1: argmax by Q1, value by Q2
                                (let* ((argmaxQ1 ($argmax ($ Q1 next-state)))
                                       (td-target (+ reward
                                                     (* gamma
                                                        ($ Q2 next-state argmaxQ1)
                                                        fac)))
                                       (td-error (- td-target ($ Q1 state action))))
                                  (incf ($ Q1 state action) (* ($ alphas e) td-error)))
                                ;; update Q2: argmax by Q2, value by Q1
                                (let* ((argmaxQ2 ($argmax ($ Q2 next-state)))
                                       (td-target (+ reward
                                                     (* gamma
                                                        ($ Q1 next-state argmaxQ2)
                                                        fac)))
                                       (td-error (- td-target ($ Q2 state action))))
                                  (incf ($ Q2 state action) (* ($ alphas e) td-error))))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ Q1-track e) Q1)
                (setf ($ Q2-track e) Q2)
                (push ($squeeze ($argmax ($/ ($+ Q1 Q2) 2) 1)) pi-track)))
    (let* ((Q ($/ ($+ Q1 Q2) 2))
           (v ($squeeze (car ($max Q 1))))
           (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) ($/ ($+ Q1-track Q2-track) 2) (reverse pi-track)))))
;; Compare double Q-learning's result against the value-iteration optimum.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 0.99D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (double-q-learning env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
;; Grid-world: evaluate a fixed hand-written policy (actions encoded as
;; integers, one per state), then run each control algorithm and print
;; its learned values and policy.
(let* ((env (th.env.examples:grid-world-env))
       (policy (lambda (s) ($ '(2 2 2 0
                                3 0 3 0
                                3 0 0 0)
                             s)))
       (v-true (env/policy-evaluation env policy)))
  (env/print-state-value-function env v-true))
;; exact optimum via value iteration
(let* ((env (th.env.examples:grid-world-env))
       (optres (env/value-iteration env))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v)
  (env/print-policy env opt-p :action-symbols '("<" "v" ">" "^"))
  (prn opt-q))
(let* ((env (th.env.examples:grid-world-env))
       (res (mc-control env :nepisodes 4000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v)
  (env/print-policy env policy :action-symbols '("<" "v" ">" "^"))
  (prn Q))
(let* ((env (th.env.examples:grid-world-env))
       (res (sarsa env :nepisodes 4000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v)
  (env/print-policy env policy :action-symbols '("<" "v" ">" "^"))
  (prn Q))
(let* ((env (th.env.examples:grid-world-env))
       (res (q-learning env :nepisodes 4000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v)
  (env/print-policy env policy :action-symbols '("<" "v" ">" "^"))
  (prn Q))
(let* ((env (th.env.examples:grid-world-env))
       (res (double-q-learning env :nepisodes 4000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v)
  (env/print-policy env policy :action-symbols '("<" "v" ">" "^"))
  (prn Q))
| 16,680
|
Common Lisp
|
.lisp
| 331
| 35.652568
| 94
| 0.494089
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
8b169fd65faa372af3bc2c95c3831df8073d183e9f1b895fe3a6305af0e615f0
| 3,212
|
[
-1
] |
3,213
|
ch11.lisp
|
chunsj_TH/examples/books/gdrl/ch11.lisp
|
(defpackage :gdrl-ch11
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :gdrl-ch11)
;; cartpole environments: an easy variant for training (reward capped
;; at MAX-STEPS) and a separate evaluation variant
(defun train-env (&optional (max-steps 300)) (cartpole-env :easy :reward max-steps))
(defun eval-env () (cartpole-env :eval))
;; clamp probabilities away from exact 0/1 so log stays finite
(defun clamp-probs (probs)
  ($clamp probs
          single-float-epsilon
          (- 1 single-float-epsilon)))
;; numerically safe log-probabilities
(defun $logPs (probs) ($log (clamp-probs probs)))
;;
;; REINFORCE
;;
;; policy network: NI inputs -> 8 relu units -> NO softmax outputs
(defun model (&optional (ni 4) (no 2))
  (let ((h 8))
    (sequential-layer
     (affine-layer ni h :weight-initializer :random-uniform
                   :activation :relu)
     (affine-layer h no :weight-initializer :random-uniform
                   :activation :softmax))))
;; run the model on STATE, adding a batch dimension for a single state
(defun policy (m state &optional (trainp T))
  (let ((s (if (eq ($ndim state) 1)
               ($unsqueeze state 0)
               state)))
    ($execute m s :trainp trainp)))
;; Sample an action from the policy distribution; returns
;; (action log-probability entropy).
(defun select-action (m state &optional (trainp T))
  (let* ((probs (policy m state trainp))
         (logPs ($logPs probs))
         (ps (if ($parameterp probs) ($data probs) probs))
         (entropy ($- ($dot ps logPs)))
         (action ($multinomial ps 1))
         (logP ($gather logPs 1 action)))
    (list ($scalar action) logP entropy)))
;; greedy (argmax) action selector closure for evaluation
(defun action-selector (m)
  (lambda (state)
    (let ((probs (policy m state nil)))
      ($scalar ($argmax probs 1)))))
;; REINFORCE with one gradient update per episode ($amgd!).  Stops
;; early once the running training score and a periodic evaluation
;; score both pass their thresholds.  Returns the running average score.
(defun reinforce (m &optional (max-episodes 4000))
  "REINFORCE updating per every episode."
  (let* ((gamma 0.99)
         (lr 0.01)
         (env (train-env))
         (avg-score nil)
         (success nil))
    (loop :while (not success)
          :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for rewards = '()
          :for logPs = '()
          :for score = 0
          :for done = nil
          :do (let ((losses nil))
                ;; roll out one episode, collecting log-probs and rewards
                (loop :while (not done)
                      :for (action logP entropy) = (select-action m state)
                      :for (next-state reward terminalp) = (cdr (env/step! env action))
                      :do (progn
                            (push logP logPs)
                            (push reward rewards)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                (setf logPs (reverse logPs))
                ;; rewards -> discounted returns
                (setf rewards (rewards (reverse rewards) gamma T))
                (loop :for logP :in logPs
                      :for vt :in rewards
                      :for i :from 0
                      :for gm = (expt gamma i)
                      :for l = ($- ($* gm logP vt))
                      ;; in practice, we don't have to collect losses.
                      ;; each loss has independent computational graph.
                      :do (push l losses))
                ($amgd! m lr)
                ;; exponential moving average of the episode score
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (let ((escore (cadr (evaluate (eval-env) (action-selector m)))))
                    (if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
                    (prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
    avg-score))
;; train with REINFORCE
(defparameter *m* (model))
(reinforce *m* 4000)
;; evaluation
(evaluate (eval-env) (action-selector *m*))
;;
;; REINFORCE - batch updating
;;
(defun select-action (m state) ($scalar ($multinomial (policy m state nil) 1)))
(defun trace-episode (env m gamma &optional (nb 1))
  "collect episode trajectories with given policy model
Runs NB full episodes in ENV with the sampling policy of model M and
returns (STATES ACTIONS RETURNS GAMMAS AVG-SCORE): STATES is an
\(n x state-dim) tensor, ACTIONS/RETURNS/GAMMAS are (n x 1) columns,
AVG-SCORE is the mean undiscounted score over the NB episodes."
  (let ((states nil)
        (actions nil)
        (rewards nil)
        (gammas nil)   ;; gamma^i per step, i restarting at each episode
        (done nil)
        (score 0)
        (state nil))
    (loop :repeat nb
          :do (progn
                (setf state (env/reset! env))
                (loop :while (not done)
                      :for action = (select-action m state)
                      :for (_ next-state reward terminalp) = (env/step! env action)
                      :for i :from 0
                      :do (progn
                            (push ($list state) states)
                            (push action actions)
                            (push reward rewards)
                            (push (expt gamma i) gammas)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                (setf done nil)))
    (let ((n ($count states)))
      (list (tensor (reverse states))
            (-> (tensor.long (reverse actions))
                ($reshape! n 1))
            ;; NOTE(review): REWARDS is computed over the CONCATENATED reward
            ;; list, so with nb > 1 returns may bleed across episode
            ;; boundaries -- confirm the helper handles this.
            (-> (rewards (reverse rewards) gamma T)
                (tensor)
                ($reshape! n 1))
            (-> (tensor (reverse gammas))
                ($reshape! n 1))
            (/ score nb)))))
(defun compute-loss (m states actions rewards gammas)
  "Mean negative discounted policy-gradient objective over a batch."
  (let* ((all-logps ($logPs (policy m states)))
         (taken-logps ($gather all-logps 1 actions)))
    ($mean ($* -1 gammas rewards taken-logps))))
(defun reinforce (m &optional (nbatch 5) (max-episodes 4000))
  "REINFORCE with batch updating
Collects NBATCH episodes per update via TRACE-EPISODE, computes a single
batched loss, and applies one $amgd! step.  Returns the running average
score."
  (let* ((gamma 0.99)
         ;; larger lr than the per-episode variant; see the call site comment
         ;; ("more batch means large learning rate")
         (lr 0.04)
         (env (train-env))
         (avg-score nil)
         (success nil))
    (loop :while (not success)
          :repeat (round (/ max-episodes nbatch))
          :for e :from 1
          ;;:for state = (env/reset! env)
          :do (let* ((res (trace-episode env m gamma nbatch))
                     (states ($0 res))
                     (actions ($1 res))
                     (rewards ($2 res))
                     (gammas ($3 res))
                     (score ($4 res))
                     (loss nil))
                (setf loss (compute-loss m states actions rewards gammas))
                ($amgd! m lr)
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (let ((escore (cadr (evaluate (eval-env) (action-selector m)))))
                    (if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
                    (prn (format nil "~5D: ~8,2F / ~5,0F ~12,4F" e avg-score escore
                                 ($scalar ($data loss))))))))
    avg-score))
(defparameter *m* (model))
(reinforce *m* 10 4000) ;; more batch means large learning rate
(evaluate (eval-env) (action-selector *m*))
;;
;; VANILLA POLICY GRADIENT, VPG
;;
(defun pmodel (&optional (ni 4) (no 2))
  "Policy network: NI inputs -> 8 hidden (relu) -> NO action probabilities (softmax)."
  (sequential-layer
   (affine-layer ni 8 :weight-initializer :random-uniform
                      :activation :relu)
   (affine-layer 8 no :weight-initializer :random-uniform
                      :activation :softmax)))
(defun vmodel (&optional (ni 4) (no 1))
  "State-value network: NI inputs -> 16 hidden (relu) -> NO linear outputs."
  (sequential-layer
   (affine-layer ni 16 :weight-initializer :random-uniform
                       :activation :relu)
   (affine-layer 16 no :weight-initializer :random-uniform
                       :activation :nil)))
(defun select-action (m state &optional (trainp T))
  "Sample an action from M's policy at STATE.
Returns (ACTION LOG-PROB ENTROPY); LOG-PROB keeps its graph for the update."
  (let* ((out (policy m state trainp))
         (log-probs ($logPs out))
         (dist (if ($parameterp out) ($data out) out))
         (choice ($multinomial dist 1))
         (chosen-logp ($gather log-probs 1 choice))
         (ent ($- ($dot dist log-probs))))
    (list ($scalar choice) chosen-logp ent)))
(defun val (m state &optional (trainp T))
  "Value estimate for STATE from model M; a rank-1 state is promoted to a
batch of one before execution."
  (let ((batched (if (eq ($ndim state) 1) ($unsqueeze state 0) state)))
    ($execute m batched :trainp trainp)))
(defun vpg (pm vm &optional (max-episodes 4000))
  "Vanilla policy gradient: REINFORCE with a learned value baseline and an
entropy bonus.  PM is the policy model, VM the value model; both are
updated once per episode.  Returns the running average training score."
  (let* ((gamma 0.99)
         (beta 0.001)  ;; entropy-bonus weight
         (plr 0.01)    ;; policy learning rate
         (vlr 0.01)    ;; value learning rate
         (env (train-env))
         (avg-score nil)
         (success nil))
    (loop :while (not success)
          :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for rewards = '()
          :for logPs = '()
          :for entropies = '()
          :for vals = '()
          :for score = 0
          :for done = nil
          :do (let ((plosses nil)
                    (vlosses nil))
                ;; roll out a full episode, recording log-probs, entropy
                ;; bonuses and baseline values
                (loop :while (not done)
                      :for (action logP entropy) = (select-action pm state)
                      :for (_ next-state reward terminalp) = (env/step! env action)
                      :for v = (val vm state)
                      :do (progn
                            (push logP logPs)
                            (push reward rewards)
                            (push ($* beta entropy) entropies)
                            (push v vals)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                (setf logPs (reverse logPs)
                      entropies (reverse entropies)
                      vals (reverse vals))
                (setf rewards (rewards (reverse rewards) gamma T))
                ;; per-step losses: advantage = return - baseline;
                ;; value loss is squared advantage
                (loop :for logP :in logPs
                      :for vt :in rewards
                      :for et :in entropies
                      :for v :in vals
                      :for i :from 0
                      :for gm = (expt gamma i)
                      ;; in practice, we don't have to collect losses.
                      ;; each loss has independent computational graph.
                      :do (let ((adv ($- vt v)))
                            ;; ($data adv) detaches the advantage for the policy loss
                            (push ($- ($+ ($* gm logP ($data adv)) et)) plosses)
                            (push ($square adv) vlosses)))
                ($amgd! pm plr)
                ($amgd! vm vlr)
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (let ((escore (cadr (evaluate (eval-env) (action-selector pm)))))
                    (if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
                    (prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
    avg-score))
(defparameter *pm* (model))
(defparameter *vm* (vmodel))
(vpg *pm* *vm* 4000)
(evaluate (eval-env) (action-selector *pm*))
;;
;; ACTOR-CRITIC - ADVANTAGE ACTOR CRITIC (A2C) BATCH
;;
(defun ac (pm vm &optional (max-episodes 4000))
(let* ((gamma 0.99)
(beta 0.001)
(plr 0.01)
(vlr 0.01)
(env (train-env))
(avg-score nil)
(success nil))
(loop :while (not success)
:repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for entropies = '()
:for vals = '()
:for score = 0
:for done = nil
:do (let ((plosses nil)
(vlosses nil))
(loop :while (not done)
:for (action logP entropy) = (select-action pm state)
:for (_ next-state reward terminalp) = (env/step! env action)
:for v = (val vm state)
:do (progn
(push logP logPs)
(push reward rewards)
(push ($* beta entropy) entropies)
(push v vals)
(incf score reward)
(setf state next-state
done terminalp)))
(setf logPs (reverse logPs)
entropies (reverse entropies)
vals (reverse vals))
(setf rewards (rewards (reverse rewards) gamma T))
(loop :for logP :in logPs
:for vt :in rewards
:for et :in entropies
:for v :in vals
:for i :from 0
:for gm = (expt gamma i)
;; in practice, we don't have to collect losses.
;; each loss has independent computational graph.
:do (let ((adv ($- vt v)))
(push ($- ($+ ($* gm logP ($data adv)) et)) plosses)
(push ($square adv) vlosses)))
($amgd! pm plr)
($amgd! vm vlr)
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(let ((escore (cadr (evaluate (eval-env) (action-selector pm)))))
(if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
(prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
avg-score))
(defparameter *pm* (model))
(defparameter *vm* (vmodel))
(ac *pm* *vm* 4000)
(evaluate (eval-env) (action-selector *pm*))
;;
;; ACTOR-CRITIC - ONLINE VERSION
;;
(defun ac2 (pm vm &optional (max-episodes 4000))
  "Online (per-step) advantage actor-critic.
Computes the one-step TD advantage r + gamma*V(s') - V(s) and updates
the policy PM and critic VM after every environment step."
  (let* ((gamma 0.99)
         (plr 0.001)
         (vlr 0.001)
         (env (train-env))
         (avg-score nil)
         (success nil))
    (loop :while (not success)
          :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for rewards = '()
          :for logPs = '()
          :for entropies = '()
          :for vals = '()
          :for score = 0
          :for done = nil
          :do (let ((plosses nil)
                    (vlosses nil))
                (loop :while (not done)
                      :for (action logP entropy) = (select-action pm state)
                      :for (_ next-state reward terminalp) = (env/step! env action)
                      :for v = (val vm state)
                      :for vn = (val vm next-state)
                      ;; TD advantage; the (1 - terminal) factor zeroes the
                      ;; bootstrap term at episode end
                      :for adv = ($+ reward
                                     ($- ($* (- 1 (if terminalp 1 0))
                                             gamma
                                             vn)
                                         v))
                      :do (let ((vloss ($square adv))
                                ;; advantage is detached for the policy loss
                                (ploss ($- ($* ($data adv) logP))))
                            (incf score reward)
                            (push ($data vloss) vlosses)
                            (push ($data ploss) plosses)
                            ($amgd! pm plr)
                            ($amgd! vm vlr)
                            (setf state next-state
                                  done terminalp)))
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (let ((escore (cadr (evaluate (eval-env) (action-selector pm)))))
                    (if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
                    (prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
    avg-score))
(defparameter *pm* (model))
(defparameter *vm* (vmodel))
(ac2 *pm* *vm* 4000)
(evaluate (eval-env) (action-selector *pm*))
;;
;; ACTOR-CRITIC - N-STEP
;;
(defun acn (pm vm &optional (max-episodes 4000))
(let* ((gamma 0.99)
(beta 0.001)
(plr 0.01)
(vlr 0.01)
(max-steps 200)
(env (train-env))
(avg-score nil)
(success nil))
(loop :while (not success)
:repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for entropies = '()
:for vals = '()
:for score = 0
:for steps = 0
:for done = nil
:do (let ((plosses nil)
(vlosses nil))
(loop :while (not done)
:for (action logP entropy) = (select-action pm state)
:for (_ next-state reward terminalp) = (env/step! env action)
:for v = (val vm state)
:do (progn
(push logP logPs)
(push reward rewards)
(push ($* beta entropy) entropies)
(push v vals)
(incf score reward)
(incf steps)
;; every at most max-steps, do train
(when (or terminalp (zerop (rem steps max-steps)))
(setf logPs (reverse logPs)
entropies (reverse entropies)
vals (reverse vals))
(if (> ($count rewards) 1)
(setf rewards (rewards (reverse rewards) gamma T))
(setf rewards (reverse rewards)))
(loop :for logP :in logPs
:for vt :in rewards
:for et :in entropies
:for v :in vals
:for i :from 0
:for gm = (expt gamma i)
;; in practice, we don't have to collect losses.
;; each loss has independent computational graph.
:do (let ((adv ($- vt v)))
(push ($- ($+ ($* gm logP ($data adv)) et)) plosses)
(push ($square adv) vlosses)))
($amgd! pm plr)
($amgd! vm vlr)
(setf logPs nil
entropies nil
vals nil
rewards nil
plosses nil
vlosses nil))
(setf state next-state
done terminalp)))
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(let ((escore (cadr (evaluate (eval-env) (action-selector pm)))))
(if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
(prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
avg-score))
(defparameter *pm* (model))
(defparameter *vm* (vmodel))
(acn *pm* *vm* 4000)
(evaluate (eval-env) (action-selector *pm*))
;;
;; ACTOR-CRITIC - SHARED NETWORK MODEL
;;
(defun smodel (&optional (ns 4) (na 2) (nv 1))
  "Shared actor-critic network: a two-layer relu trunk feeding two heads,
a softmax policy head (NA actions) and a linear value head (NV outputs)."
  (let* ((h 16)
         (trunk (sequential-layer
                 (affine-layer ns h :weight-initializer :random-uniform
                                    :activation :relu)
                 (affine-layer h h :weight-initializer :random-uniform
                                   :activation :relu)))
         (policy-head (affine-layer h na :weight-initializer :random-uniform
                                         :activation :softmax))
         (value-head (affine-layer h nv :weight-initializer :random-uniform
                                        :activation :nil)))
    (sequential-layer trunk
                      (parallel-layer policy-head value-head))))
(defun policy-and-value (m state &optional (trainp T))
  "Run shared model M on a single STATE (batched to rank 2); returns the
\(policy value) pair of head outputs."
  (let ((batched ($unsqueeze state 0)))
    ($execute m batched :trainp trainp)))
(defun select-action (m state &optional (trainp T))
  "Sample an action from shared model M at STATE.
Returns (ACTION LOG-PROB ENTROPY VALUE); TRAINP is accepted for interface
compatibility but POLICY-AND-VALUE is called with its default."
  (declare (ignorable trainp))
  (let* ((out (policy-and-value m state))
         (probs ($0 out))
         (value ($1 out))
         (log-probs ($logPs probs))
         (dist (if ($parameterp probs) ($data probs) probs))
         (choice ($multinomial dist 1))
         (chosen-logp ($gather log-probs 1 choice))
         (ent ($- ($dot dist log-probs))))
    (list ($scalar choice) chosen-logp ent value)))
(defun action-selector (m)
  "Greedy evaluation policy: argmax over the policy head of shared model M."
  (lambda (state)
    ($scalar ($argmax ($0 (policy-and-value m state nil)) 1))))
(defun acs (m &optional (max-episodes 4000))
  "Actor-critic with a single shared network M (policy and value heads).
Accumulates a combined weighted loss (pw*policy + vw*value) over one full
episode and applies one $amgd! update per episode."
  (let* ((gamma 0.99)
         (beta 0.001) ;; entropy-bonus weight
         (lr 0.01)
         (pw 1)       ;; policy-loss weight
         (vw 0.6)     ;; value-loss weight
         (env (train-env))
         (avg-score nil)
         (success nil))
    (loop :while (not success)
          :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for rewards = '()
          :for logPs = '()
          :for entropies = '()
          :for vals = '()
          :for score = 0
          :for done = nil
          :do (let ((ploss 0)
                    (vloss 0)
                    (loss 0))
                (loop :while (not done)
                      :for (action logP entropy v) = (select-action m state)
                      :for (_ next-state reward terminalp) = (env/step! env action)
                      :do (progn
                            (push logP logPs)
                            (push reward rewards)
                            (push ($* beta entropy) entropies)
                            (push v vals)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                (setf logPs (reverse logPs)
                      entropies (reverse entropies)
                      vals (reverse vals))
                (setf rewards (rewards (reverse rewards) gamma T))
                (loop :for logP :in logPs
                      :for vt :in rewards
                      :for et :in entropies
                      :for v :in vals
                      :for i :from 0
                      :for gm = (expt gamma i)
                      ;; in practice, we don't have to collect losses.
                      ;; each loss has independent computational graph.
                      :do (let ((adv ($- vt v)))
                            (setf ploss ($- ploss ($+ ($* gm logP ($data adv)) et)))
                            (setf vloss ($+ vloss ($square adv)))))
                ;; combined loss builds the graph that $amgd! backpropagates
                (setf loss ($+ ($* pw ploss) ($* vw vloss)))
                ($amgd! m lr)
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (let ((escore (cadr (evaluate (eval-env) (action-selector m)))))
                    (if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
                    (prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
    avg-score))
(defparameter *sm* (smodel))
(acs *sm* 4000)
(evaluate (eval-env) (action-selector *sm*))
;;
;; ACTOR-CRITIC - SHARED N-STEP
;;
(defun acsn (m &optional (max-episodes 4000))
(let* ((gamma 0.99)
(beta 0.001)
(lr 0.01)
(pw 1)
(vw 0.6)
(max-steps 200)
(env (train-env))
(avg-score nil)
(success nil))
(loop :while (not success)
:repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for entropies = '()
:for vals = '()
:for score = 0
:for steps = 0
:for done = nil
:do (let ((ploss 0)
(vloss 0)
(loss 0))
(loop :while (not done)
:for (action logP entropy v) = (select-action m state)
:for (_ next-state reward terminalp) = (env/step! env action)
:do (progn
(push logP logPs)
(push reward rewards)
(push ($* beta entropy) entropies)
(push v vals)
(incf score reward)
(incf steps)
(when (or terminalp (zerop (rem steps max-steps)))
(setf logPs (reverse logPs)
entropies (reverse entropies)
vals (reverse vals))
(if (> ($count rewards) 1)
(setf rewards (rewards (reverse rewards) gamma T))
(setf rewards (reverse rewards)))
(loop :for logP :in logPs
:for vt :in rewards
:for et :in entropies
:for v :in vals
:for i :from 0
:for gm = (expt gamma i)
;; in practice, we don't have to collect losses.
;; each loss has independent computational graph.
:do (let ((adv ($- vt v)))
(setf ploss ($- ploss ($+ ($* gm logP ($data adv)) et)))
(setf vloss ($+ vloss ($square adv)))))
(setf loss ($+ ($* pw ploss) ($* vw vloss)))
($amgd! m lr)
(setf logPs nil
entropies nil
vals nil
rewards nil
ploss 0
vloss 0
loss 0))
(setf state next-state
done terminalp)))
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(let ((escore (cadr (evaluate (eval-env) (action-selector m)))))
(if (and (>= avg-score (* 0.9 300)) (>= escore 3000)) (setf success T))
(prn (format nil "~5D: ~8,2F / ~5,0F" e avg-score escore))))))
avg-score))
(defparameter *sm* (smodel))
(acsn *sm* 4000)
(evaluate (eval-env) (action-selector *sm*))
| 26,557
|
Common Lisp
|
.lisp
| 621
| 26.478261
| 98
| 0.433649
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
17e08b1740766c2f967a2077e157cf66c943049419cc1293fb9ab33c94cb6676
| 3,213
|
[
-1
] |
3,214
|
ch03.lisp
|
chunsj_TH/examples/books/gdrl/ch03.lisp
|
(defpackage :gdrl-ch03
(:use #:common-lisp
#:mu
#:th
#:th.env)
(:import-from #:th.env.examples))
(in-package :gdrl-ch03)
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
(env/print-policy env policy :action-symbols '("<" ">") :ncols 7))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
(list :success-rate (env/success-probability env policy 6)
:mean-return (env/mean-return env policy)))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v (env/policy-evaluation env policy)))
(env/print-state-value-function env v :ncols 7))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v (env/policy-evaluation env policy))
(new-policy (env/policy-improvement env v)))
(env/print-policy env new-policy :action-symbols '("<" ">") :ncols 7)
(list :success-rate (env/success-probability env new-policy 6)
:mean-return (env/mean-return env new-policy)))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v (env/policy-evaluation env policy))
(new-policy (env/policy-improvement env v))
(new-v (env/policy-evaluation env new-policy)))
(env/print-state-value-function env new-v :ncols 7))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v (env/policy-evaluation env policy))
(new-policy (env/policy-improvement env v))
(new-v (env/policy-evaluation env new-policy))
(new-new-policy (env/policy-improvement env new-v)))
(env/print-policy env new-new-policy :action-symbols '("<" ">") :ncols 7)
(list :success-rate (env/success-probability env new-new-policy 6)
:mean-return (env/mean-return env new-new-policy)))
(let* ((env (th.env.examples:slippery-walk-five-env))
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v (env/policy-evaluation env policy))
(new-policy (env/policy-improvement env v))
(new-v (env/policy-evaluation env new-policy))
(new-new-policy (env/policy-improvement env new-v))
(new-new-v (env/policy-evaluation env new-new-policy)))
(env/print-state-value-function env new-new-v :ncols 7)
($equal new-v new-new-v))
(let* ((env (th.env.examples:slippery-walk-five-env))
(res (env/policy-iteration env))
(optimal-value-function (policy-iteration/optimal-value-function res))
(optimal-policy (policy-iteration/optimal-policy res)))
(env/print-policy env optimal-policy :action-symbols '("<" ">") :ncols 7)
(env/print-state-value-function env optimal-value-function :ncols 7)
(list :success-rate (env/success-probability env optimal-policy 6)
:mean-return (env/mean-return env optimal-policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(2 0 1 3
0 0 2 0
3 1 3 0
0 2 1 0)
s))))
(env/print-policy env policy)
(list :success-rate (env/success-probability env policy 15)
:mean-return (env/mean-return env policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(2 2 1 0
1 0 1 0
2 2 1 0
0 2 2 0)
s))))
(env/print-policy env policy)
(list :success-rate (env/success-probability env policy 15)
:mean-return (env/mean-return env policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(0 3 3 3
0 0 3 0
3 1 0 0
0 2 2 0)
s))))
(env/print-policy env policy)
(list :success-rate (env/success-probability env policy 15)
:mean-return (env/mean-return env policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(0 3 3 3
0 0 3 0
3 1 0 0
0 2 2 0)
s)))
(v (env/policy-evaluation env policy :gamma 0.99)))
(env/print-state-value-function env v))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(0 3 3 3
0 0 3 0
3 1 0 0
0 2 2 0)
s)))
(v (env/policy-evaluation env policy :gamma 0.99))
(new-policy (env/policy-improvement env v :gamma 0.99)))
(env/print-policy env new-policy)
(list :success-rate (env/success-probability env new-policy 15)
:mean-return (env/mean-return env new-policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(policy (lambda (s) ($ '(0 3 3 3
0 0 3 0
3 1 0 0
0 2 2 0)
s)))
(v (env/policy-evaluation env policy :gamma 0.99))
(new-policy (env/policy-improvement env v :gamma 0.99))
(new-v (env/policy-evaluation env new-policy :gamma 0.99)))
(env/print-state-value-function env new-v)
(env/print-state-value-function env ($- new-v v)))
(let* ((env (th.env.examples:frozen-lake-env))
(impres (env/policy-iteration env :gamma 0.99))
(v-best (policy-iteration/optimal-value-function impres))
(policy-best (policy-iteration/optimal-policy impres)))
(env/print-policy env policy-best)
(env/print-state-value-function env v-best)
(list :success-rate (env/success-probability env policy-best 15)
:mean-return (env/mean-return env policy-best)))
(let* ((env (th.env.examples:slippery-walk-five-env))
(res (env/value-iteration env))
(optimal-value-function (value-iteration/optimal-value-function res))
(optimal-policy (value-iteration/optimal-policy res)))
(env/print-policy env optimal-policy :action-symbols '("<" ">") :ncols 7)
(env/print-state-value-function env optimal-value-function :ncols 7)
(list :success-rate (env/success-probability env optimal-policy 6)
:mean-return (env/mean-return env optimal-policy)))
(let* ((env (th.env.examples:frozen-lake-env))
(res (env/value-iteration env :gamma 0.99))
(optimal-value-function (value-iteration/optimal-value-function res))
(optimal-policy (value-iteration/optimal-policy res)))
(env/print-policy env optimal-policy)
(env/print-state-value-function env optimal-value-function)
(list :success-rate (env/success-probability env optimal-policy 15)
:mean-return (env/mean-return env optimal-policy)))
| 6,829
|
Common Lisp
|
.lisp
| 138
| 40.137681
| 77
| 0.598382
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
60bfe5b6ce5a0ba0269a32c1709cc474990c7772cfa68fe266c63c25accaaff4
| 3,214
|
[
-1
] |
3,215
|
ch12.lisp
|
chunsj_TH/examples/books/gdrl/ch12.lisp
|
(defpackage :gdrl-ch12
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole
#:th.env.pendulum))
(in-package :gdrl-ch12)
(defparameter *max-action* 2)
(defun actor-model (&optional (ni 3) (no 1))
  "Deterministic policy network: relu, relu, tanh affine layers, with the
tanh output scaled by *max-action* into the environment's action range."
  (let ((h 16)
        (max-action *max-action*))
    (sequential-layer
     (affine-layer ni h
                   :weight-initializer :random-uniform
                   :activation :relu)
     (affine-layer h h
                   :weight-initializer :random-uniform
                   :activation :relu)
     (affine-layer h no
                   :weight-initializer :random-uniform
                   :activation :tanh)
     ;; scale tanh output from [-1,1] to [-max-action, max-action]
     (functional-layer
      (lambda (x &key (trainp t))
        (declare (ignore trainp))
        ($* x max-action))))))
;; NOTE(review): "criticl" looks like a typo for "critic", but the symbol is
;; referenced at the call sites below, so the name is preserved.
(defun criticl-model (&optional (ns 3) (na 1) (no 1))
  "Q-network: state (NS dims) and action (NA dims) concatenated at the
input, NO linear outputs."
  (let ((h 16))
    (sequential-layer
     (affine-layer (+ ns na) h
                   :weight-initializer :random-uniform
                   :activation :relu)
     (affine-layer h h
                   :weight-initializer :random-uniform
                   :activation :relu)
     (affine-layer h no
                   :weight-initializer :random-uniform
                   :activation :nil))))
(defun actor (m state &optional (randomizedp T) (trainp T))
  "Action from deterministic policy M for STATE.
When RANDOMIZEDP, adds a unit Gaussian exploration sample and clamps the
result into [-*max-action*, *max-action*]."
  (let* ((batched (if (eq ($ndim state) 1) ($unsqueeze state 0) state))
         ;; noise is drawn unconditionally, matching the original control flow
         (noise (random/normal 0 1))
         (raw ($execute m batched :trainp trainp)))
    (if randomizedp
        ($clamp ($+ raw noise) (- *max-action*) *max-action*)
        raw)))
(defun critic (m state action &optional (trainp T))
  "Q-value from critic M for (STATE, ACTION).
A scalar ACTION is wrapped into a tensor; rank-1 inputs are batched
before state and action are concatenated column-wise."
  (let* ((sb (if (eq ($ndim state) 1) ($unsqueeze state 0) state))
         (atensor (if ($tensorp action) action (tensor (list action))))
         (ab (if (eq ($ndim atensor) 1) ($unsqueeze atensor 0) atensor)))
    ($execute m ($cat sb ab 1) :trainp trainp)))
(defun critics (m1 m2 state action &optional (trainp T))
  "Evaluate the twin critics M1 and M2 on the same (STATE, ACTION) pair."
  (mapcar (lambda (m) (critic m state action trainp))
          (list m1 m2)))
(defun sync-model (ms mt &optional (tau -1))
  "Copy parameters from source model MS into target model MT.
A negative TAU performs a hard copy; otherwise a soft update
target <- tau*source + (1-tau)*target."
  (let ((hard-copy-p (< tau 0)))
    (loop :for source :in ($parameters ms)
          :for target :in ($parameters mt)
          :for sd = ($data source)
          :for td = ($data target)
          :do (if hard-copy-p
                  ($set! td ($clone sd))
                  ($set! td ($+ ($* tau sd) ($* (- 1 tau) td)))))))
(defun collect-experience (env am &optional (n 100))
  "Roll out N steps in ENV with exploratory actor AM; returns a list of
\(state action reward next-state) tuples.
NOTE(review): terminal transitions are neither flagged nor followed by a
reset here; acceptable for pendulum-style envs -- confirm before reuse."
  (let ((s (env/reset! env)))
    (loop :repeat n
          :for a = ($scalar (actor am s T nil))
          :for tx = (env/step! env a)
          :for ns = (transition/next-state tx)
          :for r = (transition/reward tx)
          :collect (let ((s0 s))
                     (setf s ns)
                     (list s0 a r ns)))))
(defparameter *am* (actor-model))
(defparameter *am-target* (actor-model))
(defparameter *cm1* (criticl-model))
(defparameter *cm2* (criticl-model))
(defparameter *cm1-target* (criticl-model))
(defparameter *cm2-target* (criticl-model))
(sync-model *am* *am-target*)
(sync-model *cm1* *cm1-target*)
(sync-model *cm2* *cm2-target*)
;; TD3
(let ((env (pendulum-env))
(epochs 2000)
(gamma 0.99)
(tau 0.005)
(nsample 1000)
(npolicy 2)
(nprn 50)
(trainp T)
(lra 0.001)
(lrc 0.001)
(ql nil)
(al nil))
($cg! *am*)
($cg! *cm1*)
($cg! *cm2*)
(loop :repeat epochs
:for ne :from 1
:for exps = (collect-experience env *am* nsample)
:for states = ($catn (mapcar (lambda (e) ($unsqueeze ($0 e) 0)) exps) 0)
:for actions = ($catn (mapcar (lambda (e) (tensor (list (list ($1 e))))) exps) 0)
:for rewards = ($catn (mapcar (lambda (e) (tensor (list (list ($2 e))))) exps) 0)
:for next-states = ($catn (mapcar (lambda (e) ($unsqueeze ($3 e) 0)) exps) 0)
:for next-actions = (actor *am-target* next-states T nil)
:for q1s-target = (critic *cm1-target* next-states next-actions nil)
:for q2s-target = (critic *cm2-target* next-states next-actions nil)
:for qs-target = ($+ rewards ($* gamma ($min ($cat q1s-target q2s-target 1) 1)))
:for q1s = (critic *cm1* states actions trainp)
:for q2s = (critic *cm2* states actions trainp)
:for qloss = ($+ ($mse q1s qs-target) ($mse q2s qs-target))
:do (let ((qlv (if ($parameterp qloss) ($data qloss) qloss))
(alv nil))
($amgd! *cm1* lrc)
($amgd! *cm2* lrc)
(when (zerop (rem ne npolicy))
(let* ((as (actor *am* states nil trainp))
(qs (critic *cm1* states as trainp))
(aloss ($- ($mean qs))))
(setf alv (if ($parameterp aloss) ($data aloss) aloss))
($amgd! *am* lra)
($cg! *cm1*)
(sync-model *am* *am-target* tau)
(sync-model *cm1* *cm1-target* tau)
(sync-model *cm2* *cm2-target* tau)))
(when qlv
(if ql
(setf ql (+ (* 0.9 ql) (* 0.1 qlv)))
(setf ql qlv)))
(when alv
(if al
(setf al (+ (* 0.9 al) (* 0.1 alv)))
(setf al alv)))
(when (zerop (rem ne nprn))
(prn (format nil "[~5D] ~10,2F ~10,4F | ~10,2F" ne ql al
($scalar ($mean rewards))))))))
(let* ((env (pendulum-env))
(s (env/reset! env))
(n 100)
(score 0))
(loop :repeat n
:for a = ($scalar (actor *am* s nil nil))
:for tx = (env/step! env a)
:for ns = (transition/next-state tx)
:for r = (transition/reward tx)
:do (progn
(setf s ns)
(incf score r)))
(* 1D0 (/ score n)))
| 5,879
|
Common Lisp
|
.lisp
| 154
| 27.909091
| 89
| 0.501751
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
03d7fb0cce5044bc313794b3c947b3bb0c38b313a4e6aa9035b24bee9e66cf56
| 3,215
|
[
-1
] |
3,216
|
ch09.lisp
|
chunsj_TH/examples/books/gdrl/ch09.lisp
|
(defpackage :gdrl-ch09
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :gdrl-ch09)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  "Tensor of length MAX-STEPS decaying from V0 to MINV.
The first (MAX-STEPS * DECAY-RATIO) entries follow a reversed, min-max
normalized logspace curve rescaled to [MINV, V0]; the tail is padded with
the final value."
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         ;; NOTE(review): dividing by (log log-base 10) is a constant scale
         ;; that the min-max normalization below cancels -- effectively a
         ;; no-op; kept for fidelity to the original.
         (vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                 ($list)
                 (reverse)
                 (tensor)))
         (minvs ($min vs))
         (maxvs ($max vs))
         (rngv (- maxvs minvs))
         (vs ($/ ($- vs minvs) rngv))
         (vs ($+ minv ($* vs (- v0 minv)))))
    ($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model (&optional (ni 4) (no 2))
  "Q-network for cartpole: NI inputs -> two hidden layers of 5 -> NO outputs."
  (let ((nhidden 5))
    (sequential-layer
     (affine-layer ni nhidden :weight-initializer :random-uniform)
     (affine-layer nhidden nhidden :weight-initializer :random-uniform)
     (affine-layer nhidden no :weight-initializer :random-uniform))))
(defun best-action-selector (model epsilon)
  "Epsilon-greedy action function over MODEL's Q-values.
Greedy picks the $argmin (this code treats Q-values as costs -- see
GENERATE-DATASET); with probability EPSILON a random action from {0, 1}
is taken instead."
  (lambda (state)
    (if (<= (random 1D0) epsilon)
        (random 2)
        ($scalar ($argmin ($evaluate model ($reshape state 1 4)) 1)))))
(defun sample-experiences (experiences nbatch)
  "Sample NBATCH experiences uniformly with replacement; when the buffer
holds NBATCH or fewer items, the whole buffer is returned as-is."
  (let ((size ($count experiences)))
    (cond ((<= size nbatch) experiences)
          (t (loop :repeat nbatch
                   :collect ($ experiences (random size)))))))
(defun generate-dataset (om tm experiences &optional (gamma 0.95D0))
  "Build DQN training targets from EXPERIENCES.
OM selects the next action ($argmin of its Q-values -- Q is treated as
cost here), TM evaluates that action (double DQN when OM /= TM).
Returns (STATES ACTIONS TARGET-Q) as tensors; terminal transitions drop
the bootstrap term via the (1 - dones) mask."
  (let* ((nr ($count experiences))
         (state-list (mapcar #'$0 experiences))
         (states (-> (apply #'$concat state-list)
                     ($reshape! nr 4)))
         (actions (-> (tensor.long (mapcar #'$1 experiences))
                      ($reshape! nr 1)))
         (costs (-> (tensor (mapcar #'$2 experiences))
                    ($reshape! nr 1)))
         (next-states (-> (apply #'$concat (mapcar #'$3 experiences))
                          ($reshape! nr 4)))
         (dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
                    ($reshape! nr 1)))
         ;; action selection by OM, evaluation by TM
         (as ($argmin ($evaluate om next-states) 1))
         (qns ($gather ($evaluate tm next-states) 1 as))
         (tqvs ($+ costs ($* gamma qns ($- 1 dones)))))
    (list states actions tqvs)))
(defun train (model xs as ts &optional (lr 0.008))
  "One gradient update ($rmgd!) on MODEL: MSE between the Q-values of the
taken actions AS and the targets TS.  Returns the detached loss value."
  (let ((loss ($mse ($gather ($execute model xs) 1 as) ts)))
    ($rmgd! model lr)
    ($data loss)))
;; DQN hyperparameters shared by DQN and DDQN below.
(defvar *max-buffer-size* 4096)   ;; replay buffer capacity (oldest entries dropped)
(defvar *batch-size* 512)         ;; experiences sampled per training step
(defvar *max-epochs* 2000)        ;; training epoch cap
(defvar *sync-period* 15)         ;; epochs between target-network syncs
(defvar *eps0* 1D0)               ;; initial exploration epsilon
(defvar *min-eps* 0.1D0)          ;; final exploration epsilon
(defvar *eps-decay-ratio* 0.9D0)  ;; fraction of epochs spent decaying epsilon
(defun report (epoch loss ntrain ctrain neval ceval success)
  "Print a training progress line every 20 epochs, and always on SUCCESS."
  (unless (and (not success) (plusp (rem epoch 20)))
    (prn (format nil
                 "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"
                 epoch ntrain ctrain neval ceval loss))))
(defun sync-models (target online)
  "Apply $cg! to both models (presumably resetting their computational
graphs -- see th docs), then overwrite TARGET's parameter data with
ONLINE's."
  ($cg! (list target online))
  (mapc (lambda (pt po) ($set! ($data pt) ($data po)))
        ($parameters target)
        ($parameters online)))
(defun generate-epsilons ()
  "Precompute the per-epoch exploration-epsilon schedule from the globals."
  (decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun dqn (&optional model)
  "Train a DQN agent on cartpole; returns the trained online model.
Experiences are collected with the TARGET network's epsilon-greedy
policy, and targets are built from the target network alone (plain DQN).
NOTE(review): DDQN below collects with the ONLINE network instead --
confirm the target-driven collection here is intentional."
  (let* ((train-env (cartpole-env :train))
         (eval-env (cartpole-env :eval))
         (model-target (model))
         (model-online (or model (model)))
         (experiences '())
         (total-cost 0)
         (success nil)
         (epsilons (generate-epsilons)))
    (sync-models model-target model-online)
    (loop :for epoch :from 1 :to *max-epochs*
          :while (not success)
          :for eps = ($ epsilons (1- epoch))
          :do (let ((ctrain 0)
                    (ntrain 0))
                ;; collect new experiences and trim the buffer to
                ;; *max-buffer-size*, dropping the oldest entries
                (let* ((exsi (collect-experiences train-env
                                                  (best-action-selector model-target eps)))
                       (exs (car exsi)))
                  (setf ctrain (cadr exsi))
                  (setf ntrain ($count exs))
                  (setf experiences (let ((ne ($count experiences)))
                                      (if (> ne *max-buffer-size*)
                                          (append (nthcdr (- ne *max-buffer-size*) experiences)
                                                  exs)
                                          (append experiences exs))))
                  (incf total-cost ctrain))
                ;; one training step on a sampled minibatch, then greedy eval
                (let* ((xts (generate-dataset model-target
                                              model-target
                                              (sample-experiences experiences *batch-size*)
                                              0.95D0))
                       (xs (car xts))
                       (as (cadr xts))
                       (ys (caddr xts)))
                  (let* ((loss (train model-online xs as ys 0.003))
                         (eres (evaluate eval-env (best-action-selector model-online 0D0)))
                         (neval ($0 eres))
                         (ceval ($2 eres)))
                    (setf success ($1 eres))
                    (report epoch loss ntrain ctrain neval ceval success)))
                ;; periodically copy online weights into the target network
                (when (zerop (rem epoch *sync-period*))
                  (sync-models model-target model-online))))
    (when success
      (prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
    model-online))
(defun ddqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(experiences '())
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(let* ((exsi (collect-experiences train-env
(best-action-selector model-online eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (let ((ne ($count experiences)))
(if (> ne *max-buffer-size*)
(append (nthcdr (- ne *max-buffer-size*) experiences)
exs)
(append experiences exs))))
(incf total-cost ctrain))
(let* ((xts (generate-dataset model-online
model-target
(sample-experiences experiences *batch-size*)
0.95D0))
(xs (car xts))
(as (cadr xts))
(ts (caddr xts)))
(let* ((loss (train model-online xs as ts))
(eres (evaluate eval-env (best-action-selector model-online 0D0)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success)))
(when (zerop (rem epoch *sync-period*))
(sync-models model-target model-online))))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
model-online))
(defparameter *m* nil)
(setf *m* (let ((rl #'dqn)) (funcall rl *m*)))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m* 0D0)))
| 7,809
|
Common Lisp
|
.lisp
| 173
| 31.098266
| 95
| 0.488451
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
1d9bbc1923b5e54fdf36a6c2519fd96afd1c40380cfc8920a547da842023eb1f
| 3,216
|
[
-1
] |
3,217
|
ch05.lisp
|
chunsj_TH/examples/books/gdrl/ch05.lisp
|
(defpackage :gdrl-ch05
(:use #:common-lisp
#:mu
#:mplot
#:th
#:th.env)
(:import-from #:th.env.examples))
(in-package :gdrl-ch05)
(let* ((env (th.env.examples:random-walk-env))
(goal 6)
(policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
(v-true (env/policy-evaluation env policy)))
(env/print-state-value-function env v-true :ncols 7)
(env/print-policy env policy :action-symbols '("<" ">") :ncols 7)
(prn "PGOAL:" (env/success-probability env policy goal)
"MRET:" (env/mean-return env policy)))
(defun generate-trajectory (env policy &key (max-steps 200))
  "Run POLICY in ENV until a terminal state, returning the episode as a
list of (state action reward next-state terminalp) records.
An episode exceeding MAX-STEPS is discarded and a fresh one is attempted.
NOTE(review): an episode terminating exactly at step MAX-STEPS is also
discarded, and the function then returns an empty list -- confirm."
  (let ((done nil)
        (trajectory '()))
    (loop :while (not done)
          :for state = (env/reset! env)
          :do (loop :for e :from 0 :to max-steps
                    :while (not done)
                    :do (let* ((action (funcall policy state))
                               (tx (env/step! env action))
                               (next-state (transition/next-state tx))
                               (reward (transition/reward tx))
                               (terminalp (transition/terminalp tx)))
                          (push (list state action reward next-state terminalp)
                                trajectory)
                          (setf done terminalp
                                state next-state)
                          ;; drop over-long episodes; the outer loop retries
                          (when (>= e max-steps)
                            (setf trajectory '())))))
    (reverse trajectory)))
;; Accessors for one experience tuple produced by GENERATE-TRAJECTORY:
;; (state action reward next-state terminalp).
(defun experience/state (experience)
  "State the agent acted from."
  ($ experience 0))
(defun experience/action (experience)
  "Action taken in that state."
  ($ experience 1))
(defun experience/reward (experience)
  "Reward received for the transition."
  ($ experience 2))
(defun experience/next-state (experience)
  "State reached after the transition."
  ($ experience 3))
(defun experience/terminalp (experience)
  "Whether the transition ended the episode."
  ($ experience 4))
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  "Return a MAX-STEPS-long tensor that decays from V0 to MINV along a
reversed log curve over the first (round (* MAX-STEPS DECAY-RATIO))
steps, then stays flat at the last decayed value."
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         (raw (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                  ($list)
                  (reverse)
                  (tensor)))
         ;; normalize to [0,1], then rescale into [MINV, V0]
         (unit ($/ ($- raw ($min raw)) ($- ($max raw) ($min raw))))
         (scaled ($+ minv ($* unit (- v0 minv)))))
    ($cat scaled ($fill! (tensor rem-steps) ($last scaled)))))
(defun mc-prediction (env policy &key (gamma 1D0) (alpha0 0.5) (min-alpha 0.01)
                                   (alpha-decay-ratio 0.5) (nepisodes 500) (max-steps 200)
                                   (first-visit-p T))
  "Monte-Carlo prediction of the state-value function of POLICY in ENV.
Returns (list V V-TRACK TARGETS): the final estimate, the per-episode
history of V, and the list of return targets collected per state.
FIRST-VISIT-P selects first-visit MC; NIL gives every-visit MC."
  (let* ((alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (ns (env/state-count env))
         (v (zeros ns))
         (v-track (zeros nepisodes ns))
         (targets (loop :for s :from 0 :below ns :collect '())))
    (loop :for e :from 0 :below nepisodes
          :for trajectory = (generate-trajectory env policy :max-steps max-steps)
          :for visited = (zeros ns)
          :do (progn
                (loop :for strj :on trajectory
                      :for experience = (car strj)
                      :for state = (experience/state experience)
                      :for it :from 0
                      ;; skip repeat visits only in first-visit mode
                      :do (unless (and first-visit-p (> ($ visited state) 0))
                            (let* ((strj (subseq trajectory it))
                                   ;; discounted return G from this visit onward
                                   (g (loop :for exi :in strj
                                            :for ri = (experience/reward exi)
                                            :for i :from 0
                                            :summing (* (expt gamma i) ri)))
                                   (mc-err (- g ($ v state))))
                              (setf ($ visited state) 1)
                              (push g ($ targets state))
                              (incf ($ v state) (* ($ alphas e) mc-err)))))
                (setf ($ v-track e) v)))
    (list v v-track targets)))
;; Accessors for the (V V-TRACK TARGETS) result list returned by the
;; prediction routines in this file.
(defun prediction/state-value-function (record)
  "Final state-value estimate V."
  ($ record 0))
(defun prediction/state-value-function-track (record)
  "Per-episode history of V (one row per episode)."
  ($ record 1))
;; Backward-compatible alias: the original name was misspelled
;; (\"trakc\"); kept so any existing callers continue to work.
(defun prediction/state-value-function-trakc (record)
  ($ record 1))
(defun prediction/targets (record)
  "Per-state lists of the update targets collected during learning."
  ($ record 2))
;; Demos: trajectory generation and first-visit / every-visit MC
;; prediction on random walk, compared against the exact V.
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (env/print-policy env policy :ncols 7)
  (generate-trajectory env policy))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (mc-prediction env policy))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (mcpred (mc-prediction env policy))
       (v (prediction/state-value-function mcpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (mcpred (mc-prediction env policy :first-visit-p nil))
       (v (prediction/state-value-function mcpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
(defun td-prediction (env policy &key (gamma 1D0) (alpha0 0.5) (min-alpha 0.01)
                                   (alpha-decay-ratio 0.5) (nepisodes 500))
  "One-step TD(0) prediction of the state-value function of POLICY in
ENV.  Returns (list V V-TRACK TARGETS) like MC-PREDICTION."
  (let* ((alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (ns (env/state-count env))
         (v (zeros ns))
         (v-track (zeros nepisodes ns))
         (targets (loop :for s :from 0 :below ns :collect '())))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for done = nil
          :do (progn
                (loop :while (not done)
                      :for action = (funcall policy state)
                      :for tx = (env/step! env action)
                      :for next-state = (transition/next-state tx)
                      :for reward = (transition/reward tx)
                      :for terminalp = (transition/terminalp tx)
                      ;; bootstrap target; next-state value masked out
                      ;; on terminal transitions
                      :for td-target = (+ reward (* gamma ($ v next-state) (if terminalp 0 1)))
                      :do (progn
                            (push td-target ($ targets state))
                            (incf ($ v state) (* ($ alphas e) (- td-target ($ v state))))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ v-track e) v)))
    (list v v-track targets)))
;; Demos: TD(0) prediction on random walk, compared against the exact V.
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (td-prediction env policy))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (mcpred (td-prediction env policy))
       (v (prediction/state-value-function mcpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
;; from sutton & barto's book
;; n-step TD prediction, transcribed from the S&B pseudocode (tau is the
;; time whose state estimate is updated, ende the episode end time).
;; NOTE(review): this version looks buggy and is redefined below.  With I
;; counting from 0 over RS, the exponent (- i tau 1) is negative for any
;; tau >= 0, whereas S&B's return uses gamma^i over rewards starting at
;; R[tau+1]; the (1+ tau) start of the SUBSEQ and the use of NEXT-STATE
;; (rather than S[tau+n]) for the bootstrap also look off — TODO confirm.
(defun ntd-prediction (env policy &key (gamma 1D0) (alpha0 0.5) (min-alpha 0.01)
                                    (alpha-decay-ratio 0.5) (nstep 3) (nepisodes 500))
  (let* ((alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (ns (env/state-count env))
         (v (zeros ns))
         (v-track (zeros nepisodes ns)))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for ende = most-positive-fixnum
          :for endp = nil
          :do (let ((rewards '())
                    (states (list state)) ;; XXX states is required to refer S(tau)
                    (next-state -1)
                    (tau -1))
                (loop :for tm :from 0
                      :while (not endp)
                      :do (progn
                            (when (< tm ende)
                              (let* ((action (funcall policy state))
                                     (tx (env/step! env action)))
                                (setf next-state (transition/next-state tx))
                                (push next-state states)
                                (push (transition/reward tx) rewards)
                                (if (transition/terminalp tx) (setf ende (1+ tm)))))
                            (setf tau (+ 1 (- tm nstep)))
                            (when (>= tau 0)
                              (let* ((rs (subseq (reverse rewards)
                                                 (1+ tau) (min ende (+ tau nstep))))
                                     (sts (reverse states))
                                     (stau ($ sts tau))
                                     (g (loop :for r :in rs
                                              :for i :from 0
                                              :summing (* (expt gamma (- i tau 1)) r))))
                                (when (< (+ tau nstep) ende)
                                  (incf g (* (expt gamma nstep) ($ v next-state))))
                                (incf ($ v stau) (* ($ alphas e) (- g ($ v stau))))))
                            (if (= tau (- ende 1)) (setf endp T))
                            (setf state next-state)))
                (setf ($ v-track e) v)))
    (list v v-track '())))
;; Demos for the S&B-style n-step TD version above.
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (ntd-prediction env policy :nstep 100))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (ntdpred (ntd-prediction env policy))
       (v (prediction/state-value-function ntdpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
;; Redefinition of NTD-PREDICTION: experience-buffer formulation of
;; n-step TD.  A sliding window of up to NSTEP experiences is kept; the
;; oldest state in the window is updated with the discounted partial
;; return plus a gamma^NSTEP bootstrap, then the window is shrunk by one.
(defun ntd-prediction (env policy &key (gamma 1D0) (alpha0 0.5) (min-alpha 0.01)
                                    (alpha-decay-ratio 0.5) (nstep 3) (nepisodes 500))
  "n-step TD prediction of POLICY's state-value function in ENV.
Returns (list V V-TRACK '())."
  (let* ((alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (ns (env/state-count env))
         (v (zeros ns))
         (v-track (zeros nepisodes ns)))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for done = nil
          :do (let ((experiences '()))
                (loop :while (or (not done) (not (null experiences)))
                      :do (progn
                            ;; fill the window up to NSTEP transitions
                            (loop :while (and (not done) (< ($count experiences) nstep))
                                  :for action = (funcall policy state)
                                  :for tx = (env/step! env action)
                                  :for next-state = (transition/next-state tx)
                                  :for reward = (transition/reward tx)
                                  :for terminalp = (transition/terminalp tx)
                                  :for experience = (list state action reward next-state terminalp)
                                  :do (progn
                                        (push experience experiences)
                                        (setf state next-state)
                                        (setf done terminalp)))
                            (when experiences
                              (let* ((ne ($count experiences))
                                     (exs (reverse experiences))
                                     (e0 (car exs))
                                     (el ($last exs))
                                     (est-state (experience/state e0))
                                     (next-state (experience/next-state el))
                                     (termp (experience/terminalp el))
                                     ;; discounted sum of the window's rewards
                                     (partial-returns (loop :for exp :in exs
                                                            :for n :from 0
                                                            :for reward = (experience/reward exp)
                                                            :summing (* (expt gamma n) reward)))
                                     ;; bootstrap, masked out when the
                                     ;; window ends at a terminal state
                                     (bs-val (* (expt gamma nstep) ($ v next-state)
                                                (if termp 0 1)))
                                     (ntd-target (+ bs-val partial-returns))
                                     (ntd-error (- ntd-target ($ v est-state))))
                                (incf ($ v est-state) (* ($ alphas e) ntd-error))
                                (when (and (= 1 ne) (experience/terminalp e0))
                                  (setf experiences '()))))
                            ;; drop the oldest experience from the window
                            (setf experiences (butlast experiences))))
                (setf ($ v-track e) v)))
    (list v v-track '())))
;; Demos for the buffer-based n-step TD version.
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (ntd-prediction env policy))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (ntdpred (ntd-prediction env policy))
       (v (prediction/state-value-function ntdpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
(defun td-lambda (env policy &key (gamma 1D0) (alpha0 0.5) (min-alpha 0.01)
                               (alpha-decay-ratio 0.5) (lam 0.3) (nepisodes 500))
  "TD(lambda) prediction with accumulating eligibility traces ES.
LAM is the trace-decay parameter.  Returns (list V V-TRACK '())."
  (let* ((alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (ns (env/state-count env))
         (v (zeros ns))
         (es (zeros ns))
         (v-track (zeros nepisodes ns)))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for done = nil
          :do (progn
                ;; traces reset at the start of every episode
                ($zero! es)
                (loop :while (not done)
                      :for action = (funcall policy state)
                      :for tx = (env/step! env action)
                      :for next-state = (transition/next-state tx)
                      :for reward = (transition/reward tx)
                      :for terminalp = (transition/terminalp tx)
                      :for fac = (if terminalp 0 1)
                      :for td-target = (+ reward (* gamma ($ v next-state) fac))
                      :for td-error = (- td-target ($ v state))
                      :do (progn
                            (incf ($ es state))
                            ;; update every state in proportion to its trace
                            ($add! v ($* alpha-err es))
                            (setf es ($* es gamma lam))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ v-track e) v)))
    (list v v-track '())))
;; Demos: TD(lambda) on random walk and comparisons of MC / TD(lambda)
;; against exact policy evaluation on the 4x3 grid world.
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s))))
  (td-lambda env policy))
(let* ((env (th.env.examples:random-walk-env))
       (policy (lambda (s) ($ '(0 0 0 0 0 0 0) s)))
       (v-true (env/policy-evaluation env policy))
       (ntlpred (td-lambda env policy))
       (v (prediction/state-value-function ntlpred)))
  (env/print-state-value-function env v :ncols 7)
  (env/print-state-value-function env v-true :ncols 7 :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :ncols 7 :title "ERROR"))
(let* ((env (th.env.examples:grid-world-env))
       (policy (lambda (s) ($ '(2 2 2 0
                                3 0 3 0
                                3 0 0 0)
                             s)))
       (v-true (env/policy-evaluation env policy)))
  (env/print-state-value-function env v-true)
  (td-lambda env policy))
(let* ((env (th.env.examples:grid-world-env))
       (policy (lambda (s) ($ '(2 2 2 0
                                3 0 3 0
                                3 0 0 0)
                             s)))
       (v-true (env/policy-evaluation env policy))
       (pred (mc-prediction env policy))
       (v (prediction/state-value-function pred)))
  (env/print-policy env policy)
  (env/print-state-value-function env v)
  (env/print-state-value-function env v-true :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :title "ERROR"))
(let* ((env (th.env.examples:grid-world-env))
       (policy (lambda (s) ($ '(2 2 2 0
                                3 0 3 0
                                3 0 0 0)
                             s)))
       (v-true (env/policy-evaluation env policy))
       (pred (td-lambda env policy))
       (v (prediction/state-value-function pred)))
  (env/print-policy env policy)
  (env/print-state-value-function env v)
  (env/print-state-value-function env v-true :title "TRUE")
  (env/print-state-value-function env ($- v v-true) :title "ERROR")
  (prn "PGOAL:" (env/success-probability env policy 3)
       "MRET:" (env/mean-return env policy)))
| 17,066
|
Common Lisp
|
.lisp
| 323
| 36.464396
| 99
| 0.484656
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
6283e1da0927cf9b52dd9659b29a6e68e445f417adc3510d04fb8ab154687453
| 3,217
|
[
-1
] |
3,218
|
ch08.lisp
|
chunsj_TH/examples/books/gdrl/ch08.lisp
|
;; Chapter 8 examples: value-function approximation with neural networks
;; (NFQ-style training on slippery walk and cartpole).
(defpackage :gdrl-ch08
  (:use #:common-lisp
        #:mu
        #:th
        #:th.layers
        #:th.env
        #:th.env.examples))
(in-package :gdrl-ch08)
(defun fcq (input-dim output-dim)
  "Fully-connected Q-network: two tanh hidden layers of width 5 feeding
a linear output layer of OUTPUT-DIM action values."
  (let ((hidden1 5)
        (hidden2 5))
    (sequential-layer
     (affine-layer input-dim hidden1 :activation :tanh)
     (affine-layer hidden1 hidden2 :activation :tanh)
     (affine-layer hidden2 output-dim))))
(defun epsilon-greedy (model state &key (epsilon 0.1D0))
  "Pick actions for a batch of STATEs: with probability 1-EPSILON the
argmax of MODEL's Q-values, otherwise a random 0/1 action per row.
Assumes a two-action problem (the random branch draws Bernoulli 0/1)."
  (let ((qvs ($evaluate model state)))
    (if (> (random 1D0) epsilon)
        ($argmax qvs 1)
        (tensor.long ($bernoulli (tensor ($size state 0) 1) 0.5D0)))))
(defun loss (v r) ($mse v r))
(defun select-action (model state &key (epsilon 0.1D0))
  "Epsilon-greedy action for a single scalar STATE; returns a plain
integer action."
  (-> (epsilon-greedy model (tensor (list (list state))) :epsilon epsilon)
      ($scalar)))
;; Reference solution: exact value iteration on slippery-walk-seven,
;; used below as the regression target for the network.
(let* ((env (slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 1D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
;; Same schedule as the ch05/ch07 DECAY-SCHEDULE, with the min/max/range
;; intermediates spelled out explicitly.
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  "Return a MAX-STEPS-long tensor decaying from V0 to MINV over the
first (round (* MAX-STEPS DECAY-RATIO)) steps, then flat."
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         (vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                 ($list)
                 (reverse)
                 (tensor)))
         (minvs ($min vs))
         (maxvs ($max vs))
         (rngv (- maxvs minvs))
         ;; normalize to [0,1], then rescale into [minv, v0]
         (vs ($/ ($- vs minvs) rngv))
         (vs ($+ minv ($* vs (- v0 minv)))))
    ($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun q-learning (env &key (gamma 1D0)
                         (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                         (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                         (nepisodes 3000))
  "Tabular Q-learning with decaying alpha and epsilon schedules.
Returns (list Q V greedy-policy-fn Q-TRACK PI-TRACK)."
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         ;; epsilon-greedy over the Q row of STATE
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for e :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons e)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((action (funcall select-action Q state eps))
                                 (tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 ;; off-policy target: max over next Q row
                                 (td-target (+ reward (* gamma ($max ($ Q next-state))
                                                         (if terminalp 0 1))))
                                 (td-error (- td-target ($ Q state action))))
                            (incf ($ Q state action) (* ($ alphas e) td-error))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ Q-track e) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max* Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Demos: tabular Q-learning on slippery walk, then supervised fitting
;; of the FCQ network *MODEL* to the exact optimal Q table.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (q-learning env :gamma 1D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
(defparameter *model* (fcq 1 2))
(prn ($evaluate *model* (tensor '((0) (1) (2) (3) (4) (5) (6) (7) (8)))))
(prn ($argmax ($evaluate *model* (tensor '((0) (1) (2) (3) (4) (5) (6) (7) (8)))) 1))
;; Supervised regression of *MODEL* onto the optimal Q from value
;; iteration (sanity check that the network can represent Q*).
(let* ((env (slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 1D0))
       (opt-q (value-iteration/optimal-action-value-function optres))
       (states (tensor '((0) (1) (2) (3) (4) (5) (6) (7) (8)))))
  (loop :repeat 5000
        :for k :from 1
        :do (let* ((v ($execute *model* states))
                   (l (loss v opt-q)))
              ($rmgd! *model*)
              (when (zerop (rem k 500)) (prn l))))
  (prn opt-q)
  (prn ($evaluate *model* states)))
;; Online Q-network training on slippery walk: epsilon-greedy rollouts
;; accumulate an experience buffer; once NBATCH experiences exist, the
;; whole buffer is turned into (state, action, reward, next-state, done)
;; tensors and the network is trained NTRAIN steps per environment step.
(let ((env (slippery-walk-seven-env))
      (model *model*)
      (max-episodes 100)
      (nbatch 100)
      (ntrain 5)
      (epsilon 0.5D0)
      (gamma 1D0)
      (lr 0.01)
      (experiences '()))
  (loop :repeat max-episodes
        :for episode :from 1
        :do (let ((state (env/reset! env))
                  (done nil))
              (loop :while (not done)
                    :for action = (select-action model state :epsilon epsilon)
                    :for tx = (env/step! env action)
                    :for next-state = (transition/next-state tx)
                    :for reward = (transition/reward tx)
                    :for terminalp = (transition/terminalp tx)
                    :do (let ((experience (list state action reward next-state terminalp)))
                          (push experience experiences)
                          (let ((ne ($count experiences)))
                            (when (>= ne nbatch)
                              (let ((states (-> (mapcar #'$0 experiences)
                                                (tensor)
                                                ($reshape ne 1)))
                                    (actions (-> (mapcar #'$1 experiences)
                                                 (tensor.long)
                                                 ($reshape ne 1)))
                                    (rewards (-> (mapcar #'$2 experiences)
                                                 (tensor)
                                                 ($reshape ne 1)))
                                    (next-states (-> (mapcar #'$3 experiences)
                                                     (tensor)
                                                     ($reshape ne 1)))
                                    ;; 0 where terminal: masks the bootstrap
                                    (facs (-> (mapcar (lambda (p) (if ($4 p) 0 1)) experiences)
                                              (tensor)
                                              ($reshape ne 1))))
                                (with-max-heap ()
                                  (loop :repeat ntrain
                                        :for k :from 1
                                        :for maxqs = (-> ($evaluate model next-states)
                                                         ($max 1))
                                        :for tqs = ($+ rewards ($* gamma maxqs facs))
                                        :for qsa = ($gather ($execute model states) 1 actions)
                                        :do (let* ((loss (loss qsa tqs))
                                                   (lv (format nil "~,4F" ($data loss)))
                                                   (lbl (format nil "[~4D/~2D]" episode k)))
                                              (when (zerop (rem episode 20))
                                                (when (eq k 1) (prn lbl lv))
                                                (when (eq k ntrain) (prn lbl lv)))
                                              ($rpgd! model lr)))))))
                          (setf state next-state
                                done terminalp))))))
;; for cartpole example check examples/etc/cartpole-nfq.lisp
;; following is copy of it.
;; physical parameters of the cart-pole system
(defconstant +gravity+ 9.8D0)
(defconstant +masscart+ 1D0)
(defconstant +masspole+ 0.1D0)
(defconstant +total-mass+ (+ +masscart+ +masspole+))
(defconstant +length+ 0.5D0)
(defconstant +polemass-length+ (* +masspole+ +length+))
(defconstant +force-mag+ 10D0)
;; Euler integration time step (seconds)
(defconstant +tau+ 0.02D0)
;; "success" band: zero transition cost while inside these bounds
(defconstant +x-success-range+ 2.4D0)
(defconstant +theta-success-range+ (/ (* 12 PI) 180D0))
;; hard failure bounds: leaving them ends the episode with cost 1
(defconstant +x-threshold+ 2.4D0)
(defconstant +theta-threshold-radians+ (/ PI 2))
;; default per-step transition cost outside the success band
(defconstant +c-trans+ 0.01D0)
;; step budgets per episode for training vs evaluation mode
(defconstant +train-max-steps+ 100)
(defconstant +eval-max-steps+ 3000)
;; Minimal cart-pole environment; MODE is :train or :eval (affects the
;; reset distribution and the per-episode step budget).
(defclass cartpole-env ()
  ((mode :initform nil :accessor env/mode)
   (step :initform 0 :accessor env/episode-step)
   (state :initform nil :accessor env/state)))
(defun cartpole-env (&optional (m :train))
  "Construct a cart-pole environment in mode M and reset it."
  (let ((n (make-instance 'cartpole-env)))
    (setf (env/mode n) m)
    (env/reset! n)
    n))
(defmethod env/reset! ((env cartpole-env))
  "Reset the episode: zero the step counter and sample a fresh state
\(x xd theta thd) with zero velocities.  Training mode starts the cart
anywhere in +/-2.3, evaluation mode in the narrower +/-1.0 range; the
pole angle is uniform in +/-0.3 rad in both modes."
  (with-slots (mode state step) env
    (setf step 0)
    (setf state (if (eq mode :train)
                    (tensor (list (random/uniform -2.3D0 2.3D0)
                                  0
                                  (random/uniform -0.3 0.3)
                                  0))
                    (tensor (list (random/uniform -1D0 1D0)
                                  0
                                  (random/uniform -0.3 0.3)
                                  0))))
    state))
(defmethod env/step! ((env cartpole-env) action)
  "Advance the cart-pole one Euler step under ACTION (1 pushes right,
anything else left).  Returns (list nil next-state cost done blown):
cost is 1 with DONE=T on leaving the failure thresholds, 0 inside the
success band, +c-trans+ otherwise; BLOWN is T once the mode-dependent
step budget is exhausted."
  (let* ((x ($0 (env/state env)))
         (xd ($1 (env/state env)))
         (th ($2 (env/state env)))
         (thd ($3 (env/state env)))
         (force (if (eq action 1) +force-mag+ (- +force-mag+)))
         (costh (cos th))
         (sinth (sin th))
         ;; standard cart-pole dynamics (pole angular and cart accel)
         (tmp (/ (+ force (* +polemass-length+ thd thd sinth))
                 +total-mass+))
         (thacc (/ (- (* +gravity+ sinth) (* costh tmp))
                   (* +length+
                      (- 4/3 (/ (* +masspole+ costh costh) +total-mass+)))))
         (xacc (- tmp (/ (* +polemass-length+ thacc costh) +total-mass+)))
         (cost +c-trans+)
         (done nil)
         (blown nil))
    (incf (env/episode-step env))
    ;; explicit Euler integration with step +tau+
    (incf x (* +tau+ xd))
    (incf xd (* +tau+ xacc))
    (incf th (* +tau+ thd))
    (incf thd (* +tau+ thacc))
    (cond ((or (< x (- +x-threshold+)) (> x +x-threshold+)
               (< th (- +theta-threshold-radians+)) (> th +theta-threshold-radians+))
           (setf cost 1D0
                 done T))
          ((and (> x (- +x-success-range+)) (< x +x-success-range+)
                (> th (- +theta-success-range+)) (< th +theta-success-range+))
           (setf cost 0D0
                 done nil))
          (T (setf cost +c-trans+
                   done nil)))
    (when (>= (env/episode-step env)
              (if (eq :train (env/mode env)) +train-max-steps+ +eval-max-steps+))
      (setf blown T))
    (let ((next-state (tensor (list x xd th thd))))
      (setf (env/state env) next-state)
      (list nil next-state cost done blown))))
(defun generate-goal-patterns (&optional (size 100))
  "Hint-to-goal training patterns for NFQ: SIZE inputs of
\(x xd theta thd action) sampled near the goal region (small x, theta
inside the success band, random action), paired with zero-cost targets."
  (list (tensor (loop :repeat size
                      :collect (list (random/uniform -0.05 0.05)
                                     (random/normal 0 1)
                                     (random/uniform (- +theta-success-range+)
                                                     +theta-success-range+)
                                     (random/normal 0 1)
                                     (random 2))))
        (zeros size 1)))
(defun collect-experiences (env &optional selector)
  "Run one episode in ENV, choosing actions with SELECTOR (or uniformly
at random when NIL), until the episode ends or the step budget blows.
Returns (list rollout episode-cost) where ROLLOUT is a chronological
list of (state action cost next-state done) tuples."
  (let ((rollout '())
        (episode-cost 0)
        (state (env/reset! env))
        (done nil)
        (blown nil))
    (loop :while (and (not done) (not blown))
          :for action = (if selector
                            (funcall selector state)
                            (random 2))
          :for tx = (env/step! env action)
          :do (let ((next-state ($1 tx))
                    (cost ($2 tx)))
                (setf done ($3 tx)
                      blown ($4 tx))
                (push (list state action cost next-state done) rollout)
                (incf episode-cost cost)
                (setf state next-state)))
    (list (reverse rollout) episode-cost)))
(defun model (&optional (ni 5) (no 1))
  "NFQ cost network: NI inputs (state + action), two hidden layers of
width 5, NO outputs, all with random-uniform weight initialization."
  (let ((hidden1 5)
        (hidden2 5))
    (sequential-layer
     (affine-layer ni hidden1 :weight-initializer :random-uniform)
     (affine-layer hidden1 hidden2 :weight-initializer :random-uniform)
     (affine-layer hidden2 no :weight-initializer :random-uniform))))
(defun best-action-selector (model)
  "Return a policy closure over MODEL.  MODEL predicts cost for a
\(state, action) input, so the closure picks the action with the LOWER
predicted cost: 1 (push right) when the cost of action 0 is >= that of
action 1, else 0."
  (lambda (state)
    (let* ((state ($reshape state 1 4))
           (qleft ($evaluate model ($concat state (zeros 1 1) 1)))
           (qright ($evaluate model ($concat state (ones 1 1) 1))))
      (if (>= ($ qleft 0 0) ($ qright 0 0)) 1 0))))
(defun generate-patterns (model experiences &optional (gamma 0.95D0))
  "Build an NFQ training set from EXPERIENCES.  Inputs XS are
\(state, action) rows; targets are cost + GAMMA * min-a' Q(s', a'),
with the bootstrap masked out on terminal transitions (min because the
network predicts cost, not reward).  Returns (list xs targets)."
  (let* ((nr ($count experiences))
         (state-list (mapcar #'$0 experiences))
         (states (-> (apply #'$concat state-list)
                     ($reshape! nr 4)))
         (actions (-> (tensor (mapcar #'$1 experiences))
                      ($reshape! nr 1)))
         (costs (-> (tensor (mapcar #'$2 experiences))
                    ($reshape! nr 1)))
         (next-states (-> (apply #'$concat (mapcar #'$3 experiences))
                          ($reshape! nr 4)))
         (dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
                    ($reshape! nr 1)))
         (xs ($concat states actions 1))
         (qleft ($evaluate model ($concat next-states (zeros nr 1) 1)))
         (qright ($evaluate model ($concat next-states (ones nr 1) 1)))
         ;; min over the two actions' predicted costs
         (qns ($min ($concat qleft qright 1) 1))
         (tqvs ($+ costs ($* gamma qns ($- 1 dones)))))
    (list xs tqvs)))
(defun train (model xs ts)
  "One full-batch update of MODEL on inputs XS against targets TS via
$RPGD!; returns the scalar MSE loss measured before the update."
  (let ((prediction ($execute model xs)))
    (let ((mse ($mse prediction ts)))
      ($rpgd! model)
      ($data mse))))
(defun evaluate (env model)
  "Run MODEL's greedy policy in ENV for up to +eval-max-steps+ steps.
Returns (list steps successp episode-cost); success means the episode
nearly exhausted the step budget while the cart stayed inside the
success range."
  (let ((state (env/reset! env))
        (ne 0)
        (done nil)
        (blown nil)
        (ecost 0D0)
        (selector (best-action-selector model)))
    (loop :while (and (not done) (not blown))
          :for step :from 0 :below +eval-max-steps+
          :for action = (funcall selector state)
          :for tx = (env/step! env action)
          :do (let ((next-state ($1 tx))
                    (cost ($2 tx)))
                (setf done ($3 tx)
                      blown ($4 tx))
                (incf ecost cost)
                (incf ne)
                (setf state next-state)))
    (list ne
          (and (>= ne (- +eval-max-steps+ 2)) (<= (abs ($0 state)) +x-success-range+))
          ecost)))
;; NFQ experiment switches: seed the buffer with a random episode, keep
;; adding on-policy episodes, mix in hint-to-goal patterns, epoch cap.
(defvar *init-experience* T)
(defvar *increment-experience* T)
(defvar *hint-to-goal* T)
(defvar *max-epochs* 300)
(defun report (epoch loss ntrain ctrain neval ceval success)
  "Print a progress line every 20 epochs and on success."
  (when (or success (zerop (rem epoch 20)))
    (let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
      (prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
;; NFQ main loop: grow the experience buffer, regenerate the full
;; training set from the current network each epoch (optionally with
;; hint-to-goal patterns), train once, and evaluate; stops on success.
(with-max-heap ()
  (let* ((train-env (cartpole-env :train))
         (eval-env (cartpole-env :eval))
         (model (model))
         (experiences '())
         (total-cost 0)
         (success nil))
    (when *init-experience*
      (let* ((exsi (collect-experiences train-env))
             (exs (car exsi))
             (ecost (cadr exsi)))
        (setf experiences exs)
        (incf total-cost ecost)))
    (loop :for epoch :from 1 :to *max-epochs*
          :while (not success)
          :do (let ((ctrain 0)
                    (ntrain 0))
                (when *increment-experience*
                  (let* ((exsi (collect-experiences train-env (best-action-selector model)))
                         (exs (car exsi)))
                    (setf ctrain (cadr exsi))
                    (setf ntrain ($count exs))
                    (setf experiences (append experiences exs))
                    (incf total-cost ctrain)))
                (let* ((xys (generate-patterns model experiences 0.95D0))
                       (xs (car xys))
                       (ys (cadr xys)))
                  (when *hint-to-goal*
                    (let ((gxys (generate-goal-patterns)))
                      (setf xs ($concat xs (car gxys) 0))
                      (setf ys ($concat ys (cadr gxys) 0))))
                  (let* ((loss (train model xs ys))
                         (eres (evaluate eval-env model))
                         (neval ($0 eres))
                         (ceval ($2 eres)))
                    (setf success ($1 eres))
                    (report epoch loss ntrain ctrain neval ceval success)))))
    (when success
      (prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))))
| 16,796
|
Common Lisp
|
.lisp
| 368
| 31.230978
| 95
| 0.472769
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
be72c0cd3def8702f46f286ab8ade06a9db61d8674b6ad7d0ec73ff4cd69f05d
| 3,218
|
[
-1
] |
3,219
|
ch07.lisp
|
chunsj_TH/examples/books/gdrl/ch07.lisp
|
;; Chapter 7 examples: eligibility traces and model-based methods
;; (SARSA(lambda), Q(lambda), Dyna-Q, trajectory sampling).
(defpackage :gdrl-ch07
  (:use #:common-lisp
        #:mu
        #:th
        #:th.env)
  (:import-from #:th.env.examples))
(in-package :gdrl-ch07)
;; Reference solution via exact value iteration on slippery-walk-seven.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (optres (env/value-iteration env :gamma 0.99D0))
       (opt-v (value-iteration/optimal-value-function optres))
       (opt-p (value-iteration/optimal-policy optres))
       (opt-q (value-iteration/optimal-action-value-function optres)))
  (env/print-state-value-function env opt-v :ncols 9)
  (env/print-policy env opt-p :action-symbols '("<" ">") :ncols 9)
  (prn opt-q))
;; Same decay schedule as in ch05/ch08 (each chapter package carries its
;; own copy).
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  "Return a MAX-STEPS-long tensor decaying from V0 to MINV over the
first (round (* MAX-STEPS DECAY-RATIO)) steps, then flat."
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         (vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                 ($list)
                 (reverse)
                 (tensor)))
         (minvs ($min vs))
         (maxvs ($max vs))
         (rngv (- maxvs minvs))
         (vs ($/ ($- vs minvs) rngv))
         (vs ($+ minv ($* vs (- v0 minv)))))
    ($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun sarsa-lambda (env &key (gamma 1D0)
                           (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                           (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                           (lm 0.5) (replacing-traces-p T)
                           (nepisodes 3000))
  "On-policy SARSA(lambda) control with eligibility traces E.
REPLACING-TRACES-P selects replacing traces (trace row of the current
state zeroed before the increment and clamped to [0,1]) instead of
accumulating traces.  Returns (list Q V greedy-policy-fn Q-TRACK
PI-TRACK)."
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         (E (zeros ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for k :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons k)
          :for alpha = ($ alphas k)
          :for action = (funcall select-action Q state eps)
          :do (let ((done nil))
                ($zero! E)
                (loop :while (not done)
                      :do (let* ((tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (next-action (funcall select-action Q next-state eps))
                                 (fac (if terminalp 0 1))
                                 ;; on-policy target: the action actually chosen next
                                 (td-target (+ reward (* gamma ($ Q next-state next-action)
                                                         fac)))
                                 (td-error (- td-target ($ Q state action))))
                            (when replacing-traces-p ($zero! ($ E state)))
                            (incf ($ E state action))
                            (when replacing-traces-p ($clamp! E 0 1))
                            ;; trace-weighted update of the whole Q table
                            (setf Q ($+ Q ($* alpha td-error E)))
                            (setf E ($* gamma lm E))
                            (setf done terminalp
                                  state next-state
                                  action next-action)))
                (setf ($ Q-track k) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Demos: SARSA(lambda) with replacing and accumulating traces.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (sarsa-lambda env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (sarsa-lambda env :gamma 0.99D0 :replacing-traces-p nil :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
(defun q-lambda (env &key (gamma 1D0)
                       (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                       (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                       (lm 0.5) (replacing-traces-p T)
                       (nepisodes 3000))
  "Watkins's Q(lambda) control with eligibility traces E: traces decay
while the behavior policy stays greedy and are reset on exploratory
actions.  Returns (list Q V greedy-policy-fn Q-TRACK PI-TRACK)."
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (Q-track (zeros nepisodes ns na))
         (E (zeros ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state)))))))
    (loop :for k :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons k)
          :for alpha = ($ alphas k)
          :for action = (funcall select-action Q state eps)
          :do (let ((done nil))
                ($zero! E)
                (loop :while (not done)
                      :do (let* ((tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (next-action (funcall select-action Q next-state eps))
                                 (maxa ($max ($ Q next-state)))
                                 ;; BUGFIX: use numeric = instead of EQ.
                                 ;; EQ on two freshly computed floats is
                                 ;; not required to be true even when they
                                 ;; are numerically equal, so GREEDY-P was
                                 ;; effectively always NIL and the traces
                                 ;; were zeroed on every step.
                                 (greedy-p (= ($ Q next-state next-action) maxa))
                                 (fac (if terminalp 0 1))
                                 ;; off-policy target: max over next Q row
                                 (td-target (+ reward (* gamma maxa fac)))
                                 (td-error (- td-target ($ Q state action))))
                            (when replacing-traces-p ($zero! ($ E state)))
                            (incf ($ E state action))
                            (when replacing-traces-p ($clamp! E 0 1))
                            (setf Q ($+ Q ($* alpha td-error E)))
                            ;; Watkins: decay traces only while greedy,
                            ;; reset them after an exploratory action
                            (if greedy-p
                                (setf E ($* gamma lm E))
                                ($zero! E))
                            (setf done terminalp
                                  state next-state
                                  action next-action)))
                (setf ($ Q-track k) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Demos: Q(lambda) with replacing and accumulating traces.
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (q-lambda env :gamma 0.99D0 :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
(let* ((env (th.env.examples:slippery-walk-seven-env))
       (res (q-lambda env :gamma 0.99D0 :replacing-traces-p nil :nepisodes 3000))
       (Q ($ res 0))
       (v ($ res 1))
       (policy ($ res 2)))
  (env/print-state-value-function env v :ncols 9)
  (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
  (prn Q))
(defun dyna-Q (env &key (gamma 1D0)
                     (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                     (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                     (nplanning 3)
                     (nepisodes 3000))
  "Dyna-Q: tabular Q-learning plus NPLANNING simulated planning updates
per real step, using learned transition counts T-COUNT and an
incremental-mean reward model R-MODEL.
Returns (list Q V greedy-policy-fn Q-TRACK PI-TRACK)."
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))
         (T-count (zeros ns na ns))
         (R-model (zeros ns na ns))
         (Q-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state))))))
         (states (loop :for s :from 0 :below ns :collect s)))
    (loop :for k :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons k)
          :for alpha = ($ alphas k)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((action (funcall select-action Q state eps))
                                 (tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (fac (if terminalp 0 1))
                                 (maxa ($max ($ Q next-state)))
                                 (td-target (+ reward (* gamma maxa fac)))
                                 (td-error (- td-target ($ Q state action)))
                                 (R-diff (- reward ($ R-model state action next-state))))
                            (incf ($ Q state action) (* alpha td-error))
                            ;; model learning: visit counts and running
                            ;; mean of rewards per (s, a, s')
                            (incf ($ T-count state action next-state))
                            (incf ($ R-model state action next-state)
                                  (/ R-diff ($ T-count state action next-state)))
                            ;; planning: replay simulated transitions
                            ;; sampled from the learned model (the loop
                            ;; variables shadow STATE/ACTION locally and
                            ;; SETF the outer TD-TARGET/TD-ERROR bindings)
                            (unless (zerop ($sum Q))
                              (loop :repeat nplanning
                                    :for visited-states = ($select ($nonzero T-count) 1 0)
                                    :for nvs = ($count visited-states)
                                    :for state = ($ visited-states (random nvs))
                                    :for actions-taken = ($select ($nonzero ($ T-count state)) 1 0)
                                    :for nat = ($count actions-taken)
                                    :for action = ($ actions-taken (random nat))
                                    :for probs = ($/ ($ T-count state action)
                                                     ($sum ($ T-count state action)))
                                    :for next-state = ($choice states probs)
                                    :for reward = ($ R-model state action next-state)
                                    :do (progn
                                          (setf td-target (+ reward
                                                             (* gamma ($max ($ Q next-state)))))
                                          (setf td-error (- td-target ($ Q state action)))
                                          (incf ($ Q state action) (* alpha td-error)))))
                            (setf done terminalp
                                  state next-state)))
                (setf ($ Q-track k) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))
          (va ($squeeze ($argmax Q 1))))
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Demo: Dyna-Q on slippery-walk-seven.
(with-max-heap ()
  (let* ((env (th.env.examples:slippery-walk-seven-env))
         (res (dyna-Q env :gamma 0.99D0 :nepisodes 3000))
         (Q ($ res 0))
         (v ($ res 1))
         (policy ($ res 2)))
    (env/print-state-value-function env v :ncols 9)
    (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
    (prn Q)))
(defun trajectory-sampling (env &key (gamma 1D0)
                              (alpha0 0.5) (min-alpha 0.01) (alpha-decay-ratio 0.5)
                              (epsilon0 1.0) (min-epsilon 0.1) (epsilon-decay-ratio 0.9)
                              (max-trajectory-depth 100) (planning-freq 5) (greedy-p T)
                              (nepisodes 3000))
  "Model-based Q-learning with trajectory sampling: learns Q from real
environment steps and, during planning, performs extra Q updates along
trajectories simulated from the learned transition counts (T-COUNT) and
reward model (R-MODEL).  Returns (list Q V greedy-policy-fn Q-track pi-track)."
  (let* ((ns (env/state-count env))
         (na (env/action-count env))
         (pi-track '())
         (Q (zeros ns na))          ; action-value estimates
         (T-count (zeros ns na ns)) ; observed transition counts s,a -> s'
         (R-model (zeros ns na ns)) ; running-average reward for s,a -> s'
         (Q-track (zeros nepisodes ns na))
         (alphas (decay-schedule alpha0 min-alpha alpha-decay-ratio nepisodes))
         (epsilons (decay-schedule epsilon0 min-epsilon epsilon-decay-ratio nepisodes))
         ;; epsilon-greedy selection over the Q row of STATE
         (select-action (lambda (Q state epsilon)
                          (if (> (random 1D0) epsilon)
                              ($argmax ($ Q state))
                              (random ($count ($ Q state))))))
         (states (loop :for s :from 0 :below ns :collect s)))
    (loop :for k :from 0 :below nepisodes
          :for state = (env/reset! env)
          :for eps = ($ epsilons k)
          :for alpha = ($ alphas k)
          :do (let ((done nil))
                (loop :while (not done)
                      :do (let* ((action (funcall select-action Q state eps))
                                 (tx (env/step! env action))
                                 (next-state (transition/next-state tx))
                                 (reward (transition/reward tx))
                                 (terminalp (transition/terminalp tx))
                                 (fac (if terminalp 0 1)) ; mask bootstrap at terminal states
                                 (maxa ($max ($ Q next-state)))
                                 (td-target (+ reward (* gamma maxa fac)))
                                 (td-error (- td-target ($ Q state action)))
                                 (R-diff (- reward ($ R-model state action next-state)))
                                 ;; remember the real next state: planning below
                                 ;; temporarily walks STATE along simulated rollouts
                                 (backup-next-state next-state))
                            (incf ($ Q state action) (* alpha td-error))
                            ;; update the learned model from the observed transition
                            (incf ($ T-count state action next-state))
                            (incf ($ R-model state action next-state)
                                  (/ R-diff ($ T-count state action next-state)))
                            ;; NOTE(review): this gate enables planning on EVERY step
                            ;; of every PLANNING-FREQ-th episode -- confirm intended.
                            (when (zerop (mod k planning-freq))
                              (unless (zerop ($sum Q))
                                (loop :repeat max-trajectory-depth
                                      :for action = (if greedy-p
                                                        ($argmax ($ Q state))
                                                        (funcall select-action Q state eps))
                                      :for txcount = ($sum ($ T-count state action))
                                      :when (> txcount 0)
                                        :do (let* ((probs ($/ ($ T-count state action)
                                                              txcount))
                                                   (next-state ($choice states probs))
                                                   (reward ($ R-model state action next-state))
                                                   (qmax ($max ($ Q next-state)))
                                                   (td-target (+ reward (* gamma qmax)))
                                                   (td-error (- td-target ($ Q state action))))
                                              (incf ($ Q state action) (* alpha td-error))
                                              ;; advance along the simulated trajectory
                                              (setf state next-state)))))
                            ;; restore the real environment state after planning
                            (setf done terminalp
                                  state backup-next-state)))
                (setf ($ Q-track k) Q)
                (push ($squeeze ($argmax Q 1)) pi-track)))
    (let ((v ($squeeze (car ($max Q 1))))   ; V(s) = max_a Q(s,a)
          (va ($squeeze ($argmax Q 1))))    ; greedy action per state
      (list Q v (lambda (s) ($ va s)) Q-track (reverse pi-track)))))
;; Demo: solve slippery-walk-seven with trajectory sampling and print results.
(with-max-heap ()
  (let* ((env (th.env.examples:slippery-walk-seven-env))
         (res (trajectory-sampling env :gamma 0.99D0 :nepisodes 3000))
         (Q ($ res 0))
         (v ($ res 1))
         (policy ($ res 2)))
    (env/print-state-value-function env v :ncols 9)
    (env/print-policy env policy :action-symbols '("<" ">") :ncols 9)
    (prn Q)))
| 16,393
|
Common Lisp
|
.lisp
| 302
| 34.586093
| 99
| 0.438985
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
26a240bc5287633ee3f639606b47609e7f29d7622b4c84a3ba51afd9e55592fa
| 3,219
|
[
-1
] |
3,220
|
ch10.lisp
|
chunsj_TH/examples/books/gdrl/ch10.lisp
|
(defpackage :gdrl-ch10
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :gdrl-ch10)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
  "Return a length-MAX-STEPS tensor that decays from V0 to MINV over the
first DECAY-RATIO fraction of steps (log-spaced curve, reversed and
rescaled into [MINV, V0]), then stays constant at the final value."
  (let* ((decay-steps (round (* max-steps decay-ratio)))
         (rem-steps (- max-steps decay-steps))
         ;; log-spaced points, reversed so the curve decreases
         (vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
                 ($list)
                 (reverse)
                 (tensor)))
         (minvs ($min vs))
         (maxvs ($max vs))
         (rngv (- maxvs minvs))
         ;; normalize to [0,1], then rescale into [minv, v0]
         (vs ($/ ($- vs minvs) rngv))
         (vs ($+ minv ($* vs (- v0 minv)))))
    ;; pad the remaining steps with the last (smallest) value
    ($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model-common (&optional (ni 4))
  "Shared trunk of the dueling network: two affine layers NI -> 5 -> 5,
both initialized with uniform random weights."
  (let* ((nh1 5)
         (nh2 5))
    (sequential-layer
     (affine-layer ni nh1 :weight-initializer :random-uniform)
     (affine-layer nh1 nh2 :weight-initializer :random-uniform))))
;; State-value head: maps the NI-dim trunk output to a single V(s).
(defun model-value (&optional (ni 5))
  (sequential-layer
   (affine-layer ni 1 :weight-initializer :random-uniform)))
;; Advantage head: maps the trunk output to one A(s,a) per action (NO actions).
(defun model-advantage (&optional (ni 5) (no 2))
  (sequential-layer
   (affine-layer ni no :weight-initializer :random-uniform)))
;; Dueling DDQN network: a shared trunk CM feeding a state-value head VM
;; and an advantage head AM; the heads are combined in the $execute method.
(defclass duel-ddqn-model (layer)
  ((cm :initform (model-common))     ; shared trunk
   (vm :initform (model-value))      ; V(s) head
   (am :initform (model-advantage)))) ; A(s,a) head
;; Convenience constructor.
(defun model () (make-instance 'duel-ddqn-model))
;; Dueling combine: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a)).
;; Subtracting the per-row advantage mean keeps V and A identifiable.
(defmethod $execute ((m duel-ddqn-model) x &key (trainp T))
  (with-slots (cm vm am) m
    (let* ((hc ($execute cm x :trainp trainp))  ; trunk features
           (hv ($execute vm hc :trainp trainp)) ; V(s), one column
           (ha ($execute am hc :trainp trainp)) ; A(s,a), one column per action
           (sa ($size ha))
           (ma ($mean ha 1)))                   ; per-row mean advantage
      ;; expand V and mean-A to the advantage's shape before combining
      ($add ($expand hv sa) ($sub ha ($expand ma sa))))))
(defmethod $train-parameters ((m duel-ddqn-model))
  "Collect the trainable parameters of all three sub-networks, trunk first."
  (with-slots (cm vm am) m
    (loop :for sub :in (list cm vm am)
          :append ($train-parameters sub))))
;; Return an epsilon-greedy action selector over MODEL's outputs.
;; NOTE(review): uses $argmin, i.e. the network output is treated as a cost
;; to minimize (consistent with COSTS in train-model below) -- confirm.
(defun best-action-selector (model &optional (epsilon 0))
  (lambda (state)
    (if (> (random 1D0) epsilon)
        (let* ((state ($reshape state 1 4)) ; assumes 4-dim cartpole observation
               (q ($evaluate model state)))
          ($scalar ($argmin q 1)))
        (random 2))))                       ; two discrete actions
;; Prioritized experience replay buffer (proportional variant).
(defclass replay-buffer ()
  ((entries :initform nil)        ; circular storage of samples
   (deltas :initform nil)         ; |TD-error| priority per slot
   (nsz :initform 0)              ; number of filled slots
   (idx :initform -1)             ; next write position
   (alpha :initform 0.6)          ; priority exponent
   (beta :initform 0.1)           ; importance-sampling exponent
   (beta-rate :initform 0.99992))) ; annealing rate driving beta toward 1
(defun replay-buffer (size)
  "Construct an empty replay buffer holding at most SIZE samples; all
priorities start at 1 so new samples are eligible for selection."
  (let ((n (make-instance 'replay-buffer)))
    (with-slots (entries deltas nsz idx) n
      (setf entries (make-array size :initial-element nil)
            deltas ($fill! (tensor size) 1D0)
            nsz 0
            idx 0))
    n))
(defun add-sample (buffer sample)
  "Append SAMPLE to BUFFER at the write cursor, overwriting the oldest
entry once full.  New samples get the current maximum priority so they
are sampled at least once before their TD error is known."
  (with-slots (idx nsz entries deltas) buffer
    (let ((maxsz ($count entries))
          (maxd ($max deltas)))
      (setf ($ entries idx) sample
            ($ deltas idx) maxd)
      (setf nsz (min (1+ nsz) maxsz))
      ;; advance the circular write cursor
      (incf idx)
      (setf idx (rem idx maxsz)))
    buffer))
(defun update-deltas (buffer idcs tderrs)
  "Overwrite the priorities at positions IDCS with the absolute values of
the freshly computed TD errors TDERRS."
  (with-slots (entries deltas) buffer
    (setf ($index deltas 0 idcs) ($abs ($reshape tderrs ($count tderrs))))))
(defun update-beta! (buffer)
  "Anneal BUFFER's importance-sampling exponent BETA one step toward 1.
Dividing by BETA-RATE (< 1) grows beta geometrically; MIN caps it at 1.
Fix: the original WITH-SLOTS also listed BETA0, which is not a slot of
REPLAY-BUFFER -- harmless only while unused, removed here."
  (with-slots (beta beta-rate) buffer
    (setf beta (min 1D0 (/ beta beta-rate)))))
(defconstant +eps+ 1E-6)
(defun sample-experiences (buffer nbatch)
  "Draw NBATCH samples from BUFFER with probability proportional to
priority^ALPHA, returning (list indices normalized-IS-weights samples).
While fewer than NBATCH samples exist, returns everything with weight 1."
  (with-slots (entries nsz deltas alpha beta) buffer
    (if (>= nsz nbatch)
        (let* ((prs ($expt ($+ ($subview deltas 0 nsz) +eps+) alpha)) ; +eps+ avoids zero priority
               (pbs ($/ prs ($sum prs)))                 ; sampling distribution
               (wts ($expt ($* pbs nsz) beta))           ; importance-sampling weights
               (nwts ($/ wts ($max wts)))                ; normalized to max 1
               (indices (tensor.long (loop :for i :from 0 :below nsz :collect i)))
               (idcs (loop :repeat nbatch :collect ($choice indices pbs))))
          (update-beta! buffer)
          (list idcs
                ($reshape ($index nwts 0 idcs) nbatch 1)
                (loop :for i :in idcs :collect ($ entries i))))
        ;; not enough data yet: return all samples, uniformly weighted
        (list (loop :for i :from 0 :below nsz :collect i)
              ($reshape (tensor (loop :repeat nsz :collect 1)) nsz 1)
              (loop :for i :from 0 :below nsz :collect ($ entries i))))))
(defvar *max-buffer-size* 4096)
(defvar *batch-size* 512)
(defvar *max-epochs* 1000)
(defvar *eps0* 1D0)
(defvar *min-eps* 0.1D0)
(defvar *eps-decay-ratio* 0.9D0)
(defun train-model (model-online model-target buffer &optional (gamma 0.95D0) (lr 0.003))
  "One double-DQN update step on MODEL-ONLINE using a prioritized batch
from BUFFER.  The online net selects the next action (argmin over costs),
the target net evaluates it; TD errors are written back as new priorities.
Returns the scalar loss value."
  (let* ((experiences0 (sample-experiences buffer *batch-size*))
         (indices ($ experiences0 0))
         (nweights ($ experiences0 1))   ; importance-sampling weights
         (experiences ($ experiences0 2))
         (nr ($count experiences)))
    ;; unpack each experience tuple (state action cost next-state done)
    (let ((states (-> (apply #'$concat (mapcar #'$0 experiences))
                      ($reshape! nr 4)))
          (actions (-> (tensor.long (mapcar #'$1 experiences))
                       ($reshape! nr 1)))
          (costs (-> (tensor (mapcar #'$2 experiences))
                     ($reshape! nr 1)))
          (next-states (-> (apply #'$concat (mapcar #'$3 experiences))
                           ($reshape! nr 4)))
          (dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
                     ($reshape! nr 1))))
      (let* ((argmins (-> ($evaluate model-online next-states)
                          ($argmin 1)))          ; online net picks next action
             (qns (-> ($evaluate model-target next-states)
                      ($gather 1 argmins)))      ; target net evaluates it
             (xs states)
             (ts ($+ costs ($* gamma qns ($- 1 dones)))) ; TD target, masked at terminals
             (ys (-> ($execute model-online xs)
                     ($gather 1 actions)))
             (tderrs ($- ys ts))
             (loss ($mean ($square ($* nweights tderrs))))) ; weighted MSE
        ;; NOTE(review): backprop is presumably driven through the $execute
        ;; graph by $rmgd! (RMSprop step) -- confirm against th's optimizer API.
        ($rmgd! model-online lr)
        (update-deltas buffer indices ($data tderrs))
        ($data loss)))))
(defun report (epoch loss ntrain ctrain neval ceval success)
  "Print a one-line training summary on every 20th epoch, or always
once SUCCESS is reached."
  (when (or success (zerop (rem epoch 20)))
    (prn (format nil
                 "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"
                 epoch ntrain ctrain neval ceval loss))))
(defun polyak-averaging (target online &optional (tau 0.1D0))
  "Soft-update TARGET's parameters toward ONLINE's:
target <- tau * online + (1 - tau) * target."
  ;; clear any gradient graph on both models before mutating weights
  ($cg! (list target online))
  (loop :for pt :in ($parameters target)
        :for po :in ($parameters online)
        :for a = ($* tau ($data po))
        :for b = ($* (- 1 tau) ($data pt))
        :do ($set! ($data pt) ($+ a b))))
;; Thin alias used by the training loop.
(defun sync-models (target online)
  (polyak-averaging target online))
(defun generate-epsilons ()
(decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun duel-ddqn (&optional model)
  "Train a dueling double-DQN on cartpole until evaluation succeeds or
*MAX-EPOCHS* is reached; returns the trained online model.  Passing MODEL
resumes training on an existing network."
  (let* ((train-env (cartpole-env :train))
         (eval-env (cartpole-env :eval))
         (model-target (model))
         (model-online (or model (model)))
         (buffer (replay-buffer *max-buffer-size*))
         (excount 0)      ; total experiences collected
         (total-cost 0)
         (success nil)
         (epsilons (generate-epsilons)))
    (sync-models model-target model-online)
    (loop :for epoch :from 1 :to *max-epochs*
          :while (not success)
          :for eps = ($ epsilons (1- epoch))
          :do (let ((ctrain 0)
                    (ntrain 0))
                ;; NOTE(review): collect-experiences / evaluate are defined
                ;; elsewhere in this project; assumed to return
                ;; (experiences cost) and (n success cost) respectively.
                (let* ((exsi (collect-experiences train-env
                                                  (best-action-selector model-online eps)))
                       (exs (car exsi)))
                  (setf ctrain (cadr exsi))
                  (setf ntrain ($count exs))
                  (incf excount ntrain)
                  (loop :for e :in exs :do (add-sample buffer e))
                  (incf total-cost ctrain))
                (let* ((loss (train-model model-online model-target
                                          buffer
                                          0.95D0 0.008))
                       (eres (evaluate eval-env (best-action-selector model-online)))
                       (neval ($0 eres))
                       (ceval ($2 eres)))
                  (setf success ($1 eres))
                  (report epoch loss ntrain ctrain neval ceval success))
                ;; soft-update the target network after every epoch
                (sync-models model-target model-online)))
    (when success
      (prn (format nil "*** TOTAL ~6D / ~4,2F" excount total-cost)))
    model-online))
(defparameter *m* nil)
(setf *m* (duel-ddqn *m*))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m*)))
| 8,239
|
Common Lisp
|
.lisp
| 197
| 32.426396
| 91
| 0.54922
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
85bfd83ea20f665b696764ace5a3ceedaf00312c853f9d24bdebd1477de3df35
| 3,220
|
[
-1
] |
3,221
|
ch04.lisp
|
chunsj_TH/examples/books/gdrl/ch04.lisp
|
(defpackage :gdrl-ch04
(:use #:common-lisp
#:mu
#:mplot
#:th
#:th.env)
(:import-from #:th.env.bandits))
(in-package :gdrl-ch04)
(defun run-episodes (env &key (name "STRATEGY NAME") strategy initq initn (nepisodes 1000))
  "Run a bandit STRATEGY (fn of episode, Q, N -> action) on ENV for
NEPISODES pulls, maintaining incremental-mean Q estimates.  INITQ / INITN
optionally mutate the initial Q values and pull counts (e.g. optimism).
Returns (list NAME per-episode-returns Q-history actions)."
  (let ((q (zeros (env/action-count env)))            ; value estimate per arm
        (n (tensor.int (zeros (env/action-count env)))) ; pull count per arm
        (qe (tensor nepisodes (env/action-count env)))  ; Q snapshot per episode
        (returns (tensor nepisodes))
        (actions (tensor.int nepisodes)))
    (when initq (funcall initq q))
    (when initn (funcall initn n))
    (loop :for e :from 0 :below nepisodes
          :for action = (funcall strategy e q n)
          :for tx = (env/step! env action)
          :for reward = (transition/reward tx)
          :do (progn
                (incf ($ n action))
                ;; incremental mean: Q += (r - Q) / N
                (incf ($ q action) (/ (- reward ($ q action)) ($ n action)))
                (setf ($ qe e) q)
                (setf ($ returns e) reward)
                (setf ($ actions e) action)))
    (list name returns qe actions)))
;; Accessors for the result list produced by RUN-EPISODES.
(defun expr/name (res) ($ res 0))     ; strategy name string
(defun expr/returns (res) ($ res 1))  ; per-episode rewards
(defun expr/qe (res) ($ res 2))       ; per-episode Q snapshots
(defun expr/actions (res) ($ res 3))  ; per-episode chosen actions
(defun pure-exploitation (env &key (nepisodes 1000))
(run-episodes env :name "PURE EXPLOITATION"
:strategy (lambda (e q n)
(declare (ignore e n))
($argmax q))
:nepisodes nepisodes))
(defun pure-exploration (env &key (nepisodes 1000))
(run-episodes env :name "PURE EXPLORATION"
:strategy (lambda (e q n)
(declare (ignore e n))
(random ($count q)))
:nepisodes nepisodes))
(defun epsilon-greedy (env &key (epsilon 0.01) (nepisodes 1000))
(run-episodes env :name (format nil "E-GREEDY ~A" epsilon)
:strategy (lambda (e q n)
(declare (ignore e n))
(if (> (random 1D0) epsilon)
($argmax q)
(random ($count q))))
:nepisodes nepisodes))
(defun linear-decreasing-epsilon-greedy (env &key (epsilon0 1D0)
                                               (min-epsilon 0.01)
                                               (decay-ratio 0.05)
                                               (nepisodes 1000))
  "Epsilon-greedy bandit run where epsilon falls linearly from EPSILON0 to
MIN-EPSILON over the first DECAY-RATIO fraction of NEPISODES, then stays flat."
  (let ((decay-episodes (* nepisodes decay-ratio)))
    (flet ((epsilon-at (e)
             ;; linear interpolation, clamped into [min-epsilon, epsilon0]
             (let ((raw (+ (* (- 1D0 (/ e decay-episodes))
                              (- epsilon0 min-epsilon))
                           min-epsilon)))
               (min epsilon0 (max min-epsilon raw)))))
      (run-episodes env :name (format nil "LINEAR E-GREEDY ~A ~A ~A"
                                      epsilon0 min-epsilon decay-ratio)
                    :strategy (lambda (e q n)
                                (declare (ignore n))
                                (if (> (random 1D0) (epsilon-at e))
                                    ($argmax q)
                                    (random ($count q))))
                    :nepisodes nepisodes))))
(defun exponential-decreasing-epsilon-greedy (env &key (epsilon0 1D0)
(min-epsilon 0.01)
(decay-ratio 0.1)
(nepisodes 1000))
(let ((epsilons (let ((es ($+ ($* ($/ 0.01 (logspace -2 0 (round (* nepisodes decay-ratio))))
(- epsilon0 min-epsilon))
min-epsilon))
(eps (tensor nepisodes)))
($fill! eps ($last es))
(setf ($subview eps 0 ($count es)) es)
eps)))
(run-episodes env :name (format nil "EXPONENTIAL E-GREEDY ~A ~A ~A"
epsilon0 min-epsilon decay-ratio)
:strategy (lambda (e q n)
(declare (ignore n))
(let ((epsilon ($ epsilons e)))
(if (> (random 1D0) epsilon)
($argmax q)
(random ($count q))))))))
(defun optimistic-initialization (env &key (optimistic-estimate 1D0)
(initial-count 100)
(nepisodes 1000))
(run-episodes env :name (format nil "OPTIMISTIC INITIALIZATION ~A ~A"
optimistic-estimate initial-count)
:initq (lambda (q) ($fill! q optimistic-estimate))
:initn (lambda (n) ($fill! n initial-count))
:strategy (lambda (e q n)
(declare (ignore e n))
($argmax q))
:nepisodes nepisodes))
(defun softmax-strategy (env &key (temperature0 99999) (min-temperature 0.00001D0)
(decay-ratio 0.04)
(nepisodes 1000))
(let ((decay-episodes (* nepisodes decay-ratio)))
(run-episodes env :name (format nil "SOFTMAX ~A ~A ~A"
temperature0 min-temperature decay-ratio)
:strategy (lambda (e q n)
(declare (ignore n))
(let* ((tmp (let ((temp (- 1 (/ e decay-episodes))))
(setf temp (* temp (- temperature0 min-temperature)))
(incf temp min-temperature)
(if (< temp min-temperature)
(setf temp min-temperature))
(if (> temp temperature0)
(setf temp temperature0))
temp))
(scaled-q ($/ q tmp))
(norm-q ($- scaled-q ($max scaled-q)))
(exp-q ($exp norm-q))
(probs ($/ exp-q ($sum exp-q))))
($choice (env/action-space env) probs)))
:nepisodes nepisodes)))
;; UCB-style strategy: after an initial pass that pulls each arm once,
;; pick argmax_a Q(a) + sqrt(c * ln(e) / N(a)).
(defun upper-confidence-bound-strategy (env &key (c 2) (nepisodes 1000))
  (run-episodes env :name (format nil "UCB ~A" c)
                :strategy (lambda (e q n)
                            (let ((a e))
                              ;; first ($count q) episodes pull arm e directly
                              (when (>= e ($count q))
                                (let ((u ($sqrt ($* c ($/ (log e) (tensor n))))))
                                  (setf a ($argmax ($+ u q)))))
                              a))
                :nepisodes nepisodes))
;; Gaussian Thompson sampling: draw one sample per arm from
;; N(Q(a), alpha / (sqrt(N(a)) + beta)) and pick the best draw; variance
;; shrinks as an arm accumulates pulls.
(defun thompson-sampling-strategy (env &key (alpha 1) (beta 1) (nepisodes 1000))
  (run-episodes env :name (format nil "THOMPSON SAMPLING ~A ~A" alpha beta)
                :strategy (lambda (e q n)
                            (declare (ignore e))
                            ($argmax (random-normals q ($/ alpha ($+ ($sqrt (tensor n)) beta)))))
                :nepisodes nepisodes))
(defun basic-experiments ()
(list (lambda (env)
(pure-exploitation env))
(lambda (env)
(pure-exploration env))
(lambda (env)
(epsilon-greedy env :epsilon 0.07))
(lambda (env)
(epsilon-greedy env :epsilon 0.1))
(lambda (env)
(linear-decreasing-epsilon-greedy env :epsilon0 1.0
:min-epsilon 0.0
:decay-ratio 0.1))
(lambda (env)
(linear-decreasing-epsilon-greedy env :epsilon0 0.3
:min-epsilon 0.001
:decay-ratio 0.1))
(lambda (env)
(exponential-decreasing-epsilon-greedy env :epsilon0 1.0
:min-epsilon 0.0
:decay-ratio 0.1))
(lambda (env)
(exponential-decreasing-epsilon-greedy env :epsilon0 0.3
:min-epsilon 0.0
:decay-ratio 0.3))
(lambda (env)
(optimistic-initialization env :optimistic-estimate 1.0 :initial-count 10))
(lambda (env)
(optimistic-initialization env :optimistic-estimate 1.0 :initial-count 50))))
(defun advanced-experiments ()
(list (lambda (env) (pure-exploitation env))
(lambda (env) (pure-exploration env))
(lambda (env)
(exponential-decreasing-epsilon-greedy env :epsilon0 0.3
:min-epsilon 0.0
:decay-ratio 0.3))
(lambda (env)
(optimistic-initialization env :optimistic-estimate 1.0 :initial-count 10))
(lambda (env) (softmax-strategy env :decay-ratio 0.005))
(lambda (env) (softmax-strategy env :temperature0 100
:min-temperature 0.01
:decay-ratio 0.005))
(lambda (env) (upper-confidence-bound-strategy env :c 0.2))
(lambda (env) (upper-confidence-bound-strategy env :c 0.5))
(lambda (env) (thompson-sampling-strategy env))
(lambda (env) (thompson-sampling-strategy env :alpha 0.5 :beta 0.5))))
(defun report-last-qs (env strategies)
(let* ((true-q (env/true-q env))
(opt-v ($max true-q))
(expreses (loop :for strategy :in strategies
:collect (funcall strategy env))))
(prn "***********")
(prn "* RESULTS *")
(prn "***********")
(prn "")
(prn "* TRUE Q:" true-q)
(prn "* OPTM V:" opt-v "* OPTM A:" ($argmax true-q))
(loop :for er :in expreses
:for sname = (expr/name er)
:for qe = (expr/qe er)
:for lq = ($ qe (1- ($size qe 0)))
:do (prn sname "=>" lq))
(prn "")))
;; testing strategy - basic
(report-last-qs (th.env.bandits:two-armed-bernoulli-bandit-env 0.8)
(basic-experiments))
;; testing strategy - advanced
(report-last-qs (th.env.bandits:two-armed-bernoulli-bandit-env 0.8)
(advanced-experiments))
(defun run-experiments (experiments env)
(let* ((true-q (env/true-q env))
(opt-v ($max true-q))
(res #{}))
(loop :for experiment :in experiments
:for strategy-result = (progn
(env/reset! env)
(funcall experiment env))
:for name = (expr/name strategy-result)
:for returns = (expr/returns strategy-result)
:for q-episodes = (expr/qe strategy-result)
:for action-episodes = (expr/actions strategy-result)
:for accum-returns = ($cumsum returns)
:for mean-rewards = ($/ accum-returns ($+ 1 (arange 0 ($count returns))))
:for q-selected = (tensor (loop :for i :from 0 :below ($count action-episodes)
:for a = ($ action-episodes i)
:collect ($ true-q a)))
:for regret = ($- opt-v q-selected)
:for accum-regret = ($cumsum regret)
:do (setf ($ res name)
(let ((sres #{}))
(setf ($ sres :returns) returns
($ sres :accum-returns) accum-returns
($ sres :qe) q-episodes
($ sres :ae) action-episodes
($ sres :accum-regret) accum-regret
($ sres :mean-rewards) mean-rewards)
sres)))
res))
(defparameter *basic-results* (run-experiments
(basic-experiments)
(th.env.bandits:two-armed-bernoulli-bandit-env 0.8)))
(let* ((name "EXPONENTIAL E-GREEDY 1.0 0.0 0.1")
(vs ($list ($ ($ *basic-results* name) :accum-regret))))
(plot-lines (nthcdr 1 vs) :yrange (cons 0 30)))
(defparameter *advanced-results* (run-experiments
(advanced-experiments)
(th.env.bandits:two-armed-bernoulli-bandit-env 0.8)))
(let* ((name "SOFTMAX 100 0.01 0.005")
(vs ($list ($ ($ *advanced-results* name) :accum-regret))))
(plot-lines (nthcdr 1 vs) :yrange (cons 0 5)))
(let ((env (th.env.bandits:ten-armed-gaussian-bandit-env)))
(env/step! env 0))
;; 10 armed bandit
(let* ((env (th.env.bandits:ten-armed-gaussian-bandit-env))
(true-q (env/true-q env))
(opt-v ($max true-q)))
(list (env/p-dist env) (env/r-dist env) true-q opt-v))
(let* ((env (th.env.bandits:ten-armed-gaussian-bandit-env))
(true-q (env/true-q env))
(expres (pure-exploration env)))
(list true-q (expr/qe expres)))
(let* ((env (th.env.bandits:ten-armed-gaussian-bandit-env))
(true-q (env/true-q env))
(expres (epsilon-greedy env))
(qe (expr/qe expres))
(lq ($ qe (1- ($size qe 0)))))
(list true-q ($max true-q) ($argmax true-q) lq ($max lq) ($argmax lq)))
(let* ((env (th.env.bandits:ten-armed-gaussian-bandit-env))
(true-q (env/true-q env))
(expres (optimistic-initialization env :optimistic-estimate 1D0
:initial-count 50))
(qe (expr/qe expres))
(lq ($ qe (1- ($size qe 0)))))
(list true-q ($max true-q) ($argmax true-q) lq ($max lq) ($argmax lq)))
(report-last-qs (th.env.bandits:ten-armed-gaussian-bandit-env)
(basic-experiments))
(report-last-qs (th.env.bandits:ten-armed-gaussian-bandit-env)
(advanced-experiments))
(defparameter *basic-results*
(run-experiments (basic-experiments) (th.env.bandits:ten-armed-gaussian-bandit-env)))
(let* ((name "EXPONENTIAL E-GREEDY 1.0 0.0 0.1")
(vs ($list ($ ($ *basic-results* name) :accum-regret))))
(plot-lines (nthcdr 1 vs) :yrange (cons 0 100)))
(defparameter *advanced-results*
(run-experiments (advanced-experiments) (th.env.bandits:ten-armed-gaussian-bandit-env)))
(let* ((name "SOFTMAX 100 0.01 0.005")
(vs ($list ($ ($ *advanced-results* name) :accum-regret))))
(plot-lines (nthcdr 1 vs) :yrange (cons 0 5)))
| 14,969
|
Common Lisp
|
.lisp
| 286
| 33.730769
| 101
| 0.463423
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
a4400f2b965bd4b6b3b306070093c1aa8e5b53e1635d9cd7c08197420ba7e209
| 3,221
|
[
-1
] |
3,222
|
dlfs06.lisp
|
chunsj_TH/examples/books/dlfs/dlfs06.lisp
|
(defpackage :dlfs-06
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :dlfs-06)
;; mnist data loading - takes time, so load and set
(defparameter *mnist* (read-mnist-data))
(prn *mnist*)
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below 6
:for rng = (loop :for k :from (* i 10000) :below (* (1+ i) 10000)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 rng))))
(defparameter *mnist-train-label-batches*
(loop :for i :from 0 :below 6
:for rng = (loop :for k :from (* i 10000) :below (* (1+ i) 10000)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-labels) 0 rng))))
;; network parameters
(defparameter *w1* ($parameter (rndn 784 50)))
(defparameter *b1* ($parameter (zeros 50)))
(defparameter *w2* ($parameter (rndn 50 100)))
(defparameter *b2* ($parameter (zeros 100)))
(defparameter *w3* ($parameter (rndn 100 10)))
(defparameter *b3* ($parameter (zeros 10)))
(defun mnist-reset-parameters-xavier ()
(setf *w1* ($parameter ($div (rnd 784 50) ($sqrt 784))))
(setf *b1* ($parameter (zeros 50)))
(setf *w2* ($parameter ($div (rnd 50 100) ($sqrt 50))))
(setf *b2* ($parameter (zeros 100)))
(setf *w3* ($parameter ($div (rnd 100 10) ($sqrt 100))))
(setf *b3* ($parameter (zeros 10))))
(defun mnist-reset-parameters-he ()
(setf *w1* ($parameter ($div (rnd 784 50) ($sqrt (/ 784 2)))))
(setf *b1* ($parameter (zeros 50)))
(setf *w2* ($parameter ($div (rnd 50 100) ($sqrt (/ 50 2)))))
(setf *b2* ($parameter (zeros 100)))
(setf *w3* ($parameter ($div (rnd 100 10) ($sqrt (/ 100 2)))))
(setf *b3* ($parameter (zeros 10))))
;; 3-layer MLP classifier (784 -> 50 -> 100 -> 10) with sigmoid activations,
;; using the global *w/b* parameters; returns softmax class probabilities.
(defun mnist-predict (x)
  (-> x
      ($xwpb *w1* *b1*)
      ($sigmoid)
      ($xwpb *w2* *b2*)
      ($sigmoid)
      ($xwpb *w3* *b3*)
      ($softmax)))
;; Same network with ReLU activations (pairs with the He initialization).
(defun mnist-predict-relu (x)
  (-> x
      ($xwpb *w1* *b1*)
      ($relu)
      ($xwpb *w2* *b2*)
      ($relu)
      ($xwpb *w3* *b3*)
      ($softmax)))
(defun mnist-predict-relu-do (x &optional (trainp t) (p 0.1))
(-> x
($xwpb *w1* *b1*)
($relu)
($dropout trainp p)
($xwpb *w2* *b2*)
($relu)
($dropout trainp p)
($xwpb *w3* *b3*)
($softmax)))
(defparameter *g1* ($parameter (ones 50)))
(defparameter *e1* ($parameter (zeros 50)))
(defparameter *m1* (zeros 50))
(defparameter *v1* (ones 50))
(defparameter *g2* ($parameter (ones 100)))
(defparameter *e2* ($parameter (zeros 100)))
(defparameter *m2* (zeros 100))
(defparameter *v2* (ones 100))
(defun mnist-reset-parameters-bn ()
(setf *w1* ($parameter ($div (rnd 784 50) ($sqrt (/ 784 2)))))
(setf *b1* ($parameter (zeros 50)))
(setf *w2* ($parameter ($div (rnd 50 100) ($sqrt (/ 50 2)))))
(setf *b2* ($parameter (zeros 100)))
(setf *w3* ($parameter ($div (rnd 100 10) ($sqrt (/ 100 2)))))
(setf *b3* ($parameter (zeros 10)))
(setf *g1* ($parameter (ones 50)))
(setf *e1* ($parameter (zeros 50)))
(setf *m1* (zeros 50))
(setf *v1* (ones 50))
(setf *g2* ($parameter (ones 100)))
(setf *e2* ($parameter (zeros 100)))
(setf *m2* (zeros 100))
(setf *v2* (ones 100)))
(defun prn-and-pass (x)
(prn x)
x)
(defun mnist-predict-bn (x &optional (trainp t))
(-> x
($xwpb *w1* *b1*)
($bnorm *g1* *e1* *m1* *v1* trainp)
($relu)
($xwpb *w2* *b2*)
($bnorm *g2* *e2* *m2* *v2* trainp)
($relu)
($xwpb *w3* *b3*)
($softmax)))
(defun mnist-predict-bns (x &optional (trainp t))
(-> x
($xwpb *w1* *b1*)
($bnorm nil nil *m1* *v1* trainp)
($relu)
($xwpb *w2* *b2*)
($bnorm nil nil *m2* *v2* trainp)
($relu)
($xwpb *w3* *b3*)
($softmax)))
(defun mnist-loss (prediction truth) ($cee prediction truth))
(defun mnist-loss-wr (prediction truth &optional (l 0.1))
($+ (mnist-loss prediction truth)
($* ($+ ($sum ($* *w1* *w1*))
($sum ($* *b1* *b1*))
($sum ($* *w2* *w2*))
($sum ($* *b2* *b2*))
($sum ($* *w3* *w3*))
($sum ($* *b3* *b3*)))
(/ l (+ ($count *w1*)
($count *b1*)
($count *w2*)
($count *b2*)
($count *w3*)
($count *b3*))))))
(defun mnist-write-weight-to (w fname)
(let ((f (file.disk fname "w")))
($fwrite ($data w) f)
($fclose f)))
(defun mnist-write-weights ()
(mnist-write-weight-to *w1* "dlfs/mnist-w1.dat")
(mnist-write-weight-to *b1* "dlfs/mnist-b1.dat")
(mnist-write-weight-to *w2* "dlfs/mnist-w2.dat")
(mnist-write-weight-to *b2* "dlfs/mnist-b2.dat")
(mnist-write-weight-to *w3* "dlfs/mnist-w3.dat")
(mnist-write-weight-to *b3* "dlfs/mnist-b3.dat"))
(defun mnist-read-weight-from (w fname)
(let ((f (file.disk fname "r")))
($fread ($data w) f)
($fclose f)))
(defun mnist-read-weights ()
(mnist-read-weight-from *w1* "dlfs/mnist-w1.dat")
(mnist-read-weight-from *b1* "dlfs/mnist-b1.dat")
(mnist-read-weight-from *w2* "dlfs/mnist-w2.dat")
(mnist-read-weight-from *b2* "dlfs/mnist-b2.dat")
(mnist-read-weight-from *w3* "dlfs/mnist-w3.dat")
(mnist-read-weight-from *b3* "dlfs/mnist-b3.dat"))
;; write to file
(mnist-write-weights)
;; read from file
(mnist-read-weights)
;; code test
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(mnist-reset-parameters-xavier)
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(mnist-reset-parameters-he)
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(mnist-reset-parameters-bn)
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-bn xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(defun mnist-test-stat ()
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(defun mnist-test-stat-relu ()
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
;; compare sgd vs others
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-xavier)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($gd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01)
(a 0.9))
(mnist-reset-parameters-xavier)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($mgd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr a))))
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-xavier)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
;; relu model comparison
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($gd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01)
(a 0.9))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($mgd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr a))))
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
;; dropout
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01)
(p 0.4))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict-relu-do x t p)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels))
(p 0.4))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu-do xi nil p))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
;; without weight regularization
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 10
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
;; with weight regularization - does not work in 50 step
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01)
(l 0.1))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 100
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss-wr y* y l)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
;; test result
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu ($constant xi)))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
;; without batch normalization
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 0.01))
(mnist-reset-parameters-he)
(loop :for i :from 1 :to 50
:for y* = (mnist-predict-relu x)
:for loss = (mnist-loss y* y)
:do (progn
(prn (list i ($data loss)))
($gs! loss)
($agd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr))))
;; test result
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-relu xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
;; batch normalization - does not converge
(let* ((x (-> *mnist*
              ($ :train-images)))
       (y (-> *mnist*
              ($ :train-labels)))
       (lr 0.01))
  (mnist-reset-parameters-bn)
  (loop :for i :from 1 :to 40
        :for y* = (mnist-predict-bn x)
        :for loss = (mnist-loss y* y)
        :do (progn
              (prn (list i ($data loss)))
              ($gs! loss)
              ;; FIX: train the BN shift parameter *e2*, not the running-mean
              ;; buffer *m2*.  *m2* is a plain statistics tensor (not a
              ;; $parameter), so passing it to $agd! was a typo that left *e2*
              ;; untrained -- a plausible cause of the "does not converge" note.
              ($agd! (list *w1* *b1* *w2* *b2* *w3* *b3* *g1* *e1* *g2* *e2*) lr))))
;; test result
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict-bn xi nil))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
| 14,929
|
Common Lisp
|
.lisp
| 407
| 27.651106
| 84
| 0.471267
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
1c4f6b4656856b06924fca397ccce13678bfdcd91fe096d2acf30f78b3813e08
| 3,222
|
[
-1
] |
3,223
|
dlfs02.lisp
|
chunsj_TH/examples/books/dlfs/dlfs02.lisp
|
(defpackage :dlfs-02
(:use #:common-lisp
#:mu
#:th))
(in-package :dlfs-02)
(defun and-gate (x1 x2)
  "Perceptron AND gate: returns 1 only when the weighted sum of the two
binary inputs (weight 0.5 each) exceeds the threshold 0.7; otherwise 0."
  (let* ((weight 0.5)
         (threshold 0.7)
         (activation (+ (* weight x1) (* weight x2))))
    (if (> activation threshold) 1 0)))
(prn (and-gate 0 0))
(prn (and-gate 1 0))
(prn (and-gate 0 1))
(prn (and-gate 1 1))
(defun and-gate (x1 x2)
(let ((x (tensor (list x1 x2)))
(w (tensor '(0.5 0.5)))
(b -0.7))
(let ((tmp ($+ ($sum ($* w x)) b)))
(if (<= tmp 0) 0 1))))
(prn (and-gate 0 0))
(prn (and-gate 1 0))
(prn (and-gate 0 1))
(prn (and-gate 1 1))
(defun nand-gate (x1 x2)
(let ((x (tensor (list x1 x2)))
(w (tensor '(-0.5 -0.5)))
(b 0.7))
(let ((tmp ($+ ($sum ($* w x)) b)))
(if (<= tmp 0) 0 1))))
(prn (nand-gate 0 0))
(prn (nand-gate 1 0))
(prn (nand-gate 0 1))
(prn (nand-gate 1 1))
(defun or-gate (x1 x2)
(let ((x (tensor (list x1 x2)))
(w (tensor '(0.5 0.5)))
(b -0.2))
(let ((tmp ($+ ($sum ($* w x)) b)))
(if (<= tmp 0) 0 1))))
(prn (or-gate 0 0))
(prn (or-gate 1 0))
(prn (or-gate 0 1))
(prn (or-gate 1 1))
(defun xor-gate (x1 x2)
(let ((s1 (nand-gate x1 x2))
(s2 (or-gate x1 x2)))
(and-gate s1 s2)))
(prn (xor-gate 0 0))
(prn (xor-gate 1 0))
(prn (xor-gate 0 1))
(prn (xor-gate 1 1))
| 1,349
|
Common Lisp
|
.lisp
| 54
| 20.888889
| 40
| 0.481308
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
0bf8f2dda7a067f7461d51607671d3b5b340cffb89c83b742fdc533cfc6e12a0
| 3,223
|
[
-1
] |
3,224
|
dlfs01.lisp
|
chunsj_TH/examples/books/dlfs/dlfs01.lisp
|
(defpackage :dlfs-01
(:use #:common-lisp
#:mu
#:th))
(in-package :dlfs-01)
;; creating tensor
(let ((x (tensor '(1 2 3))))
(prn x)
(prn (type-of x)))
;; generic operations
(let ((x (tensor '(1 2 3)))
(y (tensor '(2 4 6))))
(prn ($+ x y))
(prn ($- x y))
(prn ($* x y))
(prn ($/ x y)))
;; constant broadcasting - different from numpy
(let ((x (tensor '(1 2 3)))
(c 2))
(prn ($/ x ($broadcast c x)))
(prn ($/ x c)))
;; n-dimensional
(let ((a (tensor '((1 2) (3 4)))))
(prn a)
(prn ($size a))
(prn ($type a)))
;; operations on matrices
(let ((a (tensor '((1 2) (3 4))))
(b (tensor '((3 0) (0 6)))))
(prn ($+ a b))
(prn ($* a b)))
;; broadcasting again
(let ((a (tensor '((1 2) (3 4))))
(c 10))
(prn ($* a ($broadcast c a)))
(prn ($* a 10)))
;; however, different from numpy style, th needs explicit shape adjustments
(let ((a (tensor '((1 2) (3 4))))
(b (tensor '(10 20))))
(prn ($* a ($vv (ones ($size a 0)) b)))
(prn ($vv (ones ($size a 0)) b))
(prn ($vv b (ones ($size a 1)))))
;; with support functions
(let ((a (tensor '((1 2) (3 4))))
(b (tensor '(10 20))))
(prn ($* a ($krows b ($size a 0))))
(prn ($* a ($kcols b ($size a 1))))
(prn ($* a ($broadcast b a))))
;; accessing elements
(let ((x (tensor '((51 55) (14 19) (0 4)))))
(prn x)
(prn ($ x 0))
(prn ($ x 0 1))
(loop :for i :from 0 :below ($size x 0)
:do (prn ($ x i)))
(let ((x ($reshape x ($count x))))
(prn x)
(prn ($gather x 0 '(0 2 4)))))
| 1,533
|
Common Lisp
|
.lisp
| 58
| 23.155172
| 75
| 0.494198
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
aa1af1a457ace3a11d79a491a02ba7dd13c5a530bcf24c64d18a90131e6b77ea
| 3,224
|
[
-1
] |
3,225
|
dlfs07.lisp
|
chunsj_TH/examples/books/dlfs/dlfs07.lisp
|
(defpackage :dlfs-07
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :dlfs-07)
;; prepare data for later use, it takes some time to load
(defparameter *mnist* (read-mnist-data))
(prn *mnist*)
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below 60
:for rng = (loop :for k :from (* i 1000) :below (* (1+ i) 1000)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 rng))))
(defparameter *mnist-train-label-batches*
(loop :for i :from 0 :below 60
:for rng = (loop :for k :from (* i 1000) :below (* (1+ i) 1000)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-labels) 0 rng))))
;; run convolution with mnist data
(let* ((indices '(0 1 2 3 4))
(nbatch ($count indices))
(nch 1)
(x ($index ($ *mnist* :train-images) 0 indices))
(x ($reshape x nbatch nch 28 28))
(k (tensor '((((1 1 1) (1 1 1) (1 1 1))))))
(b (tensor '(1))))
(prn x)
(prn k)
(prn ($conv2d x k b)))
;; more systematic
(defun mkfilter (fn nc kw kh) (tensor fn nc kw kh))
(defun mkfbias (fn) (tensor fn))
;; use helper functions
(let* ((indices '(0 1 2 3 4))
(nbatch ($count indices))
(nfilter 1)
(x ($index ($ *mnist* :train-images) 0 indices))
(nch 1)
(x ($reshape x nbatch nch 28 28))
(k (-> (mkfilter nfilter nch 3 3) ($fill! 1)))
(b (-> (mkfbias nfilter) ($fill! 1))))
(prn x)
(prn k)
(prn ($conv2d x k b)))
;; with max pooling
(let* ((indices '(0 1 2 3 4))
(nbatch ($count indices))
(nfilter 1)
(x ($index ($ *mnist* :train-images) 0 indices))
(nch 1)
(x ($reshape x nbatch nch 28 28))
(k (-> (mkfilter nfilter nch 3 3) ($fill! 1)))
(b (-> (mkfbias nfilter) ($fill! 1)))
(c ($conv2d x k b))
(p ($maxpool2d c 2 2)))
(prn c)
(prn p))
;; constructing network - smaller samples and single step only
(let* ((indices '(0 1 2 3 4 5 6 7 8 9))
(nbatch ($count indices))
(nch 1)
(nfilter 30)
(imgw 28)
(imgh 28)
(kw 5)
(kh 5)
(pw 2)
(ph 2)
(nl2 100)
(nl3 10)
(k (-> (mkfilter nfilter nch kw kh)
($uniform! 0 1)
($div (sqrt (/ 2.0 (* imgw imgh))))
($parameter)))
(bk (-> (mkfbias nfilter)
($fill! 0)
($parameter)))
(w2 ($parameter (rndn (* nfilter 12 12) nl2)))
(b2 ($parameter (zeros nl2)))
(w3 ($parameter (rndn nl2 nl3)))
(b3 ($parameter (rndn nl3)))
(x ($index ($ *mnist* :train-images) 0 indices))
(y ($index ($ *mnist* :train-labels) 0 indices))
(c ($conv2d ($reshape x nbatch nch imgw imgh) k bk))
(l1 ($relu c))
(p1 ($maxpool2d l1 pw ph 2 2))
(o1 ($reshape p1 nbatch (* nfilter 12 12)))
(z2 ($xwpb o1 w2 b2))
(l2 ($relu z2))
(z3 ($xwpb l2 w3 b3))
(l3 ($softmax z3))
(er ($cee l3 y)))
(prn er))
;; with full data
(let* ((indices (loop :for i :from 0 :below 1000 :collect i))
(nbatch ($count indices))
(nch 1)
(nfilter 30)
(imgw 28)
(imgh 28)
(kw 5)
(kh 5)
(pw 2)
(ph 2)
(nl2 100)
(nl3 10)
(k (-> (mkfilter nfilter nch kw kh)
($uniform! 0 1)
($div (sqrt (/ 2.0 (* imgw imgh))))
($parameter)))
(bk (-> (mkfbias nfilter)
($fill! 0)
($parameter)))
(w2 ($parameter (rndn (* nfilter 12 12) nl2)))
(b2 ($parameter (zeros nl2)))
(w3 ($parameter (rndn nl2 nl3)))
(b3 ($parameter (rndn nl3)))
(x ($index ($ *mnist* :train-images) 0 indices))
(y ($index ($ *mnist* :train-labels) 0 indices))
(c (time ($conv2d ($reshape x nbatch nch imgw imgh) k bk)))
(l1 ($relu c))
(p1 (time ($maxpool2d l1 pw ph 2 2)))
(o1 (time ($reshape p1 nbatch (* nfilter 12 12))))
(z2 (time ($xwpb o1 w2 b2)))
(l2 ($relu z2))
(z3 (time ($xwpb l2 w3 b3)))
(l3 ($softmax z3))
(er ($cee l3 y)))
(prn er))
;; checking convolution speed
(let* ((indices (loop :for i :from 0 :below 10000 :collect i))
(nbatch ($count indices))
(nfilter 30)
(x ($index ($ *mnist* :train-images) 0 indices))
(nch 1)
(x ($reshape x nbatch nch 28 28))
(k (-> (mkfilter nfilter nch 3 3) ($fill! 1)))
(b (-> (mkfbias nfilter) ($fill! 1))))
(prn (time ($conv2d x k b))))
(defparameter *filter-number* 30)
(defparameter *channel-number* 1)
(defparameter *filter-width* 5)
(defparameter *filter-height* 5)
(defparameter *pool-width* 2)
(defparameter *pool-height* 2)
(defparameter *pool-stride-width* 2)
(defparameter *pool-stride-height* 2)
(defparameter *pool-out-width* 12)
(defparameter *pool-out-height* 12)
(defparameter *l2-output* 100)
(defparameter *l3-output* 10)
(defparameter *k* (-> (mkfilter *filter-number* *channel-number*
*filter-width* *filter-height*)
($uniform! 0 0.01)
($parameter)))
(defparameter *kb* (-> (mkfbias *filter-number*)
($zero!)
($parameter)))
(defparameter *w2* (-> (rnd (* *filter-number* *pool-out-width* *pool-out-height*)
*l2-output*)
($mul! 0.01)
($parameter)))
(defparameter *b2* (-> (zeros *l2-output*)
($parameter)))
(defparameter *w3* (-> (rnd *l2-output* *l3-output*)
($mul! 0.01)
($parameter)))
(defparameter *b3* (-> (zeros *l3-output*)
($parameter)))
(defun mnist-write-weight-to (w fname)
(let ((f (file.disk fname "w")))
($fwrite ($data w) f)
($fclose f)))
(defun mnist-cnn-write-weights ()
(mnist-write-weight-to *k* "dlfs/mnist-cnn-k.dat")
(mnist-write-weight-to *kb* "dlfs/mnist-cnn-kb.dat")
(mnist-write-weight-to *w2* "dlfs/mnist-cnn-w2.dat")
(mnist-write-weight-to *b2* "dlfs/mnist-cnn-b2.dat")
(mnist-write-weight-to *w3* "dlfs/mnist-cnn-w3.dat")
(mnist-write-weight-to *b3* "dlfs/mnist-cnn-b3.dat"))
(defun mnist-read-weight-from (w fname)
(let ((f (file.disk fname "r")))
($fread ($data w) f)
($fclose f)))
(defun mnist-cnn-read-weights ()
(mnist-read-weight-from *k* "dlfs/mnist-cnn-k.dat")
(mnist-read-weight-from *kb* "dlfs/mnist-cnn-kb.dat")
(mnist-read-weight-from *w2* "dlfs/mnist-cnn-w2.dat")
(mnist-read-weight-from *b2* "dlfs/mnist-cnn-b2.dat")
(mnist-read-weight-from *w3* "dlfs/mnist-cnn-w3.dat")
(mnist-read-weight-from *b3* "dlfs/mnist-cnn-b3.dat"))
;; x should have been reshaped before entering
(defun mnist-predict (x)
(-> x
($conv2d *k* *kb*)
($relu)
($maxpool2d *pool-width* *pool-height*
*pool-stride-width* *pool-stride-height*)
($reshape ($size x 0) (* *filter-number* *pool-out-width* *pool-out-height*))
($xwpb *w2* *b2*)
($relu)
($xwpb *w3* *b3*)
($softmax)))
;; use batches for performance
(loop :for epoch :from 1 :to 5
:do (loop :for i :from 0 :below 10
:for xi = ($ *mnist-train-image-batches* i)
:for x = (-> xi
($reshape ($size xi 0) *channel-number* 28 28))
:for y = (-> ($ *mnist-train-label-batches* i))
:for y* = (mnist-predict x)
:for loss = ($cee y* y)
:do (progn
(format t "[~A|~A]: ~A~%" (1+ i) epoch loss)
(finish-output)
($gs! loss)
($agd! (list *k* *kb* *w2* *b2* *w3* *b3*) 0.01))))
;; test
(let* ((xtest ($ *mnist* :test-images))
(ytest ($ *mnist* :test-labels)))
(prn ($cee (mnist-predict (-> xtest
($reshape ($size xtest 0) 1 28 28)))
ytest)))
;; write weights
(mnist-cnn-write-weights)
;; read weights
(mnist-cnn-read-weights)
;; test stats
(defun mnist-test-stat ()
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict ($reshape xi ($size xi 0) 1 28 28)))
:for err = (let ((e ($sum ($abs ($sub ($round yi*) yi)))))
(when (> e 0) (prn (list i e)))
e)
:when (> err 0)
:collect i))))
(prn (mnist-test-stat))
| 8,779
|
Common Lisp
|
.lisp
| 242
| 28.024793
| 87
| 0.514917
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
b91fc202e9252df4ce6da9a55e1e26b7be8e09bfd06758e4c7e9557492cb76f6
| 3,225
|
[
-1
] |
3,226
|
dlfs04.lisp
|
chunsj_TH/examples/books/dlfs/dlfs04.lisp
|
(defpackage :dlfs-04
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :dlfs-04)
;; mnist data loading - takes time, so load and set
(defparameter *mnist* (read-mnist-data))
(prn *mnist*)
;; network parameters
(defparameter *w1* ($parameter (rndn 784 50)))
(defparameter *b1* ($parameter (zeros 50)))
(defparameter *w2* ($parameter (rndn 50 100)))
(defparameter *b2* ($parameter (zeros 100)))
(defparameter *w3* ($parameter (rndn 100 10)))
(defparameter *b3* ($parameter (zeros 10)))
(defun mnist-predict (x)
(-> x
($xwpb *w1* *b1*)
($sigmoid)
($xwpb *w2* *b2*)
($sigmoid)
($xwpb *w3* *b3*)
($softmax)))
(defun mnist-loss (prediction trueth) ($cee prediction trueth))
;; write to file
(let ((f (file.disk "dlfs/mnist-w1.dat" "w")))
($fwrite ($data *w1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b1.dat" "w")))
($fwrite ($data *b1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w2.dat" "w")))
($fwrite ($data *w2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b2.dat" "w")))
($fwrite ($data *b2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w3.dat" "w")))
($fwrite ($data *w3*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b3.dat" "w")))
($fwrite ($data *b3*) f)
($fclose f))
;; read from file
(let ((f (file.disk "dlfs/mnist-w1.dat" "r")))
($fread ($data *w1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b1.dat" "r")))
($fread ($data *b1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w2.dat" "r")))
($fread ($data *w2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b2.dat" "r")))
($fread ($data *b2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w3.dat" "r")))
($fread ($data *w3*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b3.dat" "r")))
($fread ($data *b3*) f)
($fclose f))
;; backprop testing
(let* ((sels '(0 1 2 3 4))
(x (-> *mnist*
($ :train-images)
($index 0 sels)))
(y (-> *mnist*
($ :train-labels)
($index 0 sels)))
(lr 0.01))
(loop :for i :from 1 :below 100
:do (let* ((y* (mnist-predict x))
(loss (mnist-loss y* y)))
(prn loss)
($gs! loss)
($gd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr)))
(prn y)
(prn ($round ($data (mnist-predict x)))))
| 2,371
|
Common Lisp
|
.lisp
| 80
| 25.2375
| 63
| 0.540079
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
f44ff2ed07069bd79d577371f62d92f1f2c2686ce6030c8803222f07833b6637
| 3,226
|
[
-1
] |
3,227
|
dlfs03.lisp
|
chunsj_TH/examples/books/dlfs/dlfs03.lisp
|
(defpackage :dlfs-03
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :dlfs-03)
;; sigmoid function
(prn ($sigmoid 0))
;; step function
(defun step-function (x)
(if ($tensorp x)
(tensor ($gt x 0))
($gt x 0)))
;; testing step-function
(let ((x (tensor '(-1 1 2))))
(prn x)
(prn ($gt x 0))
(prn (step-function x)))
;; testing sigmoid
(let ((x (tensor '(-1 1 2))))
(prn ($sigmoid x)))
;; relu function
(prn ($relu 1))
(prn ($relu (tensor '(-2 1 2))))
;; multidimensional array
(let ((a (tensor '(1 2 3 4))))
(prn a)
(prn ($size a))
(prn ($size a 0)))
(let ((b (tensor '((1 2) (3 4) (5 6)))))
(prn b)
(prn ($ndim b))
(prn ($size b)))
;; matrix product
(let ((a (tensor '((1 2) (3 4))))
(b (tensor '((5 6) (7 8)))))
(prn ($size a))
(prn ($size b))
(prn ($@ a b)))
(let ((a (tensor '((1 2 3) (4 5 6))))
(b (tensor '((1 2) (3 4) (5 6)))))
(prn ($size a))
(prn ($size b))
(prn ($@ a b)))
;; mv
(let ((a (tensor '((1 2) (3 4) (5 6))))
(b (tensor '(7 8))))
(prn ($mv a b))
(prn ($@ a b)))
;; neural network - note that size of x is different from the book
(let ((x (tensor '((1 2))))
(w (tensor '((1 3 5) (2 4 6)))))
(prn w)
(prn ($size w))
(prn ($@ x w)))
(let ((x (tensor '((1.0 0.5))))
(w1 (tensor '((0.1 0.3 0.5) (0.2 0.4 0.6))))
(b1 (tensor '((0.1 0.2 0.3)))))
(prn ($size w1))
(prn ($size x))
(prn ($size b1))
(let* ((a1 ($+ ($@ x w1) b1))
(z1 ($sigmoid a1)))
(prn a1)
(prn z1)
(let ((w2 (tensor '((0.1 0.4) (0.2 0.5) (0.3 0.6))))
(b2 (tensor '((0.1 0.2)))))
(prn ($size z1))
(prn ($size w2))
(prn ($size b2))
(let* ((a2 ($+ ($@ z1 w2) b2))
(z2 ($sigmoid a2)))
(prn a2)
(prn z2)))))
;; softmax
(let* ((a (tensor '(0.3 2.9 4.0)))
(y ($softmax a)))
(prn y)
(prn ($sum y)))
(let ((a (tensor '(1010 1000 990))))
(prn ($softmax a)))
;; mnist data loading - takes time, so load and set
(defparameter *mnist* (read-mnist-data))
(prn *mnist*)
;; network parameters
(defparameter *w1* ($parameter (rndn 784 50)))
(defparameter *b1* ($parameter (zeros 50)))
(defparameter *w2* ($parameter (rndn 50 100)))
(defparameter *b2* ($parameter (zeros 100)))
(defparameter *w3* ($parameter (rndn 100 10)))
(defparameter *b3* ($parameter (zeros 10)))
(defun mnist-predict (x)
(-> x
($xwpb *w1* *b1*)
($sigmoid)
($xwpb *w2* *b2*)
($sigmoid)
($xwpb *w3* *b3*)
($softmax)))
(defun mnist-loss (prediction trueth) ($cee prediction trueth))
;; write to file
(let ((f (file.disk "dlfs/mnist-w1.dat" "w")))
($fwrite ($data *w1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b1.dat" "w")))
($fwrite ($data *b1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w2.dat" "w")))
($fwrite ($data *w2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b2.dat" "w")))
($fwrite ($data *b2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w3.dat" "w")))
($fwrite ($data *w3*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b3.dat" "w")))
($fwrite ($data *b3*) f)
($fclose f))
;; read from file
(let ((f (file.disk "dlfs/mnist-w1.dat" "r")))
($fread ($data *w1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b1.dat" "r")))
($fread ($data *b1*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w2.dat" "r")))
($fread ($data *w2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b2.dat" "r")))
($fread ($data *b2*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-w3.dat" "r")))
($fread ($data *w3*) f)
($fclose f))
(let ((f (file.disk "dlfs/mnist-b3.dat" "r")))
($fread ($data *b3*) f)
($fclose f))
;; train data
(prn ($ *mnist* :train-images))
;; run prediction - test
(prn (-> *mnist*
($ :train-images)
($index 0 '(0 1 2 3 4))
(mnist-predict)))
(let ((y (-> *mnist*
($ :train-images)
($index 0 '(0 1 2 3 4))
(mnist-predict)) )
(r (-> *mnist*
($ :train-labels)
($index 0 '(0 1 2 3 4)))))
(prn y)
(prn r)
(prn ($cee y r)))
| 4,130
|
Common Lisp
|
.lisp
| 155
| 22.825806
| 66
| 0.512022
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
38738eba49558c0e267cdca00db0b4f48163485ffcbd1e3c94f2a6ee0faf04a7
| 3,227
|
[
-1
] |
3,228
|
dlfs05.lisp
|
chunsj_TH/examples/books/dlfs/dlfs05.lisp
|
(defpackage :dlfs-05
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :dlfs-05)
;; mnist data loading - takes time, so load and set
(defparameter *mnist* (read-mnist-data))
(prn *mnist*)
;; network parameters
(defparameter *w1* ($parameter (rndn 784 50)))
(defparameter *b1* ($parameter (zeros 50)))
(defparameter *w2* ($parameter (rndn 50 100)))
(defparameter *b2* ($parameter (zeros 100)))
(defparameter *w3* ($parameter (rndn 100 10)))
(defparameter *b3* ($parameter (zeros 10)))
(defun mnist-predict (x)
(-> x
($xwpb *w1* *b1*)
($sigmoid)
($xwpb *w2* *b2*)
($sigmoid)
($xwpb *w3* *b3*)
($softmax)))
(defun mnist-loss (prediction trueth) ($cee prediction trueth))
(defun mnist-write-weight-to (w fname)
(let ((f (file.disk fname "w")))
($fwrite ($data w) f)
($fclose f)))
(defun mnist-write-weights ()
(mnist-write-weight-to *w1* "dlfs/mnist-w1.dat")
(mnist-write-weight-to *b1* "dlfs/mnist-b1.dat")
(mnist-write-weight-to *w2* "dlfs/mnist-w2.dat")
(mnist-write-weight-to *b2* "dlfs/mnist-b2.dat")
(mnist-write-weight-to *w3* "dlfs/mnist-w3.dat")
(mnist-write-weight-to *b3* "dlfs/mnist-b3.dat"))
(defun mnist-read-weight-from (w fname)
(let ((f (file.disk fname "r")))
($fread ($data w) f)
($fclose f)))
(defun mnist-read-weights ()
(mnist-read-weight-from *w1* "dlfs/mnist-w1.dat")
(mnist-read-weight-from *b1* "dlfs/mnist-b1.dat")
(mnist-read-weight-from *w2* "dlfs/mnist-w2.dat")
(mnist-read-weight-from *b2* "dlfs/mnist-b2.dat")
(mnist-read-weight-from *w3* "dlfs/mnist-w3.dat")
(mnist-read-weight-from *b3* "dlfs/mnist-b3.dat"))
;; write to file
(mnist-write-weights)
;; read from file
(mnist-read-weights)
;; running loaded model with test data
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
(prn ($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
(defun mnist-test-stat ()
(let ((xt ($ *mnist* :test-images))
(yt ($ *mnist* :test-labels)))
($count (loop :for i :from 0 :below ($size xt 0)
:for xi = ($index xt 0 (list i))
:for yi = ($index yt 0 (list i))
:for yi* = ($data (mnist-predict xi))
:for err = ($sum ($abs ($sub ($round yi*) yi)))
:when (> err 0)
:collect i))))
;; full training
(let* ((x (-> *mnist*
($ :train-images)))
(y (-> *mnist*
($ :train-labels)))
(lr 1.4)
(pwrcnt 526))
(loop :for i :from 1 :to 1000
:for y* = (mnist-predict x)
:for loss = (mnist-loss y* y)
:do (progn
(when (zerop (mod i 5))
(prn (list i ($data loss)))
(finish-output))
($gs! loss)
($gd! (list *w1* *b1* *w2* *b2* *w3* *b3*) lr)
(when (zerop (mod i 50))
(let ((wrcnt (mnist-test-stat)))
(prn (list i wrcnt 10000))
(when (< wrcnt pwrcnt)
(setf pwrcnt wrcnt)
(prn "Saving weights...")
(mnist-write-weights)
(prn "Done saving."))))))
(prn (mnist-test-stat)))
| 3,543
|
Common Lisp
|
.lisp
| 96
| 28.833333
| 68
| 0.528401
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
3b1762c34b8adee963a33d228aae1287863dbc3c6daa8a886941cc1183360aa2
| 3,228
|
[
-1
] |
3,229
|
tensor.lisp
|
chunsj_TH/examples/intro/tensor.lisp
|
(defpackage th.tensor-examples
(:use #:common-lisp
#:mu
#:th))
(in-package :th.tensor-examples)
;; plain storage
(prn (storage.float))
;; creates an empty tensor of default tensor class (float)
(prn (tensor))
;; creates a tensor of default tensor class type with specified sizes;
;; elements are not initialized.
(prn (tensor 2 2))
;; creates a tensor with contents
(prn (tensor '(1 2 3 4)))
(prn (tensor '((1 2 3 4))))
;; creates a tensor with multidimensional contents
(prn (tensor '((1 2) (3 4) (5 6))))
;; creates tensor with other tensor, content storage is shared.
(let ((x (tensor '(4 3 2 1))))
(prn "X = (4 3 2 1)")
(prn x)
(prn "SAME CONTENT AS X")
(prn (tensor x)))
;; create tensor with sizes and strides, elements are not initialized.
(prn (tensor '(2 2) '(2 1)))
;; type specific construction functions; same as above.
(prn (tensor.byte '(1 2 3 4)))
(prn (tensor.char '(1 2 3 4)))
(prn (tensor.short '(1 2 3 4)))
(prn (tensor.int '(1 2 3 4)))
(prn (tensor.long '(1 2 3 4)))
(prn (tensor.float '(1 2 3 4)))
(prn (tensor.double '(1 2 3 4)))
;; clone a tensor creates a new tensor with independent storage.
(let ((x (tensor '((1 2) (3 4)))))
(prn "'((1 2) (3 4))")
(prn ($clone x)))
;; make a tensor with contiguously allocated memory if it is not allocated contiguously.
(prn ($contiguous! (tensor.float '((1 2 3) (4 5 6)))))
;; tensor types can be changed to each other
(prn (tensor.byte (tensor.double '((1.234 2.345) (3.456 4.567)))))
(prn (tensor.double (tensor.int '((1 2 3) (4 5 6)))))
(prn (tensor.long (tensor.float '(1.2 3.4 5.6 7.8))))
;; check whether it is tensor or not
(prn ($tensorp 0))
(prn ($tensorp (tensor)))
;; query number of dimensions
(prn ($ndim (tensor)))
(prn ($ndim (tensor 2 3 4)))
;; query tensor size
(prn ($size (tensor 2 3 4)))
;; query tensor size along given dimension
(prn ($size (tensor 2 3 4) 2))
;; size of scalar value is nil
(prn ($size 1))
;; stride of a tensor
(prn ($stride (tensor 2 3 4)))
;; stride of a tensor along dimension
(prn ($stride (tensor 2 3 4) 2))
;; stride of a nil object
(prn ($stride nil))
;; storage of a tensor; a tensor is a specific view on the storage
(let* ((x (tensor 4 5))
(s ($storage x)))
(loop :for i :from 0 :below ($count s)
:do (setf ($ s i) i))
(prn "'((0 1 2 3 4) ... (15 16 17 18 19))")
(prn x))
;; contiguous or not
(prn ($contiguousp (tensor)))
(prn ($contiguousp (tensor 2 2 2)))
;; size comparison using size value
(prn (equal ($size (tensor 2 3)) ($size (tensor 2 3))))
(prn (equal ($size (tensor)) ($size (tensor 2 3))))
(prn (equal ($size (tensor 2 2)) '(2 2)))
;; size comparison
(prn ($sizep (tensor 2 2) (tensor 2 2)))
(prn ($sizep (tensor 2 3) (tensor)))
;; number of elements in a tensor
(prn ($count (tensor 3 3 4)))
;; query the element at the index location
(let ((x (tensor '((1 2 3) (4 5 6)))))
(prn "'((1 2 3) (4 5 6))")
(prn x)
(prn "1")
(prn ($ x 0 0)))
;; creates a new view on the same storage of the given tensor
(let ((x (tensor))
(y (tensor '((1 2) (3 4)))))
(prn "((1 2) (3 4))")
(prn ($set! x y))
(prn "T")
(prn ($setp x y)))
;; copy elements from other tensor; both should have the same number of elements.
(let ((x (tensor '(1 2 3 4)))
(y (tensor '((5 4) (3 2)))))
;; now the elements of x replaced with those of y
($copy! x y)
(prn "(5 4 3 2)")
(prn x)
(prn "((5 4) (3 2))")
(prn y)
;; new int tensor from x, then copies elements from list
;; new int tensor will have same size of x
(prn "(123 234 345 456)")
(prn ($copy! (tensor.int x) '(123 234 345 456)))
(prn "(5 4 3 2)")
;; storage is not shared
(prn x))
;; fill values
(let ((x (tensor 3 3)))
($fill x 123)
(prn "((123 123 123) ... (123 123 123))")
(prn x)
(prn "((0 0 0) ... (0 0 0))")
;; mutable method
(prn ($zero! x))
(prn "((0 0 0) ... (0 0 0))")
(prn x)
(prn "((1 1 1) ... (1 1 1))")
;; immutable method
(prn ($one x))
(prn "((0 0 0) ... (0 0 0))")
(prn x))
;; resizing a tensor allocates if more memory storage is required
;; note that $view only changes sizes or shape but without changing allocated memory.
(let ((x (tensor '((1 2 3) (3 4 5))))
(y (tensor 3 3)))
(prn "((1 2 3 3) (4 5 ? ?) ... (? ? ? ?)))")
(prn ($resize! x '(4 4)))
(prn "((1 2) (3 3))")
(prn ($resize! x '(2 2)))
;; resize as y
(prn "((1 2 3) (3 4 5) (? ? ?))")
(prn ($resize! x y)))
;; select - choose sub tensor at index along dimension
(let ((x (tensor '((1 2 3) (4 5 6) (7 8 9)))))
(prn "(4 5 6) - 2nd(1) row along 1st(0) dimension")
(prn ($select x 0 1))
(prn "(3 6 9) - 3rd(2) column along 2nd(1) dimension")
(prn ($select x 1 2))
(setf ($select x 0 1) '(-11 -22 -33))
(prn "x with 2nd row changed as (-11 -22 -33)")
(prn x))
;; subdimensional select using $
(let ((x (tensor '((1 2 3) (4 5 6) (7 8 9)))))
(prn "1st row - (1 2 3)")
(prn ($ x 0))
(setf ($ x 1) '(6 5 4))
(prn "2nd row changes as (6 5 4)")
(prn x))
;; narrow - start and size along dimension
;; returns a new tensor or view built with narrowing from start selected as size along dimension
(let ((x (tensor 5 6)))
($zero! x)
(prn "5x5 matrix filled with zeros.")
(prn x)
;; along 1st(0) dimension, from 2nd row, select 3 rows, then fill it as one.
(-> ($narrow x 0 1 3)
($fill! 1))
(prn "along 1st(0) dimension, from 2nd(1) row, total 3 rows are filled with one.")
(prn x)
(-> ($narrow x 1 1 4)
($fill! 2))
(prn "along 2nd(1) dimension, from 2nd(1) column, total 4 columns are filled with two.")
(prn x)
(setf ($narrow x 1 0 2) '(0 11 22 33 44 55 66 77 88 99))
(prn "along 2nd(1) dimension, from 1st(0) column, total 2 columns are copied from list.")
(prn x))
;; subview - multiple start and size per dimension, kind of multiple narrows
;; each pair of start and size along each dimensions
(let ((x (tensor '((1 2 3 4 5 6)
(2 3 4 5 6 7)
(3 4 5 6 7 8)
(4 5 6 7 8 9)
(0 0 0 0 0 0)))))
(prn "5x6 matrix.")
(prn x)
(prn "((4 5 6) (5 6 7) (6 7 8)) - from 2nd row, 3 rows, from 3rd column, 3 columns.")
(prn ($subview x 1 3 2 3))
(setf ($subview x 1 3 2 3) '(11 12 13 14 15 16 17 18 19))
(prn "((11 12 13) (14 15 16) (17 18 19))")
(prn ($subview x 1 3 2 3))
(prn "matrix changes.")
(prn x))
;; orderly subview using $
(let ((x (tensor '((1 2 3 4 5 6)
(2 3 4 5 6 7)
(3 4 5 6 7 8)
(4 5 6 7 8 9)
(0 0 0 0 0 0)))))
(prn "((4 5 6) (5 6 7) (6 7 8)) - from 2nd row, 3 rows, from 3rd column, 3 columns.")
(prn ($ x '(1 3) '(2 3)))
(setf ($ x '(1 3) '(2 3)) '(11 12 13 14 15 16 17 18 19))
(prn "((11 12 13) (14 15 16) (17 18 19)) in x")
(prn x))
;; query with dimension index size pairs or subview using $
(let ((x (tensor '((1 2 3 4 5 6)
(2 3 4 5 6 7)
(3 4 5 6 7 8)
(4 5 6 7 8 9)
(0 0 0 0 0 0)))))
(prn "original x")
(prn x)
(prn "subview of size 3x3, from 2nd row and 3rd column")
(prn ($ x '(1 3) '(2 3)))
(setf ($ x '(1 3) '(2 3)) '(11 12 13 14 15 16 17 18 19))
(prn "3x3 subview changed as 11 to 19")
(prn ($ x '(1 3) '(2 3)))
(prn "changed x")
(prn x))
;; general selection using $
(let ((x (zeros 5 6)))
(prn "5x6 zero matrix.")
(prn x)
(setf ($ x 0 2) 1)
(prn "1 at 1st row, 3rd column")
(prn x)
(setf ($ x 4) 9)
(prn "5th row as 9")
(prn x)
(setf ($ x '(:all (5 1))) 8)
(prn "6th column as 8")
(prn x)
(setf ($ x '((1 1) (1 3))) 2)
(prn "1x3 from 2nd row and 2nd column, filled with 2.")
(prn x)
(setf ($ x '((0 5) (3 1))) -1)
(prn "5x1 from 1st row and 4th column, filled with -1.")
(prn x)
(setf ($ x '((0 5) (1 1))) (range 1 5))
(prn "5x1 from 1st row and 2nd column, copied from (1 ... 5)")
(prn x)
(setf ($ x ($lt x 0)) 5)
(prn "element as 5 if oringinal one is less than 0.")
(prn x))
;; index-select - creates a new storage, not sharing
;; collects subtensor along dimension at indices
(let ((x (tensor '((1 2 3) (4 5 6) (7 8 9)))))
(prn "original x")
(prn x)
(prn "1st(0) and 2nd(1) along 1st(0) dimension")
(prn ($index x 0 '(0 1)))
(let ((y ($index x 1 '(1 2))))
(prn "2nd(1) and 3rd(2) along 2nd(1) dimension")
(prn y)
($fill! y 0)
(prn "zero filled")
(prn y)
(prn "x unchanged")
(prn x)))
;; index-copy - set copies elements into selected index location
(let ((x (tensor '((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7) (5 6 7 8))))
(y (tensor 5 2)))
($fill! ($select y 1 0) -1)
($fill! ($select y 1 1) -2)
(prn "original x")
(prn x)
(prn "original y")
(prn y)
(setf ($index x 1 '(3 0)) y)
(prn "4th(3) and 1st(0) columns along 2nd(1) dimension copied from y.")
(prn x))
;; index-fill
(let ((x (tensor '((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7) (5 6 7 8)))))
(prn "original x")
(prn x)
(setf ($index x 1 '(0 3)) 123)
(prn "1st(0) and 4th(3) columns along 2nd(1) dimension set as 123")
(prn x))
;; gather
(let ((x (tensor 5 5)))
(loop :for i :from 0 :below ($count x)
:do (setf ($ ($storage x) i) i))
(prn "original 5x5 matrix")
(prn x)
(prn "by incrementing index, collect nth 1st(0) dimensional values.")
(prn "1st row will be (0 0), (1 1), (2 2), (3 3), (4 4)")
(prn "2nd row will be (1 0), (2 1), (3 2), (4 3), (0 4)")
(prn ($gather x 0 '((0 1 2 3 4) (1 2 3 4 0))))
(prn "by incrementing index, collect nth 2nd(1) dimensional values.")
(prn "1st column will be (0 0), (1 1), (2 2), (3 3), (4 4)")
(prn "2nd column will be (1 0), (2 1), (3 2), (4 3), (0 4)")
(prn ($gather x 1 '((0 1) (1 2) (2 3) (3 4) (4 0)))))
;; scatter
(let ((x (tensor 5 5))
(y (tensor '((1 2 3 4 5) (-5 -4 -3 -2 -1)))))
($zero! x)
(prn "5x5 zeros")
(prn x)
($scatter! x 0 '((0 1 2 3 4) (1 2 3 4 0)) y)
(prn "as in gather, but set from y")
(prn x)
($scatter! x 1 '((0 1) (1 2) (2 3) (3 4) (4 0)) 9)
(prn "as in gather, but fill a value")
(prn x))
;; masked-select
(let ((x (tensor 3 4))
(mask '((1 0 1 0 0 0) (1 1 0 0 0 1)))
(z (tensor)))
(loop :for i :from 0 :below ($count x)
:do (setf ($ ($storage x) i) (1+ i)))
(prn "original x")
(prn x)
(prn "only at value 1(true)")
(prn ($masked x mask))
($set! z ($masked x mask))
(prn "same as above.")
(prn z)
($fill! z -99)
(prn "z value changed as -99")
(prn z)
(prn "x unchanged.")
(prn x))
;; masked-copy
(let ((x (tensor 3 4))
(mask '((1 0 1 0 0 0) (1 1 0 0 0 1)))
(z (tensor '(1 2 3 4 5))))
($zero! x)
(prn "x matrix, 3x4")
(prn x)
(setf ($masked x mask) z)
(prn "set by z")
(prn x))
;; masked-fill
(let ((x (tensor 3 4))
(mask '((1 0 1 0 0 0) (1 1 0 0 0 1))))
($zero! x)
(prn "original 3x4 matrix")
(prn x)
(setf ($masked x mask) 5)
(prn "filled as 5")
(prn x))
;; nonzero - returns locations of non zero elements
(prn ($nonzero (tensor '((1 2 0 3 4) (0 0 1 0 0)))))
;; repeat - repeat content as given times
(prn ($repeat (tensor '(1 2)) 3 2))
(prn ($repeat (tensor '((1 2) (3 4) (5 6))) 2 3))
;; squeeze - removes singletone dimensions
(let ((x (tensor 2 1 2 1 2)))
(prn "original size")
(prn ($size x))
(prn "no 1s")
(prn ($size ($squeeze x)))
(prn "no 1 in 2nd(1) dimension")
(prn ($size ($squeeze x 1))))
;; unsqueeze - add a singleton dimension
(let ((x (tensor '(1 2 3 4))))
(prn "vector")
(prn x)
(prn "along 1st(0) dimension")
(prn ($unsqueeze x 0))
(prn "along 2nd(1) dimension")
(prn ($unsqueeze x 1)))
;; view - different from resize(allocation), reshape(new storage), just a view
(let ((x (tensor '(0 0 0 0))))
(prn "original vector")
(prn x)
(prn "2x2")
(prn ($view x 2 2))
(prn "as the size of other 2x2 tensor")
(prn ($view x (tensor 2 2))))
;; transpose
(let ((x (tensor '((1 2 3) (4 5 6)))))
(prn "original 2x3")
(prn x)
(prn "transposed 3x2")
(prn ($transpose x)))
;; transpose - shares storage but different view(tensor)
(let ((x (tensor 3 4)))
($zero! x)
($fill! ($select x 1 2) 7)
(prn "original x")
(prn x)
(let ((y ($transpose x)))
($fill! ($select y 1 2) 8)
(prn "modified transposed x or y")
(prn y)
(prn "original x")
(prn x)))
;; permute - multidimensional transposing
(let ((x (ones 3 4 2 5)))
(prn "original size")
(prn ($size x))
(prn "permute size with 2nd, 3rd, 1st and 4th dimensions - 4,2,3,5")
(prn ($size ($permute x 1 2 0 3))))
;; unfold - slice with size by step along dimension
(let ((x (ones 7)))
(loop :for i :from 1 :to 7 :do (setf ($ x (1- i)) i))
(prn "vector, 1 to 7")
(prn x)
(prn "slice along 1st(0) dimension, size of 2, by step 1")
(prn ($unfold x 0 2 1))
(prn "slice along 1st(0) dimension, size of 2, by step 2")
(prn ($unfold x 0 2 2)))
;; fmap - just elementwise function application
(let ((x (zeros 3 3))
(n 0))
($fmap (lambda (v) (+ v (* 0.5 pi (incf n)))) x)
(prn x)
($fmap (lambda (v) (round (sin v))) x)
(prn x))
;; fmap - more, shape is irrelevant when they have same count.
(let ((x (tensor 3 3))
(y (tensor 9))
(z (tensor '(0 1 2 3 4 5 6 7 8))))
(loop :for i :from 1 :to 9 :do (setf ($ ($storage x) (1- i)) i
($ ($storage y) (1- i)) i))
(prn x)
(prn y)
(prn z)
(prn "1*1, 2*2, 3*3, ...")
(prn ($fmap (lambda (vx vy) (* vx vy)) x y))
(prn "1 + 1 + 0, 2 + 2 + 1, 3 + 3 + 2, ...")
(prn ($fmap (lambda (xx yy zz) (+ xx yy zz)) x y z))
(prn x))
;; split - split tenor with size along dimension
(let ((x (zeros 3 4 5)))
(prn "by 2 along 0 - 2x4x5, 1x4x5")
(prn ($split x 2 0))
(prn "by 2 along 1 - 3x2x5, 3x2x5")
(prn ($split x 2 1))
(prn "by 2 along 2 - 3x4x2, 3x4x2, 3x4x1")
(prn ($split x 2 2)))
;; chunk - n parition of approaximately same size along dimension
(let ((x (ones 3 4 5)))
(prn "2 partitions along 0 - 2x4x5, 1x4x5")
(prn ($chunk x 2 0))
(prn "2 partitions along 1 - 3x2x5, 3x2x5")
(prn ($chunk x 2 1))
(prn "2 paritions along 2 - 3x4x3, 3x4x2")
(prn ($chunk x 2 2)))
;; concat tensors
(prn ($cat (ones 3) (zeros 3)))
(prn ($cat (ones 1 3) (zeros 1 3) 1))
(prn ($cat (ones 1 3) (zeros 1 3) 0))
(prn ($cat (ones 3 3) (zeros 1 3)))
(prn ($cat (ones 3 4) (zeros 3 2) 1))
;; diagonal matrix
(prn ($diag (tensor '(1 2 3 4))))
(prn ($diag (ones 3 3)))
;; identity matrix
(prn (eye 2))
(prn (eye 3 4))
(prn ($eye (tensor.byte) 3))
(prn ($eye (tensor.byte 10 20) 3 4))
;; linspace
(prn (linspace 1 2))
(prn (linspace 1 2 11))
;; logspace
(prn (logspace 1 2))
(prn (logspace 1 2 11))
;; uniform random
(prn (rnd 3 3))
;; normal random
(prn (rndn 2 4))
;; range
(prn (range 2 5))
(prn (range 2 5 1.2))
;; arange
(prn (arange 2 5))
(prn (range 2 5))
;; randperm
(prn (rndperm 10))
(prn (rndperm 5))
;; reshape
(let ((x (ones 2 3))
(y nil))
(prn x)
(setf y ($reshape x 3 2))
(prn y)
($fill! y 2)
(prn y)
(prn x))
;; tril and triu
(let ((x (ones 4 4)))
(prn ($tril x))
(prn ($tril x -1))
(prn ($triu x))
(prn ($triu x 1)))
;; abs
(let ((x (tensor '((-1 2) (3 -4)))))
(prn x)
(prn ($abs x))
(prn x)
(prn ($abs! x))
(prn x))
;; sign
(let ((x (tensor '((-1 2) (3 -4)))))
(prn x)
(prn ($sign x))
(prn x)
(prn ($sign! x))
(prn x))
;; acos
(let ((x (tensor '((-1 1) (1 -1)))))
(prn x)
(prn ($acos x))
(prn x)
(prn ($acos! x))
(prn x))
;; asin
(let ((x (tensor '((-1 1) (1 -1)))))
(prn x)
(prn ($asin x))
(prn x)
(prn ($asin! x))
(prn x))
;; atan
(let ((y (tensor '((-11 1) (1 -11)))))
(prn y)
(prn ($atan y))
(prn y)
(prn ($atan! y))
(prn y))
;; atan2
(let ((y (tensor '((-11 1) (1 -11))))
(x (tensor '((1 1) (1 1)))))
(prn y)
(prn ($atan2 y x))
(prn y)
(prn ($atan2! y x))
(prn y))
;; ceil
(let ((x (tensor '((1 1.1 1.7) (-0.8 -1.1 -2.3)))))
(prn x)
(prn ($ceil x))
(prn x)
(prn ($ceil! x))
(prn x))
;; cos
(let ((x (tensor '((-3.14 0) (3.14 0)))))
(prn x)
(prn ($cos x))
(prn x)
(prn ($cos! x))
(prn x))
;; cosh
(let ((x (tensor '((-3.14 0) (3.14 0)))))
(prn x)
(prn ($cosh x))
(prn x)
(prn ($cosh! x))
(prn x))
;; exp
(let ((x (tensor '((0 1 2) (-1 -2 -3)))))
(prn x)
(prn ($exp x))
(prn x)
(prn ($exp! x))
(prn x))
;; floor
(let ((x (tensor '((1 1.1 1.7) (-0.8 -1.1 -2.3)))))
(prn x)
(prn ($floor x))
(prn x)
(prn ($floor! x))
(prn x))
;; log
(let ((x ($exp (tensor '((0 1 2) (-1 -2 -3))))))
(prn x)
(prn ($log x))
(prn x)
(prn ($log! x))
(prn x))
;; log1p
(let ((x ($exp (tensor '((0 1 2) (-1 -2 -3))))))
(prn x)
(prn ($log1p x))
(prn x)
(prn ($log1p! x))
(prn x))
;; neg
(let ((x (tensor '((0 1 2) (-1 -2 -3)))))
(prn x)
(prn ($neg x))
(prn x)
(prn ($neg! x))
(prn x))
;; cinv
(let ((x (tensor '((3 2 1) (-1 -2 -3)))))
(prn x)
(prn ($cinv x))
(prn x)
(prn ($cinv! x))
(prn x))
;; expt
(let ((x (tensor '((2 3) (1 2))))
(y (tensor '((2 2) (3 3))))
(n 2))
(prn x)
(prn ($expt x n))
(prn ($expt n x))
(prn ($expt x y))
(prn x)
(prn ($expt! x n))
(prn x)
(prn ($expt! n x))
(prn x))
;; round
(let ((x (tensor '((1.1 1.8) (-1.1 -1.8)))))
(prn x)
(prn ($round x))
(prn x)
(prn ($round! x))
(prn x))
;; sin
(let ((x (tensor '((-3.14 0) (3.14 0)))))
(prn x)
(prn ($sin x))
(prn x)
(prn ($sin! x))
(prn x))
;; sinh
(let ((x (tensor '((-3.14 0) (3.14 0)))))
(prn x)
(prn ($sinh x))
(prn x)
(prn ($sinh! x))
(prn x))
;; sqrt
(let ((x (tensor '((1 2 4) (3 5 9)))))
(prn x)
(prn ($sqrt x))
(prn x)
(prn ($sqrt! x))
(prn x))
;; rsqrt
(let ((x (tensor '((1 2 4) (3 5 9)))))
(prn x)
(prn ($rsqrt x))
(prn x)
(prn ($rsqrt! x))
(prn x))
;; tan
(let ((x (tensor '((1 2) (3 4)))))
(prn x)
(prn ($tan x))
(prn x)
(prn ($tan! x))
(prn x))
;; tanh
(let ((x (tensor '((1 2) (-3 -4)))))
(prn x)
(prn ($tanh x))
(prn x)
(prn ($tanh! x))
(prn x))
;; sigmoid
(let ((x (tensor '((-2 -1) (1 2)))))
(prn x)
(prn ($sigmoid x))
(prn x)
(prn ($sigmoid! x))
(prn x))
;; equal
(let ((x (tensor '(1 2 3)))
(y (tensor '(1 2 3))))
(prn ($equal x y)))
;; add, sub, and mul
(let ((x (tensor '((1 2) (3 4))))
(y (tensor '((2 3) (4 5))))
(a 10))
(prn ($add x a))
(prn ($add x y))
(prn ($sub x a))
(prn ($sub x y))
(prn ($mul x a))
(prn ($mul x y)))
;; clamp
(let ((x (tensor '((1 2 3 4 5) (2 3 4 5 6) (3 4 5 6 7))))
(min 2)
(max 5))
(prn x)
(prn ($clamp x min max))
(prn x)
(prn ($clamp! x min max))
(prn x))
;; add-cmul
(let ((x (tensor 2 2))
(y (tensor 4))
(z (tensor 2 2)))
($fill! x 1)
($fill! y 3)
($fill! z 5)
(prn ($addmul x y z 2)))
;; div
(let ((x (ones 2 2))
(y (range 1 4)))
(prn ($div x y)))
;; add-cdiv
(let ((x (-> (tensor 2 2) ($fill! 1)))
(y (range 1 4))
(z (-> (tensor 2 2) ($fill! 5))))
(prn ($adddiv x y z 2)))
;; fmod, remainder
(let ((x (tensor '(-3 3))))
(prn ($fmod x 2))
(prn ($fmod x -2))
(prn ($rem x 2))
(prn ($rem x -2))
(prn ($fmod (tensor '((3 3) (-3 -3))) (tensor '((2 -2) (2 -2)))))
(prn ($rem (tensor '((3 3) (-3 -3))) (tensor '((2 -2) (2 -2))))))
;; dot
(let ((x (-> (tensor 2 2) ($fill! 3)))
(y (-> (tensor 4) ($fill! 2))))
(prn ($dot x y)))
;; add-mv
(let ((y (ones 3))
(m (-> (tensor 3 2) ($fill! 3)))
(x (-> (tensor 2) ($fill! 2))))
(prn ($mv m x))
(prn ($addmv y m x)))
;; add-r
(let ((x (range 1 3))
(y (range 1 2))
(m (ones 3 2)))
(prn ($addr m x y))
(prn ($addr! m x y 2 1)))
;; add-mm
(let ((c (ones 4 4))
(a (-> (range 1 12) ($resize! '(4 3))))
(b (-> (range 1 12) ($resize! '(3 4)))))
(prn ($addmm c a b)))
;; add-bmm
(let ((c (ones 4 4))
(ba (-> (range 1 24) ($resize! '(2 4 3))))
(bb (-> (range 1 24) ($resize! '(2 3 4)))))
(prn ($addbmm c ba bb)))
;; badd-bmm
(let ((bc (ones 2 4 4))
(ba (-> (range 1 24) ($resize! '(2 4 3))))
(bb (-> (range 1 24) ($resize! '(2 3 4)))))
(prn ($baddbmm bc ba bb)))
;; operators
(prn ($+ 5 (rnd 3)))
(let ((x (-> (tensor 2 2) ($fill! 2)))
(y (-> (tensor 4) ($fill! 3))))
(prn ($+ x y))
(prn ($- y x))
(prn ($+ x 3))
(prn ($- x)))
(let ((m (-> (tensor 2 2) ($fill! 2)))
(n (-> (tensor 2 4) ($fill! 3)))
(x (-> (tensor 2) ($fill! 4)))
(y (-> (tensor 2) ($fill! 5))))
(prn ($* x y))
(prn ($@ m x))
(prn ($@ m n)))
(prn ($/ (ones 2 2) 3))
;; cross
(let ((x (rndn 4 3))
(y (rndn 4 3))
(z (tensor)))
(prn x)
(prn y)
(prn ($xx x y))
(prn ($xx! z x y 1))
(prn z))
;; cumulative product
(let ((x (range 1 5))
(m (tensor.long '((1 4 7) (2 5 8) (3 6 9)))))
(prn x)
(prn ($cumprd x))
(prn m)
(prn ($cumprd m))
(prn ($cumprd m 0)))
;; cumulative sum
(let ((x (range 1 5))
(m (tensor.long '((1 4 7) (2 5 8) (3 6 9)))))
(prn x)
(prn ($cumsum x))
(prn m)
(prn ($cumsum m))
(prn ($cumsum m 1)))
;; max and min
(let ((x (rndn 4 4))
(vals (tensor))
(indices (tensor.long)))
(prn x)
(prn ($max x))
(prn ($min x))
(prn ($max! vals indices x))
(prn ($max! vals indices x 1)))
;; mean
(let ((x (rndn 3 4)))
(prn x)
(prn ($mean x))
(prn ($mean x 0))
(prn ($mean x 1)))
;; cmax
(let ((a (tensor '(1 2 3)))
(b (tensor '(3 2 1))))
(prn ($cmax a b))
(prn ($cmax a b 2 3)))
;; cmin
(let ((a (tensor '(1 2 3)))
(b (tensor '(3 2 1))))
(prn ($cmin a b))
(prn ($cmin a b 2 3)))
;; median
(let ((x (rndn 3 4))
(vals (tensor))
(indices (tensor.long)))
(prn x)
(prn ($median x))
(prn ($median! vals indices x))
(prn ($median! vals indices x 1)))
;; product
(let ((a (tensor '(((1 2) (3 4)) ((5 6) (7 8))))))
(prn a)
(prn ($prd a))
(prn ($prd a 0))
(prn ($prd a 1)))
;; sort
(let ((x (rndn 3 3))
(vals (tensor))
(indices (tensor.long)))
(prn x)
(prn ($sort! vals indices x)))
;; conv2
(let ((x (rnd 100 100))
(k (rnd 10 10)))
(prn ($size ($conv2 x k)))
(prn ($size ($conv2 x k :full))))
(let ((x (rnd 500 100 100))
(k (rnd 500 10 10)))
(prn ($size ($conv2 x k)))
(prn ($size ($conv2 x k :full))))
;; conv3 - slow, in this laptop, it takes ~6secs
(let ((x (rnd 100 100 100))
(k (rnd 10 10 10)))
(prn ($size ($conv3 x k)))
(prn ($size ($conv3 x k :full))))
;; gesv
(let ((a (-> (tensor '((6.80 -2.11 5.66 5.97 8.23)
(-6.05 -3.30 5.36 -4.44 1.08)
(-0.45 2.58 -2.70 0.27 9.04)
(8.32 2.71 4.35 -7.17 2.14)
(-9.67 -5.14 -7.26 6.08 -6.87)))
($transpose)))
(b (-> (tensor '((4.02 6.19 -8.22 -7.57 -3.03)
(-1.56 4.00 -8.67 1.75 2.86)
(9.81 -4.09 -4.57 -8.61 8.99)))
($transpose)))
(x (tensor))
(lu (tensor)))
(prn a)
(prn b)
(prn ($gesv! x lu b a))
(prn x)
(prn ($@ a x))
(prn ($dist b ($@ a x))))
;; trtrs
(let ((a (-> (tensor '((6.80 -2.11 5.66 5.97 8.23)
(0 -3.30 5.36 -4.44 1.08)
(0 0 -2.70 0.27 9.04)
(0 0 0 -7.17 2.14)
(0 0 0 0 -6.87)))))
(b (-> (tensor '((4.02 6.19 -8.22 -7.57 -3.03)
(-1.56 4.00 -8.67 1.75 2.86)
(9.81 -4.09 -4.57 -8.61 8.99)))
($transpose)))
(x (tensor)))
(prn a)
(prn b)
(prn ($trtrs! x b a))
(prn x)
(prn ($@ a x))
(prn ($dist b ($@ a x))))
;; potrf
(let ((a (tensor '((1.2705 0.9971 0.4948 0.1389 0.2381)
(0.9971 0.9966 0.6752 0.0686 0.1196)
(0.4948 0.6752 1.1434 0.0314 0.0582)
(0.1389 0.0686 0.0314 0.0270 0.0526)
(0.2381 0.1196 0.0582 0.0526 0.3957))))
(chu (tensor))
(chl (tensor)))
(prn ($potrf! chu a))
(prn ($@ ($transpose chu) chu))
(prn ($potrf! chl a nil))
(prn ($@ chl ($transpose chl))))
;; pstrf
(let ((a (tensor '((1.2705 0.9971 0.4948 0.1389 0.2381)
(0.9971 0.9966 0.6752 0.0686 0.1196)
(0.4948 0.6752 1.1434 0.0314 0.0582)
(0.1389 0.0686 0.0314 0.0270 0.0526)
(0.2381 0.1196 0.0582 0.0526 0.3957))))
(chu (tensor))
(chl (tensor))
(piv (tensor.int))
(ap nil))
(prn ($pstrf! chu piv a))
(setf ap ($@ ($transpose chu) chu))
(prn ap)
(prn a)
(setf ($index ap 0 piv) ($clone ap))
(setf ($index ap 1 piv) ($clone ap))
(prn ap)
(prn ($norm ($- a ap)))
(prn ($pstrf! chl piv a nil))
(setf ap ($@ chl ($transpose chl)))
(prn ap)
(prn a)
(setf ($index ap 0 piv) ($clone ap))
(setf ($index ap 1 piv) ($clone ap))
(prn ap)
(prn ($norm ($- a ap))))
;; potrs
(let ((a (tensor '((1.2705 0.9971 0.4948 0.1389 0.2381)
(0.9971 0.9966 0.6752 0.0686 0.1196)
(0.4948 0.6752 1.1434 0.0314 0.0582)
(0.1389 0.0686 0.0314 0.0270 0.0526)
(0.2381 0.1196 0.0582 0.0526 0.3957))))
(b (tensor '((0.6219 0.3439 0.0431)
(0.5642 0.1756 0.0153)
(0.2334 0.8594 0.4103)
(0.7556 0.1966 0.9637)
(0.1420 0.7185 0.7476))))
(cholesky (tensor))
(solve (tensor)))
($potrf! cholesky a)
(prn cholesky)
(prn a)
(prn ($@ ($transpose cholesky) cholesky))
(prn ($dist a ($@ ($transpose cholesky) cholesky)))
($potrs! solve b cholesky)
(prn solve)
(prn b)
(prn ($@ a solve))
(prn ($dist b ($@ a solve))))
;; potri
(let ((a (tensor '((1.2705 0.9971 0.4948 0.1389 0.2381)
(0.9971 0.9966 0.6752 0.0686 0.1196)
(0.4948 0.6752 1.1434 0.0314 0.0582)
(0.1389 0.0686 0.0314 0.0270 0.0526)
(0.2381 0.1196 0.0582 0.0526 0.3957))))
(cholesky (tensor))
(inv (tensor)))
($potrf! cholesky a)
($potri! inv cholesky)
(prn ($@ a inv))
(prn ($dist (eye 5 5) ($@ a inv))))
;; gels
(let ((a (-> (tensor '((1.44 -9.96 -7.55 8.34 7.08 -5.45)
(-7.84 -0.28 3.24 8.09 2.52 -5.70)
(-4.39 -3.24 6.27 5.28 0.74 -1.19)
(4.53 3.83 -6.64 2.06 -2.47 4.70)))
($transpose)))
(b (-> (tensor '((8.58 8.26 8.48 -5.28 5.72 8.93)
(9.35 -4.43 -0.70 -0.26 -7.36 -2.52)))
($transpose)))
(x (tensor)))
($gels! x b a)
(prn x)
(prn ($dist b ($@ a ($narrow x 0 0 4)))))
;; syev
(let ((a (-> (tensor '((1.96 0.00 0.00 0.00 0.00)
(-6.49 3.80 0.00 0.00 0.00)
(-0.47 -6.39 4.17 0.00 0.00)
(-7.20 1.50 -1.51 5.70 0.00)
(-0.65 -6.34 2.67 1.80 -7.10)))
($transpose)))
(e (tensor))
(v (tensor)))
(prn a)
($syev! e v a)
(prn e)
($syev! e v a t)
(prn e)
(prn v)
(prn ($@ v ($diag e) ($transpose v)))
(prn ($dist a ($triu ($@ v ($diag e) ($transpose v))))))
;; ev
(let ((a (-> (tensor '((1.96 0.00 0.00 0.00 0.00)
(-6.49 3.80 0.00 0.00 0.00)
(-0.47 -6.39 4.17 0.00 0.00)
(-7.20 1.50 -1.51 5.70 0.00)
(-0.65 -6.34 2.67 1.80 -7.10)))
($transpose)))
(b nil)
(e (tensor))
(v (tensor)))
(setf b ($+ a ($transpose ($triu a 1))))
(prn b)
($ev! e v b)
(prn e)
($ev! e v b t)
(prn e)
(prn v)
(prn ($@ v ($diag ($select e 1 0)) ($transpose v)))
(prn ($dist b ($@ v ($diag ($select e 1 0)) ($transpose v)))))
;; svd
(let ((a (-> (tensor '((8.79 6.11 -9.15 9.57 -3.49 9.84)
(9.93 6.91 -7.93 1.64 4.02 0.15)
(9.83 5.04 4.86 8.83 9.80 -8.99)
(5.45 -0.27 4.85 0.74 10.00 -6.02)
(3.16 7.98 3.01 5.80 4.27 -5.31)))
($transpose)))
(u (tensor))
(s (tensor))
(v (tensor)))
(prn a)
($svd! u s v a)
(prn u)
(prn s)
(prn v)
(prn ($@ u ($diag s) ($transpose v)))
(prn ($dist a ($@ u ($diag s) ($transpose v)))))
;; randomized svd
(let ((a (-> (tensor '((8.79 6.11 -9.15 9.57 -3.49 9.84)
(9.93 6.91 -7.93 1.64 4.02 0.15)
(9.83 5.04 4.86 8.83 9.80 -8.99)
(5.45 -0.27 4.85 0.74 10.00 -6.02)
(3.16 7.98 3.01 5.80 4.27 -5.31)))
($transpose))))
(prn ($svd a))
(prn ($rsvd a)))
(let ((a ($+ ($* (- 40 -40) (rnd 10 100)) -40)))
(prn a)
(prn ($rsvd a 5)))
;; inverse
(let ((a (rnd 10 10)))
(prn a)
(prn ($@ a ($inverse a)))
(prn ($dist (eye 10 10) ($@ a ($inverse a)))))
;; qr
(let ((a (tensor '((12 -51 4) (6 167 -68) (-4 24 -41))))
(q (tensor))
(r (tensor)))
(prn a)
($qr! q r a)
(prn q)
(prn r)
(prn ($round ($@ q r)))
(prn ($@ ($transpose q) q)))
;; lt
(prn ($lt (tensor '((1 2) (3 4))) (tensor '((2 1) (4 3)))))
(let ((a (rnd 10))
(b (rnd 10)))
(prn a)
(prn b)
(prn ($lt a b))
(prn ($ a ($gt a b)))
(setf ($ a ($gt a b)) 123)
(prn a))
| 29,324
|
Common Lisp
|
.lisp
| 1,046
| 23.808795
| 96
| 0.500124
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
915c415e1a1af0dfce3a72823341124fe1d4806ff2828bb6c373bc011e681851
| 3,229
|
[
-1
] |
3,230
|
conv.lisp
|
chunsj_TH/examples/intro/conv.lisp
|
(defpackage :conv
(:use #:common-lisp
#:mu
#:th))
(in-package :conv)
;; some basic test on convolution
(let ((x (rnd 100 100))
(k (rnd 10 10)))
(prn ($size ($conv2 x k)))
(prn ($size ($conv2 x k :full))))
(let ((x (rnd 50 100 100))
(k (rnd 50 10 10)))
(prn ($size ($conv2 x k)))
(prn ($size ($conv2 x k :full))))
;; base conv2 operation
(let* ((x (tensor '(((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43)))))
(k (tensor '(((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(prn x)
(prn ($conv2 x k))
(prn ($sum ($conv2 x k) 0))
(prn ($reshape ($sum ($conv2 x k) 0) 1 2 2)))
(let* ((x (tensor '(((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43)))))
(k ($parameter '(((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1)))))
(c ($sum ($conv2 x k) 0))
(g (tensor '((300 270) (281 311)))))
(prn c)
($gs! c g)
(prn ($gradient k)))
(let* ((x (tensor '(((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43)))))
(k (tensor '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b (tensor '(1))))
(prn ($conv2d x k b)))
(let* ((x (tensor '((((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43)))
(((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43))))))
(k (tensor '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1)))
(((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b (tensor '(1 1))))
(prn ($conv2d x k b)))
(let* ((x (tensor '(((10 20 30 40) (41 31 21 11) (12 22 32 42) (43 33 23 13))
((40 30 20 10) (11 21 31 41) (42 32 22 12) (13 23 33 43)))))
(k ($parameter '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b ($parameter '(1)))
(o ($conv2d x k b))
(g (tensor '(((1 1) (1 1))))))
(prn o)
($gs! o g)
(prn ($reshape ($gradient k) 2 3 3))
(prn ($gradient b))
($gd! (list k b) 0.01)
(prn k)
(prn ($reshape k 2 3 3))
(prn b)
(prn ($conv2d x k b)))
(let* ((x (tensor '(((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54))
((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54)))))
(k (tensor '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b (tensor '(1)))
(c ($conv2d x k b))
(p ($maxpool2d c 2 2)))
(prn p))
(let* ((x (tensor '(((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54))
((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54)))))
(k ($parameter '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b ($parameter '(1)))
(c ($conv2d x k b))
(p ($maxpool2d c 2 2))
(g (tensor '(((1 1) (1 1))))))
(prn c)
(prn p)
($gs! p g)
(prn ($gradient c))
(prn ($gradient k)))
(let* ((x (tensor '(((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54))
((10 20 30 40 50)
(51 41 31 21 11)
(12 22 32 42 52)
(53 43 33 23 13)
(14 24 34 44 54)))))
(k ($parameter '((((1 0 1) (0 1 0) (1 0 1))
((1 1 1) (0 0 0) (1 1 1))))))
(b ($parameter '(1)))
(c ($conv2d x k b))
(p ($avgpool2d c 2 2))
(g (tensor '(((1 1) (1 1))))))
(prn p)
($gs! p g)
(prn ($gradient c))
(prn ($gradient k)))
;; compare 2 implementations - one from th and the other from thnn
(let* ((x (tensor '((1 2 3 4)
(2 3 4 1)
(2 4 1 2)
(4 1 2 3))))
(k (tensor '((1 1)
(1 1))))
(c ($conv2 x k))
(g (tensor '((1 1 1)
(1 1 1)
(1 1 1)))))
(prn c)
(prn ($conv2 x g))
(prn ($xcorr2 g k :full))
(prn ($add k ($mul ($conv2 x g) -0.01))))
(let* ((x ($parameter '(((1 2 3 4)
(2 3 4 1)
(2 4 1 2)
(4 1 2 3)))))
(k ($parameter '((((1 1)
(1 1))))))
(c ($conv2d x k))
(g (tensor '(((1 1 1)
(1 1 1)
(1 1 1))))))
(prn c)
($gs! c g)
(prn ($gradient k))
(prn ($gradient x))
($gd! (list x k) 0.01)
(prn k))
(let* ((x ($parameter '((1 2 3 4)
(2 3 4 1)
(2 4 1 2)
(4 1 2 3))))
(k ($parameter '((1 1)
(1 1))))
(c ($conv2 x k))
(g (tensor '((1 1 1)
(1 1 1)
(1 1 1)))))
(prn c)
($gs! c g)
(prn ($gradient k))
(prn ($gradient x))
($gd! (list x k) 0.01)
(prn k))
;; conv2, xcorr2 test - every prn should be 0.0
(let* ((x (tensor '((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))))
(k (tensor '((1 2 3) (4 5 6) (7 8 9))))
(ki ($clone k))
(ks ($storage k))
(kis ($storage ki)))
(loop :for i :from (1- ($count ks)) :downto 0
:do (setf ($ kis (- ($count ks) i 1)) ($ ks i)))
(let ((imvc ($conv2 x k))
(imvc2 ($conv2 x k :valid))
(imfc ($conv2 x k :full))
(imvx ($xcorr2 x ki))
(imvx2 ($xcorr2 x ki :valid))
(imfx ($xcorr2 x ki :full)))
(prn ($sum ($sub imvc imvc2)))
(prn ($sum ($sub imvc imvx)))
(prn ($sum ($sub imvc imvx2)))
(prn ($sum ($sub imfc imfx)))
(prn (- ($dot x x) ($ ($xcorr2 x x) 0 0)))
(let ((xx (tensor 2 ($size x 0) ($size x 1)))
(kk (tensor 2 ($size k 0) ($size k 1))))
($copy! ($ xx 0) x)
($copy! ($ xx 1) x)
($copy! ($ kk 0) k)
($copy! ($ kk 1) k)
(let ((immvc ($conv2 xx kk))
(immvc2 ($conv2 xx kk :valid))
(immfc ($conv2 xx kk :full)))
(prn ($sum ($sub ($ immvc 0) ($ immvc 1))))
(prn ($sum ($sub ($ immvc 0) imvc)))
(prn ($sum ($sub ($ immvc2 0) imvc2)))
(prn ($sum ($sub ($ immfc 0) ($ immfc 1))))
(prn ($sum ($sub ($ immfc 0) imfc)))))))
;; conv3, xcorr3 test - should be 0.0 as well
(let* ((x (tensor '(((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))
((9 8 7 6) (8 7 6 5) (7 6 5 4) (6 5 4 3)))))
(k (tensor '(((1 2 3) (2 3 4) (3 4 5))
((9 8 7) (8 7 6) (7 6 5)))))
(ki ($clone k))
(ks ($storage k))
(kis ($storage ki)))
(loop :for i :from (1- ($count ks)) :downto 0
:do (setf ($ kis (- ($count ks) i 1)) ($ ks i)))
(let ((imvc ($conv3 x k))
(imvc2 ($conv3 x k :valid))
(imfc ($conv3 x k :full))
(imvx ($xcorr3 x ki))
(imvx2 ($xcorr3 x ki :valid))
(imfx ($xcorr3 x ki :full)))
(prn ($sum ($sub imvc imvc2)))
(prn ($sum ($sub imvc imvx)))
(prn ($sum ($sub imvc imvx2)))
(prn ($sum ($sub imfc imfx)))
(prn (- ($dot x x) ($ ($xcorr3 x x) 0 0 0)))
(let ((xx (tensor 2 ($size x 0) ($size x 1) ($size x 2)))
(kk (tensor 2 ($size k 0) ($size k 1) ($size k 2))))
($copy! ($ xx 0) x)
($copy! ($ xx 1) x)
($copy! ($ kk 0) k)
($copy! ($ kk 1) k)
(let ((immvc ($conv3 xx kk))
(immvc2 ($conv3 xx kk :valid))
(immfc ($conv3 xx kk :full)))
(prn ($sum ($sub ($ immvc 0) ($ immvc 1))))
(prn ($sum ($sub ($ immvc 0) imvc)))
(prn ($sum ($sub ($ immvc2 0) imvc2)))
(prn ($sum ($sub ($ immfc 0) ($ immfc 1))))
(prn ($sum ($sub ($ immfc 0) imfc)))))))
;; xcorr3 and xcorr2, valid
(let* ((x (tensor '(((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))
((9 8 7 6) (8 7 6 5) (7 6 5 4) (6 5 4 3)))))
(k (tensor '(((1 2 3) (2 3 4) (3 4 5))
((9 8 7) (8 7 6) (7 6 5)))))
(o3 ($xcorr3 x k))
(o32 ($zero o3)))
(loop :for i :from 0 :below ($size o3 0)
:do (loop :for j :from 0 :below ($size k 0)
:do ($add! ($ o32 i) ($xcorr2 ($ x (+ i j)) ($ k j)))))
(prn ($sum ($sub o3 o32))))
;; xcorr3 and xcorr2, full
(let* ((x (tensor '(((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))
((9 8 7 6) (8 7 6 5) (7 6 5 4) (6 5 4 3)))))
(k (tensor '(((1 2 3) (2 3 4) (3 4 5))
((9 8 7) (8 7 6) (7 6 5)))))
(o3 ($xcorr3 x k :full))
(o32 ($zero o3)))
(loop :for i :from 0 :below ($size x 0)
:do (loop :for j :from 0 :below ($size k 0)
:do ($add! ($ o32 (+ i j)) ($xcorr2 ($ x i) ($ k (- ($size k 0) j 1)) :full))))
(prn ($sum ($sub o3 o32))))
;; conv3 and conv2, valid
(let* ((x (tensor '(((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))
((9 8 7 6) (8 7 6 5) (7 6 5 4) (6 5 4 3)))))
(k (tensor '(((1 2 3) (2 3 4) (3 4 5))
((9 8 7) (8 7 6) (7 6 5)))))
(o3 ($conv3 x k))
(o32 ($zero o3)))
(loop :for i :from 0 :below ($size o3 0)
:do (loop :for j :from 0 :below ($size k 0)
:do ($add! ($ o32 i) ($conv2 ($ x (+ i j)) ($ k (- ($size k 0) j 1))))))
(prn ($sum ($sub o3 o32))))
;; conv3 and conv2, full
(let* ((x (tensor '(((1 2 3 4) (2 3 4 5) (3 4 5 6) (4 5 6 7))
((9 8 7 6) (8 7 6 5) (7 6 5 4) (6 5 4 3)))))
(k (tensor '(((1 2 3) (2 3 4) (3 4 5))
((9 8 7) (8 7 6) (7 6 5)))))
(o3 ($conv3 x k :full))
(o32 ($zero o3)))
(loop :for i :from 0 :below ($size x 0)
:do (loop :for j :from 0 :below ($size k 0)
:do ($add! ($ o32 (+ i j)) ($conv2 ($ x i) ($ k j) :full))))
(prn ($sum ($sub o3 o32))))
(let* ((inp 5)
(kw 3)
(dw 1)
(w (rnd inp 1 kw))
(b (rnd inp))
(x (rnd 8 inp)))
(prn ($rowconv1d x w b kw dw)))
(let* ((inp 5)
(outp 1)
(kw 1)
(dw 1)
(w (rnd outp (* inp kw)))
(b (rnd outp))
(x (rnd 7 inp)))
(prn ($conv1d x w b kw dw)))
| 10,925
|
Common Lisp
|
.lisp
| 295
| 27.132203
| 97
| 0.387821
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
4d5ca73af5e86500945d096bc4790e571cfa794245b0e4efb2b78aeac1be8e14
| 3,230
|
[
-1
] |
3,231
|
file.lisp
|
chunsj_TH/examples/intro/file.lisp
|
(defpackage th.file-examples
(:use #:common-lisp
#:mu
#:th))
(in-package :th.file-examples)
(let ((f (file.disk "thfile.dat" "w"))
(s (storage.double '(1 2 3 4))))
($fwrite s f)
($fclose f))
(let ((f (file.disk "thfile.dat" "r"))
(s (storage.double)))
($fread s f)
($fclose f)
(print s))
(let ((f (file.disk "thfile.dat" "w"))
(s (storage.double '(1 2 3 4))))
(setf ($fbinaryp f) t)
($fwrite s f)
($fclose f))
(let ((f (file.disk "thfile.dat" "r"))
(s (storage.double)))
(setf ($fbinaryp f) t)
($fread s f)
($fclose f)
(print s))
(let ((f (file.disk "thfile.dat" "w"))
(x (tensor.double '((1 2) (3 4)))))
($fwrite x f)
($fclose f))
(let ((f (file.disk "thfile.dat" "r"))
(x (tensor.double)))
($fread x f)
($fclose f)
(print x))
(let ((f (file.disk "thfile.dat" "w"))
(x (tensor.double '((1 2) (3 4)))))
(setf ($fbinaryp f) t)
($fwrite x f)
($fclose f))
(let ((f (file.disk "thfile.dat" "r"))
(x (tensor.double)))
(setf ($fbinaryp f) t)
($fread x f)
($fclose f)
(print x))
(let ((f (file.disk "thfile.dat" "w"))
(x (tensor)))
($fwrite x f)
($fclose f))
(let ((f (file.disk "thfile.dat" "r"))
(x (tensor)))
($fread x f)
($fclose f)
(print x))
| 1,288
|
Common Lisp
|
.lisp
| 54
| 20.12963
| 41
| 0.533933
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
e4e0340da411e9f984c419777abab450b227fdc003a20f76d98ba1d979fd08b6
| 3,231
|
[
-1
] |
3,232
|
etc.lisp
|
chunsj_TH/examples/intro/etc.lisp
|
(defpackage :etc-examples
(:use #:common-lisp
#:mu
#:th))
;; some examples from numcl project
;; https://github.com/numcl/numcl/blob/master/example.lisp
(in-package :etc-examples)
;; creation
(prn (arange 0 10))
;; reshaping
(prn ($reshape (arange 0 10) 2 5))
;; arange with negative step
(prn (arange 10 -10 -3))
;; concatenation
(prn ($cat (zeros 10) (ones 10)))
(prn ($cat ($reshape (zeros 10) 2 5)
($reshape (ones 10) 2 5)))
(prn ($cat ($reshape (zeros 10) 2 5)
($reshape (ones 10) 2 5)
1))
| 552
|
Common Lisp
|
.lisp
| 20
| 23.75
| 58
| 0.61597
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
4306073bef04dcc4d281753238cb5423b03fb8eb828347c6f9f3a4fe41ea0350
| 3,232
|
[
-1
] |
3,233
|
bp.lisp
|
chunsj_TH/examples/intro/bp.lisp
|
(defpackage th.ad-example
(:use #:common-lisp
#:mu
#:th))
(in-package :th.ad-example)
;; broadcast
(let* ((x ($parameter 5))
(y (tensor '(1 2 3)))
(out ($broadcast x y)))
($gs! out (tensor '(1 2 3)))
(prn ($gradient x)))
(let* ((a (tensor '(5 5 5)))
(c ($parameter 5))
(out ($broadcast c a)))
($gs! out)
(prn out)
(prn "3" ($gradient c)))
;; add
(let* ((a ($parameter (tensor '(1 1 1))))
(b ($parameter (tensor '(1 1 1))))
(out ($add a b)))
($gs! out (tensor '(1 2 3)))
(prn ($gradient a))
(prn ($gradient b)))
(let* ((a ($parameter '(1 1 1)))
(b ($parameter '(1 1 1)))
(out ($+ a b)))
($gs! out (tensor '(1 2 3)))
(prn ($gradient a))
(prn ($gradient b)))
;; sub
(let* ((x (tensor '(1 2 3)))
(y ($parameter (tensor '(3 2 1))))
(out ($sub x y)))
($gs! out (tensor '(1 1 1)))
(prn ($gradient y)))
(let* ((x ($parameter '(1 2 3)))
(y ($parameter '(3 2 1)))
(out ($- x y)))
($gs! out (tensor '(1 1 1)))
(prn ($gradient x))
(prn ($gradient y)))
;; dot
(let* ((x ($parameter (tensor '(1 2 3))))
(y (tensor '(1 2 3)))
(out ($dot x y)))
(prn out)
(prn ($gradient x)))
;; update
(let* ((a (tensor '(1 1 1)))
(b ($parameter (tensor '(1 2 3))))
(out ($dot a b)))
(prn out)
($gd! b)
(prn b))
(let* ((a (tensor '(1 1 1)))
(b ($parameter '(1 2 3)))
(out ($@ a b)))
(prn out)
($gd! b)
(prn b))
;; mv
(let* ((X (tensor '((1) (3))))
(b ($parameter '(10)))
(out ($mv X b)))
(prn out)
(prn ($gradient b)))
(let* ((m ($parameter (tensor '((2 0) (0 2)))))
(v (tensor '(2 3)))
(out ($mv m v)))
($gs! out)
(prn (tensor '((2.0 3.0) (2.0 3.0))))
(prn ($gradient m)))
(let* ((a ($parameter '((1 1 1) (1 1 1))))
(b ($parameter '((0.1) (0.1) (0.1))))
(c ($mm a b)))
($gs! c)
(prn ($gradient a))
(prn ($gradient b)))
(let* ((a ($parameter '((1 1 1) (1 1 1))))
(b ($parameter '((0.1) (0.1) (0.1))))
(out ($sigmoid ($mm a b))))
($gs! out)
(prn "2.44458...")
(prn ($gradient a)))
;; update effect for sum
(let* ((x ($parameter '((1 2 3) (4 5 6) (7 8 9))))
(y 40)
(out ($sum x))
(delta ($sub out y))
(loss ($dot delta delta)))
(prn out)
(prn loss)
($gs! loss)
(prn ($gradient x))
($gd! x 0.01)
(prn x)
(prn "decreased")
(prn ($sum x)))
;; mean
(let* ((x ($parameter '((1 2 3) (4 5 6) (7 8 9))))
(y 6)
(out ($mean x)))
(prn out)
(loop :for i :from 1 :to 50
:for y* = ($mean x)
:for d = ($sub y* y)
:for l = ($dot d d)
:do (progn
($gs! l)
($gd! x 0.9)))
(prn x)
(prn "closer to 6")
(prn ($mean x)))
;; max
(let* ((x ($parameter '((1 2) (3 4) (5 6))))
(y 5)
(out ($max x)))
(prn out)
(loop :for i :from 1 :to 50
:for y* = ($max x)
:for d = ($sub y* y)
:for l = ($dot d d)
:do (progn
($gs! l)
($gd! x 0.1)))
(prn x)
(prn "closer to 5")
(prn ($max x)))
;; min
(let* ((x ($parameter '((1 2) (3 4) (5 6))))
(y 5)
(out ($min x)))
(prn out)
(loop :for i :from 1 :to 50
:for y* = ($min x)
:for d = ($sub y* y)
:for l = ($dot d d)
:do (progn
($gs! l)
($gd! x 0.5)))
(prn x)
(prn "closer to 5")
(prn ($min x)))
;; reshape
(let* ((x ($parameter '((1 2) (3 4))))
(a (tensor '(1 2 3 4)))
(y 20)
(out ($dot ($reshape x 1 4) a))
(delta ($sub out y))
(loss ($dot delta delta)))
(prn out)
(prn delta)
(prn loss)
($gs! loss)
(prn ($gradient x))
(prn x)
($gd! x)
(prn x)
(prn "supposed to be decreased")
(prn ($sub ($dot ($reshape x 1 4) a) y)))
;; with transpose
(let* ((x ($parameter '((1 2) (3 4))))
(a (tensor '(1 2 3 4)))
(y 20)
(out ($dot ($reshape ($transpose x) 1 4) a))
(delta ($sub out y))
(loss ($dot delta delta)))
(prn out)
(prn loss)
($gs! loss)
(prn ($gradient x))
($gd! x)
(prn x)
(prn ($dot ($reshape ($transpose x) 1 4) a))
(loop :for i :from 1 :to 10
:for y* = ($dot ($reshape ($transpose x) 1 4) a)
:for d = ($sub y* y)
:for l = ($dot d d)
:do (progn
($gs! l)
($gd! x 0.01)))
(prn x)
(prn ($dot ($reshape ($transpose x) 1 4) a)))
;; from chainer ad example
(let* ((x ($parameter '(5)))
(y ($+ ($expt x 2) ($* -2 x) 1))
(y1 ($expt x 2))
(y2 ($* -2 x))
(y3 ($+ y1 y2 1)))
(prn y)
($gs! y)
(prn "DY/DX" ($gradient x))
(prn y3)
($gs! y3)
(prn "DY3/DX" ($gradient x)))
(let* ((x ($parameter '(5)))
(z ($* -2 x))
(y ($+ ($expt x 2) z 1)))
(prn y)
($gs! y)
(prn "DY/DX" ($gradient x)))
(let* ((x ($parameter '((1 2 3) (4 5 6))))
(y ($+ ($expt x 2) ($* -2 x) 1)))
(prn y)
($gs! y)
(prn "DY/DX" ($gradient x)))
;; supports function
(defun muladd (x y z) ($+ ($* x y) z))
(let* ((x ($parameter ($- ($* 2 (rnd 3 2)) 1)))
(y ($parameter ($- ($* 2 (rnd 3 2)) 1)))
(z ($parameter ($- ($* 2 (rnd 3 2)) 1)))
(r (muladd x y z)))
(prn r)
($gs! r)
(prn "X" x)
(prn "Y" y)
(prn "DR/DX=Y" ($gradient x))
(prn "DR/DY=X" ($gradient y))
(prn "DR/DZ=1" ($gradient z)))
;; linear mapping
(let* ((X (tensor '((1) (3))))
(Y (tensor '(-10 -30)))
(c ($parameter 0))
(b ($parameter '(10))))
(loop :for i :from 0 :below 2000
:do (let* ((d ($sub ($add ($mv X b) ($broadcast c Y)) Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c)
($gd! b)))
(prn b))
(let* ((X (tensor '((1) (3))))
(Y (tensor '(-10 -30)))
(c ($parameter 0))
(b ($parameter '(10))))
(loop :for i :from 0 :below 2000
:do (let* ((d ($- ($+ ($@ X b) ($broadcast c Y)) Y))
(out ($@ d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c)
($gd! b)))
(prn b))
(let* ((X ($transpose! (range 0 10)))
(Y (range 0 10))
(c ($parameter 0))
(b ($parameter '(0)))
(a 0.001))
(loop :for i :from 0 :below 2000
:do (let* ((Y* ($add ($mv X b) ($broadcast c Y)))
(d ($sub Y* Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c a)
($gd! b a)))
(prn b))
(let* ((X ($transpose! (range 0 10)))
(Y (range 0 10))
(c ($parameter 0))
(b ($parameter '(0)))
(a 0.001))
(loop :for i :from 0 :below 2000
:do (let* ((Y* ($+ ($@ X b) ($broadcast c Y)))
(d ($- Y* Y))
(out ($@ d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c a)
($gd! b a)))
(prn b))
(let* ((X (-> (tensor '((1 1 2)
(1 3 1)))
($transpose!)))
(Y (tensor '(1 2 3)))
(c ($parameter 0))
(b ($parameter '(1 1)))
(a 0.05))
(loop :for i :from 0 :below 1000
:do (let* ((d ($sub ($add ($mv X b) ($broadcast c Y)) Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c a)
($gd! b a)))
(prn b)
(prn c))
(let* ((X (-> (tensor '((1 1 2)
(1 3 1)))
($transpose!)))
(Y (tensor '(1 2 3)))
(c ($parameter 0))
(b ($parameter '(1 1)))
(a 0.05))
(loop :for i :from 0 :below 1000
:do (let* ((d ($- ($+ ($@ X b) ($broadcast c Y)) Y))
(out ($@ d d)))
(when (zerop (mod i 100)) (prn (list i ($data out))))
($gd! c a)
($gd! b a)))
(prn b)
(prn c))
;; regressions
(let* ((X (-> (tensor '(1 3))
($transpose!)))
(Y (tensor '(-10 -30)))
(c ($parameter 0))
(b ($parameter (tensor '(10))))
(a 0.02))
(loop :for i :from 0 :below 1000
:do (let* ((d ($sub ($add ($mv X b) ($broadcast c Y)) Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! c a)
($gd! b a)))
(prn b)
(prn ($add ($mv X b) ($broadcast c Y))))
(let* ((X (-> (tensor '(1 3))
($transpose!)))
(Y (tensor '(-10 -30)))
(c ($parameter 0))
(b ($parameter '(10)))
(a 0.02))
(loop :for i :from 0 :below 1000
:do (let* ((d ($- ($+ ($@ X b) ($broadcast c Y)) Y))
(out ($@ d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! c a)
($gd! b a)))
(prn ($+ ($@ X b) ($broadcast c Y))))
(let* ((X (tensor '((5 2) (-1 0) (5 2))))
(Y (tensor '(1 0 1)))
(c ($parameter (tensor '(0 0 0))))
(b ($parameter (tensor '(0 0))))
(a 0.1))
(loop :for i :from 0 :below 1000
:do (let* ((Y* ($sigmoid ($add ($mv X b) c)))
(out ($bce Y* Y)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! c a)
($gd! b a)))
(prn ($sigmoid ($add ($mv X b) c))))
;; xor
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 1.0))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($sigmoid ($mm X w1)))
(l2 ($sigmoid ($mm l1 w2)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)))
(prn w1)
(prn w2)
(prn (let* ((l1 ($sigmoid ($mm X w1)))
(l2 ($sigmoid ($mm l1 w2))))
l2)))
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(b1 ($parameter (zeros 3)))
(b2 ($parameter (ones 1)))
(o1 (ones 4))
(o2 (ones 4))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 1))
(loop :for i :from 0 :below 1000
:do (let* ((xw1 ($mm X w1))
(xwb1 ($add xw1 ($vv o1 b1)))
(l1 ($sigmoid xwb1))
(lw2 ($mm l1 w2))
(lwb2 ($add lw2 ($vv o2 b2)))
(l2 ($sigmoid lwb2))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)
($gd! b1 a)
($gd! b2 a)))
(prn w1)
(prn b1)
(prn w2)
(prn (let* ((l1 ($sigmoid ($add ($mm X w1) ($vv o1 b1))))
(l2 ($sigmoid ($add ($mm l1 w2) ($vv o2 b2)))))
l2)))
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(b1 ($parameter (ones 3)))
(b2 ($parameter (ones 1)))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 5))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($sigmoid ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)
($gd! b1 a)
($gd! b2 a)))
(prn (let* ((l1 ($sigmoid ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2))))
l2)))
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 1))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($tanh ($mm X w1)))
(l2 ($sigmoid ($mm l1 w2)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)))
(prn (let* ((l1 ($tanh ($mm X w1)))
(l2 ($sigmoid ($mm l1 w2))))
l2)))
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(b1 ($parameter (ones 3)))
(b2 ($parameter (ones 1)))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 1))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($tanh ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)
($gd! b1 a)
($gd! b2 a)))
(prn (let* ((l1 ($tanh ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2))))
l2)))
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(b1 ($parameter (ones 3)))
(b2 ($parameter (ones 1)))
(o1 (ones 4))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 0.2))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($tanh ($xwpb X w1 b1 o1)))
(l2 ($sigmoid ($xwpb l1 w2 b2 o1)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($gd! w1 a)
($gd! w2 a)
($gd! b1 a)
($gd! b2 a)))
(prn (let* ((l1 ($tanh ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2))))
l2)))
;; momentum
(let* ((w1 ($parameter (rndn 2 3)))
(w2 ($parameter (rndn 3 1)))
(b1 ($parameter (ones 3)))
(b2 ($parameter (ones 1)))
(o1 (ones 4))
(X (tensor '((0 0) (0 1) (1 0) (1 1))))
(Y (tensor '(0 1 1 0)))
(a 0.2))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($tanh ($xwpb X w1 b1 o1)))
(l2 ($sigmoid ($xwpb l1 w2 b2 o1)))
(d ($sub l2 Y))
(out ($dot d d)))
(when (zerop (mod i 100)) (prn ($data out)))
($mgd! w1 a)
($mgd! w2 a)
($mgd! b1 a)
($mgd! b2 a)))
(prn (let* ((l1 ($tanh ($xwpb X w1 b1)))
(l2 ($sigmoid ($xwpb l1 w2 b2))))
l2)))
;; Forward pass for one dense layer: logistic(input . weight).
;; $sigmoid! mutates its argument in place; safe here because the $@
;; matrix product allocates a fresh tensor.
(defun fwd (input weight) ($sigmoid! ($@ input weight)))
;; Backward helper: delta * output * (1 - output), i.e. the incoming
;; gradient scaled by the derivative of the logistic activation.
(defun dwb (delta output) ($* delta output ($- 1 output)))
;; XOR learned with hand-written backprop on plain tensors (no autograd):
;; 4 samples x 3 features, the constant third column acting as a bias input.
(let* ((X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
       (Y (tensor '((0) (1) (1) (0))))
       (w1 (rndn 3 3))
       (w2 (rndn 3 1))
       (lr 1))
  (loop :for i :from 0 :below 1000
        :do (let* ((l1 (fwd X w1))
                   (l2 (fwd l1 w2))
                   ;; NOTE: y and Y read as the same symbol under the default
                   ;; upcasing Common Lisp reader, so this is not a typo bug.
                   (l2d (dwb ($- l2 y) l2))
                   (l1d (dwb ($@ l2d ($transpose w2)) l1))
                   ;; Weight gradients: layer-input^T . layer-delta.
                   (dw2 ($@ ($transpose l1) l2d))
                   (dw1 ($@ ($transpose X) l1d)))
              ;; Plain SGD step, applied destructively to the weights.
              ($sub! w1 ($* lr dw1))
              ($sub! w2 ($* lr dw2))))
  (prn (fwd (fwd X w1) w2)))
(let* ((w1 ($parameter (rndn 3 3)))
(w2 ($parameter (rndn 3 1)))
(X (tensor '((0 0 1) (0 1 1) (1 0 1) (1 1 1))))
(Y (tensor '(0 1 1 0)))
(lr 1))
(loop :for i :from 0 :below 1000
:do (let* ((l1 ($sigmoid ($mm X w1)))
(l2 ($sigmoid ($mm l1 w2)))
(d ($sub l2 Y))
(out ($dot d d)))
($gs! out 1)
($gd! w1 lr)
($gd! w2 lr)))
(prn ($sigmoid ($mm ($sigmoid ($mm X w1)) w2))))
| 15,621
|
Common Lisp
|
.lisp
| 517
| 22.073501
| 67
| 0.398964
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
11e00705df677ef445f24c70dd7da144a7e308b07c7a6093a5338e1a1173fee7
| 3,233
|
[
-1
] |
3,234
|
dcgan2-layers.lisp
|
chunsj_TH/examples/gan/dcgan2-layers.lisp
|
;; https://medium.com/@jonathan_hui/gan-dcgan-deep-convolutional-generative-adversarial-networks-df855c438f
(defpackage :dcgan2-layers
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.layers
#:th.db.mnist))
(in-package :dcgan2-layers)
;; Loads MNIST and slices the training images into BATCH-COUNT contiguous
;; batches of BATCH-SIZE, each reshaped to (batch 1 28 28).  The 2x-1
;; transform rescales the pixel values to the tanh output range of the
;; generator (assumes the raw images are in [0,1] -- TODO confirm against
;; read-mnist-data).
(defun build-batches (batch-size batch-count)
  (let ((mnist (read-mnist-data)))
    (loop :for i :from 0 :below batch-count
          :for s = (* i batch-size)
          :for e = (* (1+ i) batch-size)
          :for r = (loop :for k :from s :below e :collect k)
          :collect ($contiguous! ($reshape! ($- ($* 2 ($index ($ mnist :train-images) 0 r)) 1)
                                           batch-size 1 28 28)))))
(defparameter *batch-size* 120)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-batches* (build-batches *batch-size* *batch-count*))
(defparameter *latent-dim* 100)
(defparameter *generator* (sequential-layer
(reshape-layer *latent-dim* 1 1)
(full-convolution-2d-layer *latent-dim* 128 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 128 64 3 3
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 64 32 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 32 1 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :tanh)))
(defparameter *discriminator* (sequential-layer
(convolution-2d-layer 1 64 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 64 32 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 32 16 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 16 1 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :sigmoid)
(reshape-layer 1)))
(defparameter *lr* 1E-3)
(defparameter *real-labels* (ones *batch-size*))
(defparameter *fake-labels* (zeros *batch-size*))
;; Applies one $amgd! step (presumably Adam/AMSGrad with betas 0.5/0.999 --
;; confirm against TH) to MODEL's parameters, then clears accumulated
;; gradients on BOTH networks so the next forward pass starts clean.
(defun optim (model)
  ($amgd! model *lr* 0.5 0.999)
  ($cg! *generator*)
  ($cg! *discriminator*))
(defun generate (&key (trainp t))
  "generates fake images from random normal inputs"
  ;; Draws z ~ N(0,1) of shape (batch latent-dim); TRAINP is forwarded to
  ;; the layer stack to toggle training-time behavior.
  ($execute *generator* (rndn *batch-size* *latent-dim*) :trainp trainp))
(defun discriminate (xs &key (trainp t))
  "check whether inputs are real or fake"
  ;; Returns per-sample scores in [0,1]; the discriminator's final layer
  ;; uses a sigmoid activation.
  ($execute *discriminator* xs :trainp trainp))
;; One discriminator update: score a fresh batch of fakes against label 0
;; and the real batch XS against label 1, sum the two BCE losses, and step
;; only the discriminator's parameters (via optim).
(defun train-discriminator (xs &optional verbose)
  "teaching discriminator how to discriminate reals from fakes"
  (let* ((fake-scores (discriminate (generate)))
         (real-scores (discriminate xs))
         (fake-loss ($bce fake-scores *fake-labels*))
         (real-loss ($bce real-scores *real-labels*))
         (dloss ($+ fake-loss real-loss)))
    (when verbose (prn " DL:" (if ($parameterp dloss) ($data dloss) dloss)))
    (optim *discriminator*)))
;; One generator update: score fresh fakes and train toward the "real"
;; labels (the non-saturating generator objective), stepping only the
;; generator's parameters.
(defun train-generator (&optional verbose)
  "teaching generator how to create more real fakes"
  (let* ((fake-scores (discriminate (generate)))
         (gloss ($bce fake-scores *real-labels*)))
    (when verbose (prn " GL:" (if ($parameterp gloss) ($data gloss) gloss)))
    (optim *generator*)))
;; Blits one single-channel image tensor TX (shape (1 h w)) into grid cell
;; (X = column, Y = row) of the 8-bit image IMG, mapping a value v to the
;; pixel round(255 * (v+1)/2) -- i.e. tanh-range [-1,1] onto [0,255].
(defun write-tensor-at (img x y tx)
  (let* ((h ($size tx 1))
         (w ($size tx 2))
         (sx (* x w))   ;; pixel-column offset of the cell
         (sy (* y h)))  ;; pixel-row offset of the cell
    (loop :for j :from 0 :below h
          :do (loop :for i :from 0 :below w
                    :for v = ($ tx 0 j i)
                    :for px = (round (* 255 (* 0.5 (1+ v))))
                    :do (setf (aref img (+ sy j) (+ sx i)) px)))))
(defun outpngs (data fname)
  "Tiles the batch of generated images DATA into an nr x nc grid and
writes it to FNAME as an 8-bit grayscale PNG (via write-tensor-at)."
  (let* ((h ($size ($ data 0) 1))
         (w ($size ($ data 0) 2))
         (dn ($size data 0))
         (n (ceiling (sqrt dn)))
         (nc n)                       ;; grid columns
         (nr (ceiling (/ dn n)))      ;; grid rows
         (img (opticl:make-8-bit-gray-image (* nr h) (* nc w))))
    (loop :for sy :from 0 :below nr
          :do (loop :for sx :from 0 :below nc
                    ;; BUGFIX: the linear index of cell (row sy, col sx) is
                    ;; sy*nc + sx; the original used sy*nr, which skips or
                    ;; duplicates tiles whenever nr /= nc.
                    :for idx = (+ (* sy nc) sx)
                    :for tx = (when (< idx dn) ($ data idx))
                    :when tx
                    :do (write-tensor-at img sx sy tx)))
    (opticl:write-png-file fname img)))
;; One training step on batch XS: update the discriminator, then the
;; generator; every 500 batches dump a grid of generated samples to the
;; desktop; every 50 batches log progress and poll the heap size.
(defun train (xs epoch idx)
  (let ((verbose (zerop (rem idx 50))))
    (when verbose (prn "EPOCH/IDX>" epoch ":" idx))
    ;; The k-loop runs once; presumably kept so the critic-steps-per-
    ;; generator-step ratio can be raised later -- confirm intent.
    (loop :for k :from 0 :below 1
          :do (train-discriminator xs verbose))
    (train-generator verbose)
    (when (zerop (rem idx 500))
      (let ((generated (generate :trainp nil))
            (fname (format nil "~A/Desktop/~A-~A.png" (namestring (user-homedir-pathname))
                           epoch idx)))
        (outpngs generated fname)))
    (when verbose (th::th-get-current-heap-size))))
(defparameter *epochs* 20)
($reset! *generator*)
($reset! *discriminator*)
(time
(loop :for epoch :from 1 :to *epochs*
:do (loop :for xs :in *mnist-batches*
:for idx :from 0
:do (train xs epoch idx))))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/images.png" (namestring (user-homedir-pathname)))))
(outpngs generated fname))
(let ((xs (car *mnist-batches*))
(fname (format nil "~A/Desktop/ixs.png" (namestring (user-homedir-pathname)))))
(outpngs xs fname))
(gcf)
(setf *epochs* 1)
| 7,749
|
Common Lisp
|
.lisp
| 147
| 33.387755
| 107
| 0.463122
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
b8dc5ef3baba7785d5b33edc48699c6ce2da87c603fc75e8567190c3b31ac850
| 3,234
|
[
-1
] |
3,235
|
wgan.lisp
|
chunsj_TH/examples/gan/wgan.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2017/02/04/wasserstein-gan/
(defpackage :wgan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :wgan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
;; WGAN critic loss: maximize E[D(real)] - E[D(fake)], written as a
;; minimization by negating the difference of batch means.
(defun lossd (dr df) ($neg ($- ($mean dr) ($mean df))))
;; WGAN generator loss: maximize E[D(fake)], again negated for minimization.
(defun lossg (df) ($neg ($mean df)))
;; $rmgd! step (presumably RMSProp -- confirm against TH) with the small
;; fixed learning rate 5e-5 used in the WGAN recipe.
(defun optm (params) ($rmgd! params 5E-5))
;; Writes a single W x H image tensor DATA (values expected in [0,1],
;; matching the sigmoid generator output) to FNAME as an 8-bit gray PNG.
;; NOTE(review): opticl:make-8-bit-gray-image takes (height width); both
;; are 28 here so the (w h) argument order is harmless -- confirm before
;; using non-square sizes.
(defun outpng (data fname &optional (w 28) (h 28))
  (let ((img (opticl:make-8-bit-gray-image w h))
        (d ($reshape data w h)))
    (loop :for i :from 0 :below h
          :do (loop :for j :from 0 :below w
                    :do (progn
                          (setf (aref img i j) (round (* 255 ($ d i j)))))))
    (opticl:write-png-file fname img)))
;; training data - uses batches for performance, 30, 60 works well
(defparameter *batch-size* 60)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *gen-size* 10)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list *gen-size* *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
;; you can apply leaky relu, $lrelu
;; Generator: z -> affine -> ReLU -> affine -> clamp(+-10) -> sigmoid.
;; The clamp bounds the pre-activation, keeping the sigmoid output away
;; from exact 0/1.
(defun generate (z)
  (-> z
      ($affine *gw1* *gb1* *os*)
      ($relu)
      ($affine *gw2* *gb2* *os*)
      ($clamp -10 10)
      ($sigmoid)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list *img-size* *hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
;; you can apply leaky relu, $lrelu
;; Critic network. Note: no final sigmoid -- a Wasserstein critic outputs
;; an unbounded score, not a probability.
(defun discriminate (x)
  (-> x
      ($affine *dw1* *db1* *os*)
      ($relu)
      ($affine *dw2* *db2* *os*)))
;; Draws a fresh batch of standard-normal latent vectors.
(defun samplez () (rndn *batch-size* *gen-size*))
(defparameter *epoch* 100)
(defparameter *k* 5)
($cg! *discriminator*)
($cg! *generator*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(defun clipwv ()
  "Wasserstein weight clipping: forces every critic parameter into
[-0.01, 0.01] after each update (the Lipschitz constraint from the WGAN
recipe)."
  (loop :for v :in (th::$parameters *discriminator*)
        ;; BUGFIX: by this codebase's naming convention (cf. $reshape vs
        ;; $reshape!, $contiguous!), $clamp is non-destructive -- the
        ;; original computed a clipped copy and discarded it, so the
        ;; weights were never actually clipped.  $clamp! mutates in place.
        :do ($clamp! ($data v) -0.01 0.01)))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for bidx :from 0
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
(clipwv)
($cg! *discriminator*)
($cg! *generator*)))
;; generator
(let* ((df (discriminate (generate z)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*))
(when (zerop (rem bidx 100))
(prn " D/L:" bidx dlv dgv))))
(when (zerop (rem epoch 1))
(let ((g (generate (samplez))))
($cg! *discriminator*)
($cg! *generator*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss *train-count* *k*) (/ gloss *train-count*)))))
;; Tiles 25 (5x5) W x H image tensors from DATA81 into one grayscale PNG.
;; NOTE(review): the inner pixel loops rebind I and J, shadowing the outer
;; tile indices, and sx/sy (derived from j/i) are used as the ROW/COLUMN
;; offsets respectively, so the output grid comes out transposed.  Harmless
;; while tiles and grid are square -- untangle before reusing elsewhere.
(defun outpngs25 (data81 fname &optional (w 28) (h 28))
  (let* ((n 5)
         (img (opticl:make-8-bit-gray-image (* n w) (* n h)))
         (datas (mapcar (lambda (data) ($reshape data w h)) data81)))
    (loop :for i :from 0 :below n
          :do (loop :for j :from 0 :below n
                    :for sx = (* j w)
                    :for sy = (* i h)
                    :for d = ($ datas (+ (* j n) i))
                    :do (loop :for i :from 0 :below h
                              :do (loop :for j :from 0 :below w
                                        :do (progn
                                              (setf (aref img (+ sx i) (+ sy j))
                                                    (round (* 255 ($ d i j)))))))))
    (opticl:write-png-file fname img)))
;; generate samples
(let ((generated ($data (generate (samplez)))))
(outpngs25 (loop :for i :from 0 :below 25
:collect ($index generated 0 i))
(format nil "~A/49.png" *output*))
($cg! *discriminator*)
($cg! *generator*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 6,204
|
Common Lisp
|
.lisp
| 146
| 31.205479
| 89
| 0.500249
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
f29f2835f1363a5e6d17ec9d9729abef44e1b60c944fe37df18bc5a49fdd5a4d
| 3,235
|
[
-1
] |
3,236
|
regan.lisp
|
chunsj_TH/examples/gan/regan.lisp
|
;; https://ajolicoeur.wordpress.com/RelativisticGAN/
(defpackage :regan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.layers
#:th.db.mnist))
(in-package :regan)
(defun build-batches (batch-size batch-count)
(let ((mnist (read-mnist-data)))
(loop :for i :from 0 :below batch-count
:for s = (* i batch-size)
:for e = (* (1+ i) batch-size)
:for r = (loop :for k :from s :below e :collect k)
:collect ($contiguous! ($reshape! ($- ($* 2 ($index ($ mnist :train-images) 0 r)) 1)
batch-size 1 28 28)))))
(defparameter *batch-size* 120)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-batches* (build-batches *batch-size* *batch-count*))
(defparameter *latent-dim* 100)
(defparameter *generator* (sequential-layer
(reshape-layer *latent-dim* 1 1)
(full-convolution-2d-layer *latent-dim* 128 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 128 64 3 3
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 64 32 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 32 1 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :tanh)))
(defparameter *discriminator* (sequential-layer
(convolution-2d-layer 1 64 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 64 32 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 32 16 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 16 1 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :nil)
(reshape-layer 1)))
(defparameter *lr* 1E-3)
(defparameter *real-labels* (ones *batch-size*))
(defun optim (model)
($amgd! model *lr* 0.5 0.999)
($cg! *generator*)
($cg! *discriminator*))
(defun generate (&key (trainp t))
"generates fake images from random normal inputs"
($execute *generator* (rndn *batch-size* *latent-dim*) :trainp trainp))
(defun discriminate (xs &key (trainp t))
"check whether inputs are real or fake"
($execute *discriminator* xs :trainp trainp))
;; Relativistic discriminator update: instead of scoring reals and fakes
;; independently, push D(real) - D(fake) toward the "real" label.
;; $bce* presumably applies a sigmoid internally (the discriminator's last
;; layer here has :activation :nil) -- confirm against th.layers.
(defun train-discriminator (xs &optional verbose)
  "teaching discriminator how to discriminate reals from fakes"
  (let* ((fake-scores (discriminate (generate)))
         (real-scores (discriminate xs))
         (dloss ($bce* ($- real-scores fake-scores) *real-labels*)))
    (when verbose (prn " DL:" (if ($parameterp dloss) ($data dloss) dloss)))
    (optim *discriminator*)))
;; Relativistic generator update: the symmetric objective, pushing
;; D(fake) - D(real) toward the "real" label.
(defun train-generator (xs &optional verbose)
  "teaching generator how to create more real fakes"
  (let* ((fake-scores (discriminate (generate)))
         (real-scores (discriminate xs))
         (gloss ($bce* ($- fake-scores real-scores) *real-labels*)))
    (when verbose (prn " GL:" (if ($parameterp gloss) ($data gloss) gloss)))
    (optim *generator*)))
(defun train (xs epoch idx)
(let ((verbose (zerop (rem idx 50))))
(when verbose (prn "EPOCH/IDX>" epoch ":" idx))
(loop :for k :from 0 :below 1
:do (train-discriminator xs verbose))
(train-generator xs verbose)
(when (zerop (rem idx 500))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/~A-~A.png" (namestring (user-homedir-pathname))
epoch idx)))
(write-tensors-png-file generated fname)))
(when verbose (th::th-get-current-heap-size))))
(defparameter *epochs* 20)
($reset! *generator*)
($reset! *discriminator*)
(time
(loop :for epoch :from 1 :to *epochs*
:do (loop :for xs :in *mnist-batches*
:for idx :from 0
:do (train xs epoch idx))))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/images.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file generated fname))
(let ((xs (car *mnist-batches*))
(fname (format nil "~A/Desktop/ixs.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file xs fname))
(gcf)
(setf *epochs* 1)
| 6,678
|
Common Lisp
|
.lisp
| 120
| 34.45
| 94
| 0.466269
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
d3b7550dec4bbf5388854ff801967c5c8868081499e6a7e79fc069be9512c412
| 3,236
|
[
-1
] |
3,237
|
gan2.lisp
|
chunsj_TH/examples/gan/gan2.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2017/01/20/gan-pytorch/
;; with tanh generator
(defpackage :gan2
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :gan2)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
;; Standard GAN losses.
;; Discriminator: BCE(D(real), 1) + BCE(D(fake), 0).
(defun bced (dr df) ($+ ($bce dr ($one dr)) ($bce df ($zero df))))
;; Generator (non-saturating form): BCE(D(fake), 1).
(defun bceg (df) ($bce df ($one df)))
(defun lossd (dr df) (bced dr df))
(defun lossg (df) (bceg df))
;; $amgd! step (presumably Adam/AMSGrad -- confirm against TH) at lr 1e-3.
(defun optm (params) ($amgd! params 1E-3))
(defun outpng (data fname &optional (w 28) (h 28))
(let ((img (opticl:make-8-bit-gray-image w h))
(d ($reshape data w h)))
(loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img i j) (round (* 255 (* 0.5 (+ 1 ($ d i j)))))))))
(opticl:write-png-file fname img)))
;; training data - uses batches for performance, it affects quantity as well; 60 works well
(defparameter *batch-size* 50)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($- ($* 2 ($index ($ *mnist* :train-images) 0 range)) 1))))
;; check range of -1 to 1
(prn (car *mnist-train-image-batches*))
(prn ($max (car *mnist-train-image-batches*)))
(prn ($min (car *mnist-train-image-batches*)))
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *gen-size* 100)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list *gen-size* *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
(defun generate (z)
(-> z
($affine *gw1* *gb1* *os*)
($lrelu 0.2)
($affine *gw2* *gb2* *os*)
($tanh)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list *img-size* *hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
(defun discriminate (x)
(-> x
($affine *dw1* *db1* *os*)
($lrelu 0.2)
($affine *dw2* *db2* *os*)
($clamp -10 10)
($sigmoid)))
(defun samplez () (rndn *batch-size* *gen-size*))
(defparameter *epoch* 20)
(defparameter *k* 1)
($cg! *discriminator*)
($cg! *generator*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for bidx :from 0
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *discriminator*)
($cg! *generator*)))
;; generator
(let* ((df (discriminate (generate z)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*))
(when (zerop (rem bidx 100))
(prn " D/L:" bidx dlv dgv))))
(when (zerop (rem epoch 1))
(let ((g (generate (samplez))))
($cg! *discriminator*)
($cg! *generator*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss *train-count*) (/ gloss *train-count*)))))
;; Tiles 49 (7x7) generated W x H images into one PNG, mapping tanh-range
;; values [-1,1] to pixels via round(255*(v+1)/2).
;; NOTE(review): as in the other out* helpers, the inner pixel loops shadow
;; I/J and sx/sy swap row/column roles, transposing the output; square
;; tiles and grid keep it from breaking.
(defun outpngs (data fname &optional (w 28) (h 28))
  (let* ((n 7)
         (img (opticl:make-8-bit-gray-image (* n w) (* n h)))
         (data (mapcar (lambda (data) ($reshape data w h)) data)))
    (loop :for i :from 0 :below n
          :do (loop :for j :from 0 :below n
                    :for sx = (* j w)
                    :for sy = (* i h)
                    :for d = ($ data (+ (* j n) i))
                    :do (loop :for i :from 0 :below h
                              :do (loop :for j :from 0 :below w
                                        :do (progn
                                              (setf (aref img (+ sx i) (+ sy j))
                                                    (round (* 255 (* 0.5 (+ 1 ($ d i j)))))))))))
    (opticl:write-png-file fname img)))
;; generate samples
(let ((generated (generate (samplez))))
(outpngs (loop :for i :from 0 :below 49
:collect ($index ($data generated) 0 i))
(format nil "~A/samples.png" *output*))
($cg! *discriminator*)
($cg! *generator*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 6,295
|
Common Lisp
|
.lisp
| 147
| 31.904762
| 97
| 0.504413
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
57415e35bdd05acfed25ae3a220c857c875de1223f7da6aced4e2d41c37946f8
| 3,237
|
[
-1
] |
3,238
|
lsgan.lisp
|
chunsj_TH/examples/gan/lsgan.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2017/03/02/least-squares-gan/
(defpackage :lsgan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :lsgan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
(defun lossd (dr df) ($* 0.5 ($+ ($mean ($expt ($- dr ($one dr)) 2))
($mean ($expt df 2)))))
(defun lossg (df) ($* 0.5 ($mean ($expt ($- df ($one df)) 2))))
(defun optm (params) ($amgd! params 1E-3))
(defun outpng (data fname &optional (w 28) (h 28))
(let ((img (opticl:make-8-bit-gray-image w h))
(d ($reshape data w h)))
(loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img i j) (round (* 255 ($ d i j)))))))
(opticl:write-png-file fname img)))
;; training data - uses batches for performance, it affects quantity as well
(defparameter *batch-size* 30)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(setf *mnist* nil)
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *gen-size* 10)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list *gen-size* *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
(defun generate (z)
(-> z
($affine *gw1* *gb1* *os*)
($lrelu)
($affine *gw2* *gb2* *os*)
($clamp -10 10)
($sigmoid)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list *img-size* *hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
(defun discriminate (x)
(-> x
($affine *dw1* *db1* *os*)
($lrelu)
($affine *dw2* *db2* *os*)))
(defun samplez () (rndn *batch-size* *gen-size*))
(defparameter *epoch* 50)
(defparameter *k* 5)
($cg! *discriminator*)
($cg! *generator*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for bidx :from 0
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *discriminator*)
($cg! *generator*)))
;; generator
(let* ((df (discriminate (generate z)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*))
(when (zerop (rem bidx 100))
(prn " D/L:" bidx dlv dgv))))
(when (zerop (rem epoch 1))
(let ((g (generate (samplez))))
($cg! *discriminator*)
($cg! *generator*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss (* *k* *train-count*)) (/ gloss *train-count*)))))
(defun outpngs (data49 fname &optional (w 28) (h 28))
(let* ((n 4)
(img (opticl:make-8-bit-gray-image (* n w) (* n h)))
(datas (mapcar (lambda (data) ($reshape data w h)) data49)))
(loop :for i :from 0 :below n
:do (loop :for j :from 0 :below n
:for sx = (* j w)
:for sy = (* i h)
:for d = ($ datas (+ (* j n) i))
:do (loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img (+ sx i) (+ sy j))
(round (* 255 ($ d i j)))))))))
(opticl:write-png-file fname img)))
;; generate samples
(let ((generated (generate (samplez))))
(outpngs (loop :for i :from 0 :below 16
:collect ($index ($data generated) 0 i))
(format nil "~A/49.png" *output*))
($cg! *discriminator*)
($cg! *generator*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 6,100
|
Common Lisp
|
.lisp
| 141
| 31.687943
| 92
| 0.495363
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
dd6bed6ecadc5a72e5efab0ae041401df7b3ef8f967faf26d2407df71c7ccc2e
| 3,238
|
[
-1
] |
3,239
|
dcgan.lisp
|
chunsj_TH/examples/gan/dcgan.lisp
|
;; from
;; https://github.com/soumith/dcgan.torch
;; https://towardsdatascience.com/having-fun-with-deep-convolutional-gans-f4f8393686ed
(defpackage :dcgan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :dcgan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
;; png output directory
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
;; 7x7 png output function
(defun outpngs (data fname &optional (w 28) (h 28))
(let* ((n 7)
(img (opticl:make-8-bit-gray-image (* n w) (* n h)))
(data (mapcar (lambda (data) ($reshape data w h)) data)))
(loop :for i :from 0 :below n
:do (loop :for j :from 0 :below n
:for sx = (* j w)
:for sy = (* i h)
:for d = ($ data (+ (* j n) i))
:do (loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img (+ sx i) (+ sy j))
(round (* 255 (* 0.5 (+ 1 ($ d i j)))))))))))
(opticl:write-png-file fname img)))
(defparameter *nz* 100)
(defparameter *imgw* 28)
(defparameter *imgh* 28)
(defparameter *nimg* (* *imgw* *imgh*))
(defparameter *hidden-size* 128)
(defparameter *batch-size* 120)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *generator* (parameters))
(defparameter *gw1* ($push *generator* (vxavier (list *nz* *nimg*))))
(defparameter *gb1* ($push *generator* (zeros *nimg*)))
(defparameter *gk2* ($push *generator* ($* 0.01 (rndn 16 32 4 4))))
(defparameter *gb2* ($push *generator* ($* 0.01 (rndn 32))))
(defparameter *gk3* ($push *generator* ($* 0.04 (rndn 32 1 4 4))))
(defparameter *gb3* ($push *generator* ($* 0.04 (rndn 1))))
;; DCGAN generator: z (batch x *nz*) -> dense to *nimg* -> reshape to
;; 16 x 7 x 7 -> two stride-2 transposed convolutions upsampling
;; 7x7 -> 14x14 -> 28x28 -> tanh output in [-1,1].
(defun generate (z)
  (let ((nbatch ($size z 0)))
    (-> z
        ($affine *gw1* *gb1*)
        ($reshape nbatch 16 7 7) ;; 16 plane, 7x7
        ($selu)
        ($dconv2d *gk2* *gb2* 2 2 1 1) ;; 32 plane, 14x14
        ($selu)
        ($dconv2d *gk3* *gb3* 2 2 1 1) ;; 1 plane, 28x28
        ($tanh))))
;; generator shape checking
(let* ((nbatch 10)
(noise (rndn nbatch *nz*)))
($cg! *generator*)
(prn noise)
(prn (generate noise))
($cg! *generator*))
(defparameter *discriminator* (parameters))
(defparameter *dk1* ($push *discriminator* ($* 0.04 (rndn 32 1 4 4))))
(defparameter *db1* ($push *discriminator* ($* 0.04 (rndn 32))))
(defparameter *dk2* ($push *discriminator* ($* 0.01 (rndn 16 32 4 4))))
(defparameter *db2* ($push *discriminator* ($* 0.01 (rndn 16))))
(defparameter *dw3* ($push *discriminator* ($* 0.03 (rndn *nimg* *hidden-size*))))
(defparameter *db3* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw4* ($push *discriminator* ($* 0.04 (rndn *hidden-size* 1))))
(defparameter *db4* ($push *discriminator* (zeros 1)))
(defun discriminate (x)
(let ((nbatch ($size x 0)))
(-> x
($conv2d *dk1* *db1* 2 2 1 1) ;; 32 plane, 14x14
($lrelu)
($conv2d *dk2* *db2* 2 2 1 1) ;; 16 plane, 7x7
($selu)
($reshape nbatch *nimg*) ;; 1x784, flatten
($affine *dw3* *db3*)
($selu)
($affine *dw4* *db4*) ;; 1x1
($sigmoid))))
;; discriminator shape checking
(let* ((nbatch 10)
(x (rnd nbatch 1 *imgh* *imgw*)))
($cg! *discriminator*)
(prn x)
(prn (discriminate x))
($cg! *discriminator*))
(defun samplez () (rndn *batch-size* *nz*))
(defun bced (dr df) ($+ ($bce dr ($one dr)) ($bce df ($zero df))))
(defun bceg (df) ($bce df ($one df)))
(defun lossd (dr df) (bced dr df))
(defun lossg (df) (bceg df))
(defun optm (params) ($amgd! params 1E-3))
(defparameter *epoch* 20)
(defparameter *k* 1)
($cg! *generator*)
($cg! *discriminator*)
;; renormalize values between -1 and 1.
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($- ($* 2 ($index ($ *mnist* :train-images) 0 range)) 1))))
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *generator*)
($cg! *discriminator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for data :in *train-data-batches*
:for bidx :from 0
:for x = ($reshape data *batch-size* 1 *imgh* *imgw*)
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *generator*)
($cg! *discriminator*)))
;; generator
(let* ((df (discriminate (generate z)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *generator*)
($cg! *discriminator*))
(when (zerop (rem bidx 10))
(prn " D/G:" bidx dlv dgv))))
;; output at every epoch
(prn " LOSS:" epoch (/ dloss *train-count* *k*) (/ gloss *train-count*))
(let ((generated (generate (samplez))))
(outpngs (loop :for i :from 0 :below 49
:collect ($index ($data generated) 0 (random *batch-size*)))
(format nil "~A/samples-~A.png" *output* epoch))
($cg! *generator*)
($cg! *discriminator*)))))
;; generate samples
(let ((generated (generate (samplez))))
(outpngs (loop :for i :from 0 :below 49
:collect ($index ($data generated) 0 (random *batch-size*)))
(format nil "~A/samples.png" *output*))
($cg! *generator*)
($cg! *discriminator*))
;; check training data
(let ((x (car *train-data-batches*)))
(outpngs (loop :for i :from 0 :below 49
:collect ($index x 0 i))
(format nil "~A/images.png" *output*)))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 7,066
|
Common Lisp
|
.lisp
| 170
| 31.582353
| 97
| 0.514049
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
c6197ac36173da67159c3678684cce7c21dfaeabaff7fe9eb12d1959f1092a95
| 3,239
|
[
-1
] |
3,240
|
gan.lisp
|
chunsj_TH/examples/gan/gan.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2017/01/20/gan-pytorch/
(defpackage :gan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :gan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
(defun bced (dr df) ($+ ($bce dr ($one dr)) ($bce df ($zero df))))
(defun bceg (df) ($bce df ($one df)))
(defun lossd (dr df) (bced dr df))
(defun lossg (df) (bceg df))
;; XXX cannot figure out why adam works best (adadelta does not work well)
(defun optm (params) ($amgd! params 1E-3))
(defun outpng (data fname) (write-tensor-png-file ($reshape data 28 28) fname))
;; training data - uses batches for performance, 30, 60 works well
(defparameter *batch-size* 60)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *gen-size* 100)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list *gen-size* *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
;; you can apply leaky relu, $lrelu
(defun generate (z)
(-> z
($affine *gw1* *gb1* *os*)
($relu)
($affine *gw2* *gb2* *os*)
($clamp -10 10)
($sigmoid)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list *img-size* *hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
;; you can apply leaky relu, $lrelu
(defun discriminate (x)
(-> x
($affine *dw1* *db1* *os*)
($relu)
($affine *dw2* *db2* *os*)
($clamp -10 10)
($sigmoid)))
(defun samplez () (rndn *batch-size* *gen-size*))
(defparameter *epoch* 50)
(defparameter *k* 1)
($cg! *discriminator*)
($cg! *generator*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for bidx :from 0
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *discriminator*)
($cg! *generator*)))
;; generator
(let* ((df (discriminate (generate z)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*))
(when (zerop (rem bidx 100))
(prn " D/L:" bidx dlv dgv))))
(when (zerop (rem epoch 1))
(let ((g (generate (samplez))))
($cg! *discriminator*)
($cg! *generator*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss *train-count*) (/ gloss *train-count*)))))
(defun outpngs49 (data81 fname &optional (w 28) (h 28))
(let* ((n 7)
(img (opticl:make-8-bit-gray-image (* n w) (* n h)))
(datas (mapcar (lambda (data) ($reshape data w h)) data81)))
(loop :for i :from 0 :below n
:do (loop :for j :from 0 :below n
:for sx = (* j w)
:for sy = (* i h)
:for d = ($ datas (+ (* j n) i))
:do (loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img (+ sx i) (+ sy j))
(round (* 255 ($ d i j)))))))))
(opticl:write-png-file fname img)))
;; generate samples
(let ((generated (generate (samplez))))
(outpngs49 (loop :for i :from 0 :below 49
:collect ($index ($data generated) 0 i))
(format nil "~A/49.png" *output*))
($cg! *discriminator*)
($cg! *generator*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 5,953
|
Common Lisp
|
.lisp
| 140
| 31.514286
| 89
| 0.508126
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
0c6ad5317397200910a63a1ad14b85844152d3e4861a8ea8ce9e56a45376f75e
| 3,240
|
[
-1
] |
3,241
|
lsgan-layers.lisp
|
chunsj_TH/examples/gan/lsgan-layers.lisp
|
(defpackage :lsgan-layers
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.layers
#:th.db.mnist))
(in-package :lsgan-layers)
(defun build-batches (batch-size batch-count)
(let ((mnist (read-mnist-data)))
(loop :for i :from 0 :below batch-count
:for s = (* i batch-size)
:for e = (* (1+ i) batch-size)
:for r = (loop :for k :from s :below e :collect k)
:collect ($contiguous! ($reshape! ($- ($* 2 ($index ($ mnist :train-images) 0 r)) 1)
batch-size 1 28 28)))))
(defparameter *batch-size* 120)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-batches* (build-batches *batch-size* *batch-count*))
(defparameter *latent-dim* 100)
(defparameter *generator* (sequential-layer
(reshape-layer *latent-dim* 1 1)
(full-convolution-2d-layer *latent-dim* 128 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 128 64 3 3
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 64 32 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(full-convolution-2d-layer 32 1 2 2
:stride-width 2 :stride-height 2
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :tanh)))
(defparameter *discriminator* (sequential-layer
(convolution-2d-layer 1 64 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 64 32 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 32 16 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(convolution-2d-layer 16 1 3 3
:weight-initializer :random-normal
:weight-initialization '(0 0.02)
:activation :selu)
(reshape-layer 1)))
(defparameter *lr* 4E-4)
(defun optim (model) ($amgd! model *lr* 0.5 0.999))
(defun generate (&key (trainp t))
"generates fake images from random normal inputs"
($execute *generator* (rndn *batch-size* *latent-dim*) :trainp trainp))
(defun discriminate (xs &key (trainp t))
"check whether inputs are real or fake"
($execute *discriminator* xs :trainp trainp))
(defun discriminator-loss (fake-scores real-scores)
($* 0.5 ($+ ($mean ($expt ($- real-scores 1) 2))
($mean ($expt fake-scores 2)))))
(defun train-discriminator (xs &optional verbose)
"teaching discriminator how to discriminate reals from fakes"
(let* ((fake-scores (discriminate (generate :trainp nil)))
(real-scores (discriminate xs))
(dloss (discriminator-loss fake-scores real-scores)))
(when verbose (prn " DL:" (if ($parameterp dloss) ($data dloss) dloss)))
(optim *discriminator*)))
(defun generator-loss (fake-scores)
($* 0.5 ($mean ($expt ($- fake-scores 1) 2))))
(defun train-generator (&optional verbose)
"teaching generator how to create more real fakes"
(let* ((fake-scores (discriminate (generate) :trainp nil))
(gloss (generator-loss fake-scores)))
(when verbose (prn " GL:" (if ($parameterp gloss) ($data gloss) gloss)))
(optim *generator*)))
(defun train-batch (xs epoch idx)
(let ((verbose (zerop (rem idx 100))))
(when verbose (prn "EPOCH/IDX>" epoch ":" idx))
(loop :for k :from 0 :below 1
:do (train-discriminator xs verbose))
(train-generator verbose)
(when (zerop (rem idx 500))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/~A-~A.png" (namestring (user-homedir-pathname))
epoch idx)))
(write-tensors-png-file generated fname)))
(when verbose (th::th-get-current-heap-size))))
(defun train (epochs batches)
(loop :for epoch :from 1 :to epochs
:do (loop :for xs :in batches
:for idx :from 0
:do (train-batch xs epoch idx))))
(defparameter *epochs* 40)
($reset! (list *generator* *discriminator*))
(time (train *epochs* *mnist-batches*))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/images.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file generated fname))
(let ((xs (car *mnist-batches*))
(fname (format nil "~A/Desktop/ixs.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file xs fname))
(gcf)
(setf *epochs* 1)
(th::th-get-current-heap-size)
| 6,828
|
Common Lisp
|
.lisp
| 120
| 35.625
| 94
| 0.470905
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
ca326d0c05192a2a1709bff06afa2f70a925d0af12b24caba8bf659347bd08b7
| 3,241
|
[
-1
] |
3,242
|
gan-simple.lisp
|
chunsj_TH/examples/gan/gan-simple.lisp
|
;; from
;; https://medium.com/@devnag/generative-adversarial-networks-gans-in-50-lines-of-code-pytorch-e81b79659e3f
(defpackage :gan-simple
(:use #:common-lisp
#:mu
#:th))
(in-package :gan-simple)
;; this simple - in terms of execution speed and complexity - gan example fits gaussian
;; random distribution defined by following parameters
(defparameter *data-mean* 4)
(defparameter *data-sd* 1.25)
(defparameter *g-input-size* 1)
(defparameter *g-hidden-size* 50)
(defparameter *g-output-size* 1)
(defparameter *d-input-size* 100)
(defparameter *d-hidden-size* 50)
(defparameter *d-output-size* 1)
(defparameter *minibatch-size* *d-input-size*)
(defparameter *d-learning-rate* 2E-4)
(defparameter *g-learning-rate* 2E-4)
(defparameter *beta1* 0.9)
(defparameter *beta2* 0.999)
(defparameter *num-epochs* 30000)
(defparameter *print-interval* 500)
(defparameter *d-steps* 1)
(defparameter *g-steps* 1)
(defun get-distribution-sampler (mu sigma)
(lambda (n)
($+ mu ($* (rndn 1 n) sigma))))
(defun get-generator-input-sampler ()
(lambda (m n) (rnd m n)))
(defparameter *generator* (parameters))
(defparameter *gw1* ($push *generator* (vxavier (list *g-input-size* *g-hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *g-hidden-size*)))
(defparameter *gw2* ($push *generator* (vxavier (list *g-hidden-size* *g-hidden-size*))))
(defparameter *gb2* ($push *generator* (zeros *g-hidden-size*)))
(defparameter *gw3* ($push *generator* (vxavier (list *g-hidden-size* *g-output-size*))))
(defparameter *gb3* ($push *generator* (zeros *g-output-size*)))
(defun generate (x)
(let* ((z1 ($affine x *gw1* *gb1*))
(a1 ($elu z1))
(z2 ($affine a1 *gw2* *gb2*))
(a2 ($elu z2)))
($affine a2 *gw3* *gb3*)))
(defparameter *discriminator* (parameters))
(defparameter *dw1* ($push *discriminator* (vxavier (list *d-input-size* *d-hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *d-hidden-size*)))
(defparameter *dw2* ($push *discriminator* (vxavier (list *d-hidden-size* *d-hidden-size*))))
(defparameter *db2* ($push *discriminator* (zeros *d-hidden-size*)))
(defparameter *dw3* ($push *discriminator* (vxavier (list *d-hidden-size* *d-output-size*))))
(defparameter *db3* ($push *discriminator* (zeros *d-output-size*)))
(defun discriminate (x)
(let* ((z1 ($affine x *dw1* *db1*))
(a1 ($elu z1))
(z2 ($affine a1 *dw2* *db2*))
(a2 ($elu z2))
(z3 ($affine a2 *dw3* *db3*)))
($sigmoid z3)))
(defparameter *d-sampler-fn* (get-distribution-sampler *data-mean* *data-sd*))
(defun d-sampler (n) (funcall *d-sampler-fn* n))
(defparameter *gi-sampler-fn* (get-generator-input-sampler))
(defun gi-sampler (m n) (funcall *gi-sampler-fn* m n))
(time
(loop :for epoch :from 1 :to *num-epochs*
:do (progn
(loop :for dstep :from 0 :below *d-steps*
:do (progn
($cg! *generator*)
($cg! *discriminator*)
(let* ((d-real-data (d-sampler *d-input-size*))
(d-real-decision (discriminate d-real-data))
(d-real-error ($bce d-real-decision (ones 1)))
(d-gen-input (gi-sampler *minibatch-size* *g-input-size*))
(d-fake-data (generate d-gen-input))
(d-fake-decision (discriminate ($transpose d-fake-data)))
(d-fake-error ($bce d-fake-decision (zeros 1))))
(when (zerop (rem epoch *print-interval*))
(prn "EPOCH =>" epoch)
(prn "DRE/DFE:" d-real-error d-fake-error)
(prn " DSTAT:" ($mean d-real-data) ($sd d-real-data))
(prn " FSTAT:" ($mean d-fake-data) ($sd d-fake-data)))
($amgd! *discriminator* *d-learning-rate* *beta1* *beta2*)
($cg! *generator*)
($cg! *discriminator*))))
(loop :for gstep :from 0 :below *g-steps*
:do (progn
($cg! *generator*)
($cg! *discriminator*)
(let* ((gen-input (gi-sampler *minibatch-size* *g-input-size*))
(g-fake-data (generate gen-input))
(dg-fake-decision (discriminate ($transpose g-fake-data)))
(g-error ($bce dg-fake-decision (ones 1))))
(when (zerop (rem epoch *print-interval*))
(prn "GE:" ($data g-error)))
($amgd! *generator* *d-learning-rate* *beta1* *beta2*)
($cg! *generator*)
($cg! *discriminator*)))))))
(gcf)
| 4,931
|
Common Lisp
|
.lisp
| 98
| 39.214286
| 107
| 0.555348
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
cec5967414ff2c01168966b2ea0b0cf855e339ed3c176251ffda73ee5fd93684
| 3,242
|
[
-1
] |
3,243
|
dcgan-layers.lisp
|
chunsj_TH/examples/gan/dcgan-layers.lisp
|
(defpackage :dcgan-layers
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.layers
#:th.db.mnist))
(in-package :dcgan-layers)
(defun build-batches (batch-size batch-count)
(let ((mnist (read-mnist-data)))
(loop :for i :from 0 :below batch-count
:for s = (* i batch-size)
:for e = (* (1+ i) batch-size)
:for r = (loop :for k :from s :below e :collect k)
:collect ($contiguous! ($reshape! ($- ($* 2 ($index ($ mnist :train-images) 0 r)) 1)
batch-size 1 28 28)))))
(defparameter *batch-size* 120)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-batches* (build-batches *batch-size* *batch-count*))
(defparameter *latent-dim* 100)
(defparameter *imgsz* (* 28 28))
(defparameter *hidden-size* 128)
(defparameter *generator* (sequential-layer
(affine-layer *latent-dim* *imgsz*
:weight-initializer :xavier-normal
:activation :selu)
(reshape-layer 16 7 7)
(full-convolution-2d-layer 16 32 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.01)
:activation :selu)
(full-convolution-2d-layer 32 1 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.04)
:activation :tanh)))
(defparameter *discriminator* (sequential-layer
(convolution-2d-layer 1 32 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.04)
:activation :lrelu)
(convolution-2d-layer 32 16 4 4
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:weight-initializer :random-normal
:weight-initialization '(0 0.01)
:activation :selu)
(reshape-layer *imgsz*)
(affine-layer *imgsz* *hidden-size*
:weight-initializer :random-normal
:weight-initialization '(0 0.03)
:activation :selu)
(affine-layer *hidden-size* 1
:weight-initializer :random-normal
:weight-initialization '(0 0.04)
:activation :sigmoid)))
(defparameter *lr* 1E-3)
(defparameter *real-labels* (ones *batch-size*))
(defparameter *fake-labels* (zeros *batch-size*))
(defun optim (model)
($amgd! model *lr* 0.5 0.999)
;;($amgd! model *lr*)
($cg! *generator*)
($cg! *discriminator*))
(defun generate (&key (trainp t))
"generates fake images from random normal inputs"
($execute *generator* (rndn *batch-size* *latent-dim*) :trainp trainp))
(defun discriminate (xs &key (trainp t))
"check whether inputs are real or fake"
($execute *discriminator* xs :trainp trainp))
(defun train-discriminator (xs &optional verbose)
"teaching discriminator how to discriminate reals from fakes"
(let* ((fake-scores (discriminate (generate)))
(real-scores (discriminate xs))
(fake-loss ($bce fake-scores *fake-labels*))
(real-loss ($bce real-scores *real-labels*))
(dloss ($+ fake-loss real-loss)))
(when verbose (prn " DL:" (if ($parameterp dloss) ($data dloss) dloss)))
(optim *discriminator*)))
(defun train-generator (&optional verbose)
"teaching generator how to create more real fakes"
(let* ((fake-scores (discriminate (generate)))
(gloss ($bce fake-scores *real-labels*)))
(when verbose (prn " GL:" (if ($parameterp gloss) ($data gloss) gloss)))
(optim *generator*)))
(defun train (xs epoch idx)
(let ((verbose (zerop (rem idx 50))))
(when verbose (prn "EPOCH/IDX>" epoch ":" idx))
(loop :for k :from 0 :below 1
:do (train-discriminator xs verbose))
(train-generator verbose)
(when (zerop (rem idx 500))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/~A-~A.png" (namestring (user-homedir-pathname))
epoch idx)))
(write-tensors-png-file generated fname)))
(when verbose (th::th-get-current-heap-size))))
(defparameter *epochs* 20)
($reset! *generator*)
($reset! *discriminator*)
(time
(loop :for epoch :from 1 :to *epochs*
:do (loop :for xs :in *mnist-batches*
:for idx :from 0
:do (train xs epoch idx))))
(let ((generated (generate :trainp nil))
(fname (format nil "~A/Desktop/images.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file generated fname))
(let ((xs (car *mnist-batches*))
(fname (format nil "~A/Desktop/ixs.png" (namestring (user-homedir-pathname)))))
(write-tensors-png-file xs fname))
(gcf)
(setf *epochs* 1)
| 6,228
|
Common Lisp
|
.lisp
| 118
| 34.279661
| 94
| 0.487192
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
694fc486cf48ab2e9f21996cbb3419e73749b6fddb26a12849cd922045848d06
| 3,243
|
[
-1
] |
3,244
|
cgan.lisp
|
chunsj_TH/examples/gan/cgan.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2016/12/24/conditional-gan-tensorflow/
;; https://github.com/wiseodd/generative-models/blob/master/GAN/conditional_gan/cgan_pytorch.py
(defpackage :cgan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :cgan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
(defun bced (dr df) ($+ ($bce dr ($one dr)) ($bce df ($zero df))))
(defun bceg (df) ($bce df ($one df)))
(defun lossd (dr df) (bced dr df))
(defun lossg (df) (bceg df))
;; XXX cannot figure out why adam works best (adadelta does not work well)
(defun optm (params) ($amgd! params 1E-3))
(defun outpng (data fname) (write-tensor-png-file ($reshape data 28 28) fname))
;; training data - uses batches for performance, 30, 60 works well
(defparameter *batch-size* 60)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(defparameter *mnist-train-image-labels*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-labels) 0 range))))
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *gen-size* 100)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defparameter *lbl-size* 10)
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list (+ *gen-size* *lbl-size*) *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
;; you can apply leaky relu, $lrelu
(defun generate (z c)
(-> ($cat z c 1)
($affine *gw1* *gb1* *os*)
($relu)
($affine *gw2* *gb2* *os*)
($sigmoid)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list (+ *img-size* *lbl-size*)
*hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
;; you can apply leaky relu, $lrelu
(defun discriminate (x c)
(-> ($cat x c 1)
($affine *dw1* *db1* *os*)
($relu)
($affine *dw2* *db2* *os*)
($sigmoid)))
(defun samplez () (rndn *batch-size* *gen-size*))
(defparameter *epoch* 50)
(defparameter *k* 1)
($cg! *discriminator*)
($cg! *generator*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-data-labels* (subseq *mnist-train-image-labels* 0))
(defparameter *train-count* ($count *train-data-batches*))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for c :in *train-data-labels*
:for bidx :from 0
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x c))
(df (discriminate (generate z c) c))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *discriminator*)
($cg! *generator*)))
;; generator
(let* ((df (discriminate (generate z c) c))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*))
(when (zerop (rem bidx 100))
(prn " D/L:" bidx dlv dgv))))
(when (zerop (rem epoch 1))
(let* ((c (car *train-data-labels*))
(g (generate (samplez) c)))
($cg! *discriminator*)
($cg! *generator*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss *train-count*) (/ gloss *train-count*)))))
(defun outpngs49 (data81 fname &optional (w 28) (h 28))
(let* ((n 7)
(img (opticl:make-8-bit-gray-image (* n w) (* n h)))
(datas (mapcar (lambda (data) ($reshape data w h)) data81)))
(loop :for i :from 0 :below n
:do (loop :for j :from 0 :below n
:for sx = (* j w)
:for sy = (* i h)
:for d = ($ datas (+ (* j n) i))
:do (loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img (+ sx i) (+ sy j))
(round (* 255 ($ d i j)))))))))
(opticl:write-png-file fname img)))
;; generate samples
(let* ((c (car *train-data-labels*))
(generated (generate (samplez) c))
(data (car *train-data-batches*)))
(outpngs49 (loop :for i :from 0 :below 49
:collect ($index ($data generated) 0 i))
(format nil "~A/G49.png" *output*))
(outpngs49 (loop :for i :from 0 :below 49
:collect ($index data 0 i))
(format nil "~A/D49.png" *output*))
($cg! *discriminator*)
($cg! *generator*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*mnist-train-image-labels* nil
*train-data-batches* nil
*train-data-labels* nil)
(gcf)
| 6,930
|
Common Lisp
|
.lisp
| 156
| 33.230769
| 95
| 0.515268
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
f8c788ad9a8049ff9567c9b7cb4684044d66504675d0dbadedd5d6c8f5da5c4f
| 3,244
|
[
-1
] |
3,245
|
infogan.lisp
|
chunsj_TH/examples/gan/infogan.lisp
|
;; from
;; https://wiseodd.github.io/techblog/2017/01/29/infogan/
;; https://github.com/wiseodd/generative-models/blob/master/GAN/infogan/infogan_pytorch.py
(defpackage :infogan
(:use #:common-lisp
#:mu
#:th
#:th.image
#:th.db.mnist))
(in-package :infogan)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
(defparameter *output* (format nil "~A/Desktop" (user-homedir-pathname)))
(defparameter *eps* 1E-8)
(defun lossd (dr df)
(let ((dr ($+ dr *eps*))
(df ($+ df *eps*)))
($neg ($mean ($+ ($log dr) ($log ($- 1 df)))))))
(defun lossg (df)
(let ((df ($+ df *eps*)))
($neg ($mean ($log df)))))
(defun lossq (c qc)
(let ((qc ($+ qc *eps*)))
($mean ($neg ($sum ($* c ($log qc)) 1)))))
;; XXX cannot figure out why adam works best (adadelta does not work well)
(defun optm (params) ($amgd! params 1E-3))
(defun outpng (data fname &optional (w 28) (h 28))
(let ((img (opticl:make-8-bit-gray-image w h))
(d ($reshape data w h)))
(loop :for i :from 0 :below h
:do (loop :for j :from 0 :below w
:do (progn
(setf (aref img i j) (round (* 255 ($ d i j)))))))
(opticl:write-png-file fname img)))
;; training data - uses batches for performance, 30, 60 works well
(defparameter *batch-size* 30)
(defparameter *batch-count* (/ 60000 *batch-size*))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for range = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 range))))
(defparameter *discriminator* (parameters))
(defparameter *generator* (parameters))
(defparameter *qnet* (parameters))
(defparameter *gen-size* 100)
(defparameter *hidden-size* 128)
(defparameter *img-size* (* 28 28))
(defparameter *lbl-size* 10)
(defun xinit (size) ($* (apply #'rndn size) (/ 1 (sqrt (/ ($ size 0) 2)))))
(defparameter *os* (ones *batch-size*))
;; generator network
(defparameter *gw1* ($push *generator* (xinit (list (+ *gen-size* *lbl-size*) *hidden-size*))))
(defparameter *gb1* ($push *generator* (zeros *hidden-size*)))
(defparameter *gw2* ($push *generator* (xinit (list *hidden-size* *img-size*))))
(defparameter *gb2* ($push *generator* (zeros *img-size*)))
;; you can apply leaky relu, $lrelu
(defun generate (z c)
(-> ($cat z c 1)
($affine *gw1* *gb1* *os*)
($relu)
($affine *gw2* *gb2* *os*)
($clamp -10 10)
($sigmoid)))
;; discriminator network
(defparameter *dw1* ($push *discriminator* (xinit (list *img-size* *hidden-size*))))
(defparameter *db1* ($push *discriminator* (zeros *hidden-size*)))
(defparameter *dw2* ($push *discriminator* (xinit (list *hidden-size* 1))))
(defparameter *db2* ($push *discriminator* (zeros 1)))
;; you can apply leaky relu, $lrelu
(defun discriminate (x)
(-> x
($affine *dw1* *db1* *os*)
($relu)
($affine *dw2* *db2* *os*)
($clamp -10 10)
($sigmoid)))
;; q(c|X) network
(defparameter *qw1* ($push *qnet* (xinit (list *img-size* *hidden-size*))))
(defparameter *qb1* ($push *qnet* (zeros *hidden-size*)))
(defparameter *qw2* ($push *qnet* (xinit (list *hidden-size* *lbl-size*))))
(defparameter *qb2* ($push *qnet* (zeros *lbl-size*)))
(defun qnet (x)
(-> x
($affine *qw1* *qb1* *os*)
($relu)
($affine *qw2* *qb2* *os*)
($softmax)))
(defun rones (nrows cprobs)
(let* ((indices ($multinomial cprobs nrows))
(res (zeros ($size indices 0) ($size cprobs 0))))
(loop :for i :from 0 :below nrows
:for j = ($ indices i)
:do (setf ($ res i j) 1))
res))
(defparameter *cprobs* ($/ (ones 10) *lbl-size*))
(defun samplez () (rndn *batch-size* *gen-size*))
(defun samplec () (rones *batch-size* *cprobs*))
(defparameter *epoch* 20)
(defparameter *k* 1)
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*)
(defparameter *train-data-batches* (subseq *mnist-train-image-batches* 0))
(defparameter *train-count* ($count *train-data-batches*))
(gcf)
(time
(loop :for epoch :from 1 :to *epoch*
:for dloss = 0
:for gloss = 0
:for qloss = 0
:do (progn
($cg! *discriminator*)
($cg! *generator*)
(prn "*****")
(prn "EPOCH:" epoch)
(loop :for x :in *train-data-batches*
:for bidx :from 0
:for c = (samplec)
:for z = (samplez)
:do (let ((dlv nil)
(dgv nil)
(dqv nil))
;; discriminator
(dotimes (k *k*)
(let* ((dr (discriminate x))
(df (discriminate (generate z c)))
(l ($data (lossd dr df))))
(incf dloss l)
(setf dlv l)
(optm *discriminator*)
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*)))
;; generator
(let* ((df (discriminate (generate z c)))
(l ($data (lossg df))))
(incf gloss l)
(setf dgv l)
(optm *generator*)
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*))
;; q network
(let* ((g-sample (generate z c))
(qc (qnet g-sample))
(l ($data (lossq c qc))))
(incf qloss l)
(setf dqv l)
(optm *generator*)
(optm *qnet*)
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*))
(when (zerop (rem bidx 200))
(prn " D/L/Q:" bidx dlv dgv dqv))))
(when (zerop (rem epoch 1))
(let ((g (generate (samplez) (samplec))))
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*)
(loop :for i :from 1 :to 1
:for s = (random *batch-size*)
:for fname = (format nil "~A/i~A-~A.png" *output* epoch i)
:do (outpng ($index ($data g) 0 s) fname))))
(prn " LOSS:" epoch (/ dloss *train-count*) (/ gloss *train-count*)
(/ qloss *train-count*)))))
(defun outpngs25 (data81 fname &optional (w 28) (h 28))
  "Tile the first 25 tensors of DATA81 into a 5x5 grid and write it to
FNAME as an 8-bit grayscale PNG; each tensor is reshaped to W x H.
NOTE(review): the original code shadowed the outer loop variables i/j
in the pixel loops (renamed ii/ij here, same behavior), and the sx/sy
tile offsets are applied to opposite aref axes than the pixel indices;
with the default square W = H this only transposes the tiling --
confirm before calling with W /= H."
  (let* ((n 5)
         (img (opticl:make-8-bit-gray-image (* n w) (* n h)))
         (tiles (mapcar (lambda (d) ($reshape d w h)) data81)))
    (dotimes (i n)
      (dotimes (j n)
        (let ((sx (* j w))
              (sy (* i h))
              (tile ($ tiles (+ (* j n) i))))
          (dotimes (ii h)
            (dotimes (ij w)
              (setf (aref img (+ sx ii) (+ sy ij))
                    (round (* 255 ($ tile ii ij)))))))))
    (opticl:write-png-file fname img)))
;; generate samples
(let* ((c (let ((c (zeros *batch-size* *lbl-size*)))
(loop :for i :from 0 :below *batch-size*
:do (if (< i 10)
(setf ($ c i i) 1)
(setf ($ c i (rem i 10)) 1)))
c))
(generated (generate (samplez) c)))
(outpngs25 (loop :for i :from 0 :below 25
:collect ($index ($data generated) 0 i))
(format nil "~A/G9.png" *output*))
($cg! *discriminator*)
($cg! *generator*)
($cg! *qnet*))
(setf *mnist* nil
*mnist-train-image-batches* nil
*train-data-batches* nil)
(gcf)
| 8,418
|
Common Lisp
|
.lisp
| 203
| 30.014778
| 95
| 0.484111
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
bd1b852e4064305af02c1f90b51381a002775bb0aa72004f92f635ba67d35229
| 3,245
|
[
-1
] |
3,246
|
vae.lisp
|
chunsj_TH/examples/autoencoder/vae.lisp
|
(defpackage :vae-example
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.db.mnist))
(in-package :vae-example)
(defparameter *mnist* (read-mnist-data))
(defparameter *batch-size* 32)
(defparameter *max-batch-count* 10)
(defparameter *batch-count* (min *max-batch-count*
(/ ($size ($ *mnist* :train-images) 0) *batch-size*)))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:for xs = ($index ($ *mnist* :train-images) 0 rng)
:collect ($contiguous! ($reshape xs ($size xs 0) 1 28 28))))
(setf *mnist* nil)
(defun sample-function (mu log-var &key (trainp t))
  "Reparameterization trick: z = mu + sigma * eps with eps ~ N(0, I)
and sigma = exp(log-var / 2).  TRAINP is accepted but ignored."
  (declare (ignore trainp))
  (let ((sigma ($exp ($/ log-var 2)))
        (eps (apply #'rndn ($size mu))))
    ($+ mu ($* sigma eps))))
;; define autoencoder = encoder + decoder
(defparameter *encoder* (sequential-layer
(convolution-2d-layer 1 32 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:batch-normalization-p t
:activation :relu)
(convolution-2d-layer 32 64 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:batch-normalization-p t
:activation :relu)
(flatten-layer)
(affine-layer 3136 16
:batch-normalization-p t
:activation :nil)
(parallel-layer (affine-layer 16 2 :activation :nil)
(affine-layer 16 2 :activation :nil))
(functional-layer #'sample-function)))
(defparameter *decoder* (sequential-layer
(affine-layer 2 3136 :activation :relu)
(reshape-layer 64 7 7)
(full-convolution-2d-layer 64 64 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:adjust-width 1 :adjust-height 1
:batch-normalization-p t
:activation :relu)
(full-convolution-2d-layer 64 32 3 3
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:adjust-width 1 :adjust-height 1
:batch-normalization-p t
:activation :relu)
(full-convolution-2d-layer 32 1 3 3
:padding-width 1 :padding-height 1
:batch-normalization-p t
:activation :sigmoid)))
(defparameter *model* (sequential-layer *encoder* *decoder*))
(defun vae-loss (model xs &optional (usekl t) (trainp t))
  "Return (list reconstruction-loss kl-term) for a batch XS.
Reconstruction is BCE between the model output and the input.  When
USEKL, the KL term is the closed-form divergence between the encoder's
Gaussian q(z|x) and the unit Gaussian, averaged over the batch;
otherwise the second element is 0."
  (let* ((ys ($execute model xs :trainp trainp))
         (recon-loss ($bce ys xs)))
    (if usekl
        ;; mu and log-var are recovered from the cached arguments of the
        ;; sampling functional layer (index 5 of encoder sub-layer 0).
        (let* ((args ($function-arguments ($ ($ model 0) 5)))
               (m ($size xs 0))
               (mu ($ args 0))
               (log-var ($ args 1))
               ;; KL(q||N(0,I)) = 0.5/m * sum(exp(log-var) + mu^2 - 1 - log-var)
               (kl ($* ($sum ($+ ($exp log-var) ($* mu mu) -1 ($- log-var)))
                       (/ 1 m)
                       0.5)))
          (list recon-loss kl))
        (list recon-loss 0))))
(defun update-params (model gd)
  "Apply one optimizer step to MODEL; GD selects :adam (lr 1E-3),
:rmsprop, or Adadelta for anything else."
  (case gd
    (:adam ($amgd! model 1E-3))
    (:rmsprop ($rmgd! model))
    (otherwise ($adgd! model))))
(defun update-kl-params (model gd)
  "Optimizer step restricted to the encoder (sub-layer 0 of MODEL);
the whole model's gradients are cleared afterwards."
  (case gd
    (:adam ($amgd! ($ model 0) 1E-3))
    (:rmsprop ($rmgd! ($ model 0)))
    (otherwise ($adgd! ($ model 0))))
  ($cg! model))
(defun vae-train-step (model xs st gd)
  "One composite step: NTR reconstruction-only updates, then a single
update on recon-loss + BETA * KL.  Logs the three losses every PSTEP
steps; ST is the global step number, GD the optimizer selector."
  (let* ((ntr 10)
         (beta 0.01)
         (pstep 10))
    ;; warm the reconstruction path before applying the KL-weighted step
    (loop :for i :from 0 :to ntr
          :do (progn
                (vae-loss model xs nil)
                (update-params model gd)))
    (let* ((losses (vae-loss model xs t))
           (lr (car losses))
           (lkl (cadr losses))
           (l ($+ lr ($* beta lkl))))
      (when (zerop (rem st pstep))
        (prn st ":"
             (format nil "~,4E" (if ($parameterp l) ($data l) l))
             (format nil "~,4E" (if ($parameterp lr) ($data lr) lr))
             (format nil "~,4E" (if ($parameterp lkl) ($data lkl) lkl))))
      (update-params model gd))))
(defun vae-train-step-2 (model xs st gd)
(let* ((ntr 10)
(beta 0.05)
(pstep 10))
(loop :for i :from 0 :to ntr
:do (progn
(vae-loss model xs nil)
(update-params model gd)))
(let* ((losses (vae-loss model xs t))
(lr (car losses))
(lkl (cadr losses))
(l ($+ lr ($* beta lkl))))
(when (zerop (rem st pstep))
(prn st ":"
(format nil "~,4E" (if ($parameterp l) ($data l) l))
(format nil "~,4E" (if ($parameterp lr) ($data lr) lr))
(format nil "~,4E" (if ($parameterp lkl) ($data lkl) lkl))))
(update-kl-params model gd))))
(defun vae-train (epochs model batches)
  "Run EPOCHS passes of vae-train-step-2 (with :rmsprop) over BATCHES,
numbering steps consecutively across all epochs."
  (let ((step 0))
    (loop :repeat epochs
          :do (dolist (xs batches)
                (vae-train-step-2 model xs (incf step) :rmsprop)))))
(defparameter *epochs* 1000)
($reset! *model*)
;; train
(time
(vae-train *epochs* *model* *mnist-train-image-batches*))
;; test model
($execute *model* (car *mnist-train-image-batches*) :trainp nil)
;; trained weights
($load-weights "./examples/weights/vae" *model*)
;; ($save-weights "./examples/weights/vae" *model*)
;; check results
(defun compare-xy (encoder decoder bs)
  "Pick a random image from a random batch in BS, run it through
ENCODER then DECODER, and write the input and its reconstruction as
PNGs on the user's Desktop for visual comparison."
  (let* ((nb ($count bs))
         (bidx (random nb))
         (xs ($ bs bidx))
         (bn ($size xs 0))
         (es ($execute encoder xs :trainp nil))
         (ds ($execute decoder es :trainp nil))
         ;; decoder output is flat; restore the NCHW image shape
         (ys ($reshape! ds bn 1 28 28))
         (idx (random bn))
         (x ($ xs idx))
         (y ($ ys idx))
         (inf ($concat (namestring (user-homedir-pathname)) "Desktop/input.png"))
         (ouf ($concat (namestring (user-homedir-pathname)) "Desktop/output.png")))
    (prn "BIDX:" bidx)
    (prn "ENCODED:" es)
    (prn "INDEX:" idx)
    (th.image:write-tensor-png-file x inf)
    (th.image:write-tensor-png-file y ouf)))
(compare-xy *encoder* *decoder* *mnist-train-image-batches*)
;; generate images
(defun genimg (decoder)
(let* ((bn *batch-size*)
(xs (rndn bn 2))
(mn ($mean xs 0))
(ds ($execute decoder xs :trainp nil))
(ys ($reshape! ds bn 1 28 28))
(fs ($concat (namestring (user-homedir-pathname)) "Desktop/gen~A.png")))
(prn "XS:" ($ mn 0 0) ($exp ($ mn 0 1)))
(loop :for i :from 0 :below (min 10 bn)
:for filename = (format nil fs (1+ i))
:do (th.image:write-tensor-png-file ($ ys i) filename))))
(genimg *decoder*)
;; generate a patch image for latent space
(defun genpatch (&optional (n 21))
(let* ((minv -1E0)
(maxv 1E0)
(sv (/ (- maxv minv) n))
(xs (tensor (1+ n) (1+ n) 2)))
(loop :for i :from 0 :to n
:for vi = (- maxv (* i sv))
:do (loop :for j :from 0 :to n
:for vj = (+ minv (* j sv))
:do (setf ($ xs i j 0) vj
($ xs i j 1) vi)))
(let* ((xs ($reshape! xs (* (1+ n) (1+ n)) 2))
(mn ($mean xs 0))
(ds ($execute *decoder* xs :trainp nil))
(ys ($reshape! ds (1+ n) (1+ n) 1 28 28))
(img (opticl:make-8-bit-gray-image (* (1+ n) 28) (* (1+ n) 28)))
(fs ($concat (namestring (user-homedir-pathname)) "Desktop/patch.png")))
(prn xs)
(prn "MN:" ($ mn 0 0) ($exp ($ mn 0 1)))
(loop :for ti :from 0 :to n
:for sy = (* ti 28)
:do (loop :for tj :from 0 :to n
:for tx = ($ ys ti tj)
:for sx = (* tj 28)
:do (loop :for ii :from 0 :below 28
:do (loop :for ij :from 0 :below 28
:do (setf (aref img (+ sy ii) (+ sx ij))
(round (* 255 ($ tx 0 ii ij))))))))
(prn ys)
(opticl:write-png-file fs img))))
(genpatch)
(defun showimg (xs)
  "Write up to 40 images of the batch XS to Desktop/in<N>.png (N from 1)."
  (let ((template ($concat (namestring (user-homedir-pathname)) "Desktop/in~A.png"))
        (n (min ($size xs 0) 40)))
    (dotimes (k n)
      (th.image:write-tensor-png-file ($ xs k) (format nil template (1+ k))))))
(showimg ($ *mnist-train-image-batches* 0))
| 9,452
|
Common Lisp
|
.lisp
| 208
| 30.860577
| 91
| 0.460938
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
e161bace7148b77f5756ee79f429b594e68cd25f5d832800789ef86e651eaca3
| 3,246
|
[
-1
] |
3,247
|
cae.lisp
|
chunsj_TH/examples/autoencoder/cae.lisp
|
(defpackage :cae-example
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.db.mnist))
(in-package :cae-example)
(defparameter *mnist* (read-mnist-data))
(defparameter *batch-size* 32)
(defparameter *max-batch-count* 100)
(defparameter *batch-count* (min *max-batch-count*
(/ ($size ($ *mnist* :train-images) 0) *batch-size*)))
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below *batch-count*
:for rng = (loop :for k :from (* i *batch-size*) :below (* (1+ i) *batch-size*)
:collect k)
:for xs = ($index ($ *mnist* :train-images) 0 rng)
:collect ($contiguous! ($reshape xs ($size xs 0) 1 28 28))))
(setf *mnist* nil)
;; define autoencoder = encoder + decoder
(defparameter *encoder* (sequential-layer
(convolution-2d-layer 1 32 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:activation :relu)
(convolution-2d-layer 32 64 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:activation :relu)
(flatten-layer)
(affine-layer 3136 2
:batch-normalization-p t
:activation :nil)))
(defparameter *decoder* (sequential-layer
(affine-layer 2 3136 :activation :relu)
(reshape-layer 64 7 7)
(full-convolution-2d-layer 64 64 3 3
:padding-width 1 :padding-height 1
:stride-width 2 :stride-height 2
:adjust-width 1 :adjust-height 1
:activation :relu)
(full-convolution-2d-layer 64 32 3 3
:stride-width 2 :stride-height 2
:padding-width 1 :padding-height 1
:adjust-width 1 :adjust-height 1
:activation :relu)
(full-convolution-2d-layer 32 1 3 3
:padding-width 1 :padding-height 1
:batch-normalization-p t
:activation :sigmoid)))
(defparameter *model* (sequential-layer *encoder* *decoder*))
;; reconstruction loss: binary cross-entropy between output Y and input X
(defun loss (y x) ($bce y x))
(defun update-params (model gd)
  "Dispatch one optimizer step on MODEL keyed by GD:
:adam (lr 1E-3), :rmsprop, or Adadelta by default."
  (case gd
    (:adam ($amgd! model 1E-3))
    (:rmsprop ($rmgd! model))
    (otherwise ($adgd! model))))
(defun train (model xs epoch idx gd)
  "One training step: forward XS through MODEL, compute the BCE
reconstruction loss, log every PSTEP-th batch, then update parameters
with the optimizer selected by GD."
  (let* ((pstep 20)
         (ys ($execute model xs))
         (l (loss ys xs)))
    (when (zerop (rem idx pstep))
      (prn (format nil "~5,D" idx) "/" (format nil "~5,D" epoch) ":" ($data l)))
    (update-params model gd)))
(defparameter *epochs* 100)
($reset! *model*)
(time
(loop :for epoch :from 1 :to *epochs*
:do (loop :for xs :in *mnist-train-image-batches*
:for idx :from 1
:do (train *model* xs epoch idx :rmsprop))))
;; save/load trained weights
;; ($save-weights "examples/weights/cae" *model*)
($load-weights "examples/weights/cae" *model*)
;; check results
(defun compare-xy (encoder decoder bs)
(let* ((xs ($ bs (random ($count bs))))
(bn ($size xs 0))
(es ($execute encoder xs :trainp nil))
(ds ($execute decoder es :trainp nil))
(ys ($reshape! ds bn 1 28 28))
(idx (random bn))
(x ($ xs idx))
(y ($ ys idx))
(inf ($concat (namestring (user-homedir-pathname)) "Desktop/input.png"))
(ouf ($concat (namestring (user-homedir-pathname)) "Desktop/output.png")))
(prn "ENCODED:" es)
(prn "INDEX:" idx)
(th.image:write-tensor-png-file x inf)
(th.image:write-tensor-png-file y ouf)))
(compare-xy *encoder* *decoder* *mnist-train-image-batches*)
;; test model
($execute *model* (car *mnist-train-image-batches*) :trainp nil)
| 4,516
|
Common Lisp
|
.lisp
| 92
| 31.934783
| 87
| 0.473899
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
4809d0eb6feb819af2d76d47ea97d0edb3f631b21681ecb8d8662dbc6b89dcd3
| 3,247
|
[
-1
] |
3,248
|
autoenc.lisp
|
chunsj_TH/examples/autoencoder/autoenc.lisp
|
;; from
;; https://github.com/Abhipanda4/Sparse-Autoencoders
(defpackage :autoencoder
(:use #:common-lisp
#:mu
#:th
#:th.db.mnist))
(in-package :autoencoder)
;; load mnist data, takes ~22 secs in macbook 2017
(defparameter *mnist* (read-mnist-data))
;; mnist data has following dataset
;; train-images, train-labels and test-images, test-labels
(prn *mnist*)
;; training data - uses batches for performance
(defparameter *mnist-train-image-batches*
(loop :for i :from 0 :below 60
:for rng = (loop :for k :from (* i 1000) :below (* (1+ i) 1000)
:collect k)
:collect ($contiguous! ($index ($ *mnist* :train-images) 0 rng))))
(defparameter *num-input* 784)
(defparameter *num-hidden* 300)
(defparameter *num-batch* 1000)
(defparameter *epochs* 30)
(defparameter *rho* 0.01)
(defparameter *beta* 3)
(defparameter *ae* (parameters))
(defparameter *wenc* ($push *ae* (vxavier (list *num-input* *num-hidden*))))
(defparameter *benc* ($push *ae* (ones *num-hidden*)))
(defparameter *wdec* ($push *ae* (vxavier (list *num-hidden* *num-input*))))
(defparameter *bdec* ($push *ae* (ones *num-input*)))
(defparameter *os* (ones *num-batch*))
(defparameter *p* ($fill! (tensor *num-hidden*) *rho*))
(defun validate ()
  "Mean squared reconstruction error of the autoencoder over the MNIST
test set, using the current weight values (not the parameter nodes, so
no gradient graph is built)."
  (let ((we ($data *wenc*))
        (be ($data *benc*))
        (wd ($data *wdec*))
        (bd ($data *bdec*)))
    (let* ((x ($ *mnist* :test-images))
           (os (ones ($size x 0)))
           (encoded (-> x
                        ($affine we be os)
                        ($sigmoid)))
           (decoded (-> encoded
                        ($affine wd bd os)
                        ($sigmoid)))
           (d ($- decoded x))
           ;; sum of squared errors averaged over the number of samples
           (loss ($/ ($dot d d) ($size x 0))))
      loss)))
(defun kl-divergence (q &optional (usesf t))
  "Bernoulli KL divergence between the target sparsity *p* and the
observed activations Q; when USESF both are passed through softmax
first.  KL = sum p*log(p/q) + sum (1-p)*log((1-p)/(1-q))."
  (let ((rho (if usesf ($softmax *p*) *p*))
        (rho-hat (if usesf ($softmax q) q)))
    (let ((direct ($sum ($* rho ($log ($div rho rho-hat)))))
          (complement ($sum ($* ($- 1 rho)
                                ($log ($div ($- 1 rho) ($- 1 rho-hat)))))))
      ($+ direct complement))))
(progn
($cg! *ae*)
(prn (validate))
($cg! *ae*))
($cg! *ae*)
(gcf)
;; train without sparsity consideration
(time
(loop :for epoch :from 1 :to *epochs*
:do (progn
($cg! *ae*)
(loop :for x :in *mnist-train-image-batches*
:for bidx :from 1
:for encoded = (-> x
($affine *wenc* *benc* *os*)
($sigmoid))
:for decoded = (-> encoded
($affine *wdec* *bdec* *os*)
($sigmoid))
:for d = ($- decoded x)
:for mse = ($/ ($dot d d) *num-batch*)
:for loss = mse
:do (progn
($adgd! *ae*)
(when (zerop (rem bidx 10))
(prn "LOSS:" bidx "/" epoch ($data loss)))))
(prn "[TEST]" epoch (validate)))))
;; train with sparsity penalty
(time
(loop :for epoch :from 1 :to *epochs*
:do (progn
($cg! *ae*)
(loop :for x :in *mnist-train-image-batches*
:for bidx :from 1
:for encoded = (-> x
($affine *wenc* *benc* *os*)
($sigmoid))
:for decoded = (-> encoded
($affine *wdec* *bdec* *os*)
($sigmoid))
:for d = ($- decoded x)
:for mse = ($/ ($dot d d) *num-batch*)
:for rho-hat = ($mean encoded 0)
:for kld = (kl-divergence rho-hat)
:for esparsity = ($* kld *beta*)
:for loss = ($+ mse esparsity)
:do (progn
($adgd! *ae*)
(when (zerop (rem bidx 10))
(prn "LOSS:" bidx "/" epoch
($data loss)
($data mse)
($data esparsity)))))
(prn "[TEST]" epoch (validate)))))
(setf *mnist* nil)
(setf *mnist-train-image-batches* nil)
(gcf)
| 4,385
|
Common Lisp
|
.lisp
| 115
| 25.643478
| 76
| 0.444601
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
c1b44d6588966f416909a6b31d60df88c802873179cdf9c44583e55be0b920b8
| 3,248
|
[
-1
] |
3,249
|
cartpole-reinforce.lisp
|
chunsj_TH/examples/rl/cartpole-reinforce.lisp
|
(defpackage :cartpole-reinforce
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :cartpole-reinforce)
;; most simpliest implementation using CartPole-v0
;; this does not uses auto differentiation of TH.
(defun policy (state w) ($softmax ($@ ($unsqueeze state 0) w)))
(defun softmax-grad (sm)
  "Jacobian of softmax for the 1xN row SM: diag(s) - s s^T."
  (let ((s ($transpose sm)))
    ($- ($diagflat s) ($@ s ($transpose s)))))
(defun policy-grad (ps action state)
  "computing d/dw of log P(action)"
  (let* ((ds ($ (softmax-grad ps) action)) ;; Jacobian row for ACTION
         (dl ($/ ds ($ ps 0 action))) ;; note that we're differentiating log(P(action))
         ;; chain rule through the linear layer: outer product with the state
         (dw ($@ ($transpose state) ($unsqueeze dl 0))))
    dw))
(let* ((w0 (tensor '((0.01 -0.01) (-0.01 0.01) (0.01 -0.01) (-0.01 0.01))))
(w1 ($clone w0))
(w2 ($parameter ($clone w0))))
(let* ((s (tensor '(0.1 -0.1 0.2 -0.2)))
(a 0)
(p1 (policy s w1))
(p2 (policy s w2))
(lp2 ($log ($ p2 0 a))))
(setf lp2 ($* lp2 1)) ;; XXX dummy operation
;;(list ($mse p1 ($data p2)) ($mse (policy-grad p1 0 s) ($gradient w2)))
($log ($ (policy s w2) 0 a))
(list (policy-grad p1 0 s)
($gradient w2))))
(defun mean (xs)
  "Arithmetic mean of the non-empty list XS."
  (let ((n (length xs)))
    (/ (reduce #'+ xs) n)))
(defun variance (xs)
  "Population variance of the non-empty list XS."
  (let* ((n (length xs))
         (m (/ (reduce #'+ xs) n))
         (acc 0))
    (dolist (x xs (/ acc n))
      (incf acc (expt (abs (- x m)) 2)))))
(defun sd (xs) (sqrt (variance xs)))
(defun z-scored (vs fl)
  "When FL is true, standardize the list VS to zero mean and unit
standard deviation (a zero sd falls back to a divisor of 1);
otherwise return VS unchanged."
  (if (not fl)
      vs
      (let* ((m (mean vs))
             (s (sd vs))
             (scale (if (> s 0) s 1)))
        (mapcar (lambda (v) (/ (- v m) scale)) vs))))
(defun returns (rewards gamma &optional standardizep)
  "Discounted returns G_t for the reward list REWARDS, accumulated
backwards with discount factor GAMMA; optionally z-score standardized."
  (let ((running 0))
    (-> (loop :for r :in (reverse rewards)
              :collect (progn
                         (setf running ($+ r (* gamma running)))
                         running))
        (reverse)
        (z-scored standardizep))))
(defun reinforce-simple (w &optional (max-episodes 2000))
(let ((gamma 0.99)
(lr 0.001)
(env (cartpole-env :easy :reward 300))
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for grads = '()
:for rewards = '()
:for score = 0
:for done = nil
:do (progn
(loop :while (not done)
:for probs = (policy state w)
:for action = ($scalar ($multinomial probs 1))
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let ((grad (policy-grad probs action state)))
(push grad grads)
(push reward rewards)
(incf score reward)
(setf state next-state
done terminalp)))
(loop :for grad :in (reverse grads)
:for gt :in (returns (reverse rewards) gamma T)
:for i :from 0
:for gm = (expt gamma i)
:do ($set! w ($+ w ($* lr gm gt grad))))
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
(defparameter *w* (rnd 4 2))
(reinforce-simple *w*)
(evaluate (cartpole-env :eval) (lambda (state) ($scalar ($argmax (policy state *w*) 1))))
;; using auto differentiation of TH.
(defun policy (state w) ($softmax ($@ ($unsqueeze state 0) w)))
(defun select-action (state w)
  "Sample an action from the policy for STATE; returns (action prob)
where PROB is the differentiable probability of the sampled action
(gathered from the policy output, so gradients flow through it)."
  (let* ((probs (policy state w))
         (action ($multinomial ($data probs) 1)))
    (list ($scalar action) ($gather probs 1 action))))
(defun reinforce-bp (w &optional (max-episodes 2000))
(let ((gamma 0.99)
(lr 0.01)
(env (cartpole-env :easy :reward 300))
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for score = 0
:for done = nil
:do (let ((losses nil))
(loop :while (not done)
:for (action prob) = (select-action state w)
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let* ((logP ($log prob)))
(push logP logPs)
(push reward rewards)
(incf score reward)
(setf state next-state
done terminalp)))
(loop :for logP :in (reverse logPs)
:for gt :in (returns (reverse rewards) gamma T)
:for i :from 0
:for gm = (expt gamma i)
:do (push ($- ($* gm logP gt)) losses)) ;; not sure on this
(reduce #'$+ losses)
($amgd! w lr)
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
(defparameter *w* ($parameter (rnd 4 2)))
(reinforce-bp *w*)
(evaluate (cartpole-env :eval) (lambda (state) ($scalar ($argmax (policy state ($data *w*)) 1))))
| 5,649
|
Common Lisp
|
.lisp
| 137
| 29.364964
| 97
| 0.477248
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
82dba5b86f86cdf562a60e3bbf921c30a103847e0809f2079d2e8c7c499f6932
| 3,249
|
[
-1
] |
3,250
|
pole.lisp
|
chunsj_TH/examples/rl/pole.lisp
|
(defpackage :cartpole
(:use #:common-lisp
#:mu
#:th))
(in-package :cartpole)
;; following code is mechanically translated from pole.c from Sutton/Barto book.
(defconstant +one-degree+ 0.0174532)
(defconstant +six-degrees+ 0.1047192)
(defconstant +twelve-degrees+ 0.2094384)
(defconstant +fifty-degrees+ 0.87266)
(defun get-box (x x-dot theta theta-dot)
  "Discretize the cart-pole state into one of 162 boxes (3*3*6*3);
returns -1 when the cart or pole leaves the allowed region (failure)."
  (if (or (< x -2.4) (> x 2.4) (< theta (- +twelve-degrees+)) (> theta +twelve-degrees+))
      -1
      (let ((box 0))
        ;; cart position: 3 bins
        (cond ((< x -0.8) (setf box 0))
              ((< x 0.8) (setf box 1))
              (T (setf box 2)))
        ;; cart velocity: 3 bins, stride 3
        (cond ((< x-dot -0.5))
              ((< x-dot 0.5) (incf box 3))
              (T (incf box 6)))
        ;; pole angle: 6 bins, stride 9
        (cond ((< theta (- +six-degrees+)))
              ((< theta (- +one-degree+)) (incf box 9))
              ((< theta 0) (incf box 18))
              ((< theta +one-degree+) (incf box 27))
              ((< theta +six-degrees+) (incf box 36))
              (T (incf box 45)))
        ;; pole angular velocity: 3 bins, stride 54
        (cond ((< theta-dot (- +fifty-degrees+)))
              ((< theta-dot +fifty-degrees+) (incf box 54))
              (T (incf box 108)))
        box)))
(defconstant +gravity+ 9.8)
(defconstant +masscart+ 1.0)
(defconstant +masspole+ 0.1)
(defconstant +total-mass+ (+ +masspole+ +masscart+))
(defconstant +length+ 0.5) ;; half the pole's length
(defconstant +polemass-length+ (* +masspole+ +length+))
(defconstant +force-mag+ 10.0)
(defconstant +tau+ 0.02) ;; seconds between state updates
(defconstant +fourthirds+ 1.3333333333333)
(defmacro cart-pole (action x x-dot theta theta-dot)
`(let (xacc
thetaacc
(force (if (> ,action 0) +force-mag+ (- +force-mag+)))
(costheta (cos ,theta))
(sintheta (sin ,theta))
temp)
(setf temp (/ (+ force (* +polemass-length+ ,theta-dot ,theta-dot sintheta))
+total-mass+))
(setf thetaacc (/ (- (* +gravity+ sintheta) (* costheta temp))
(* +length+ (- +fourthirds+ (/ (* +masspole+ costheta costheta)
+total-mass+)))))
(setf xacc (- temp (/ (* +polemass-length+ thetaacc costheta) +total-mass+)))
(incf ,x (* +tau+ ,x-dot))
(incf ,x-dot (* +tau+ xacc))
(incf ,theta (* +tau+ ,theta-dot))
(incf ,theta-dot (* +tau+ thetaacc))))
(defvar *n-boxes* 162)
(defvar *alpha* 1000)
(defvar *beta* 0.5)
(defvar *gamma* 0.95)
(defvar *lambda-w* 0.9)
(defvar *lambda-v* 0.8)
(defvar *max-failures* 100)
(defvar *max-steps* 100000)
(defun prob-push-right (s)
  "Sigmoid of S with the input clamped to [-50, 50] to avoid
floating-point overflow in EXP."
  (let ((z (min 50D0 (max -50D0 s))))
    (/ 1D0 (+ 1D0 (exp (- z))))))
(defun main ()
(let (x
x-dot
theta
theta-dot
(w (zeros *n-boxes*))
(v (zeros *n-boxes*))
(e (zeros *n-boxes*))
(xbar (zeros *n-boxes*))
p
oldp
rhat
r
box
y
(steps 0)
(failures 0)
failed)
;; starting state is 0 0 0 0
(setf x 0.0
x-dot 0.0
theta 0.0
theta-dot 0.0)
;; find box in state space containing start state
(setf box (get-box x x-dot theta theta-dot))
;; iterate through the action-learn loop
(loop :while (and (< steps *max-steps*) (< failures *max-failures*))
:do (progn
(incf steps)
;; choose action randomly, biased by current weight
(setf y (if (< (random 1D0) (prob-push-right ($ w box))) 1 0))
;; update traces
(incf ($ e box) (* (- 1D0 *lambda-w*) (- y 0.5D0)))
(incf ($ xbar box) (- 1D0 *lambda-v*))
;; remember prediction of failuter for current state
(setf oldp ($ v box))
;; apply action to the simulated cart-pole
(cart-pole y x x-dot theta theta-dot)
;; get box of state space containing the resulting state
(setf box (get-box x x-dot theta theta-dot))
(if (< box 0)
(progn
;; failure occurred
(setf failed 1)
(incf failures)
(format T "TRIAL ~D WAS ~D STEPS.~%" failures steps)
(finish-output T)
(setf steps 0)
;; reset state to 0 0 0 0
(setf x 0
x-dot 0
theta 0
theta-dot 0)
(setf box (get-box x x-dot theta theta-dot))
;; reinforcement upon failure is -1, prediction of failure is 0
(setf r -1.0
p 0.0))
(progn
;; not a failure
(setf failed 0)
;; reinforcement is 0, prediction of failure given by v weight
(setf r 0
p ($ v box))))
;; heuristic reinforcement = current + gamma * new pred - prev pred
(setf rhat (+ r (* *gamma* p) (- oldp)))
(loop :for i :from 0 :below *n-boxes*
:do (progn
;; update all weights
(incf ($ w i) (* *alpha* rhat ($ e i)))
(incf ($ v i) (* *beta* rhat ($ xbar i)))
(if (< ($ v i) -1) (setf ($ v i) -1))
(if (eq failed 1)
(setf ($ e i) 0.0
($ xbar i) 0.0)
(progn
(setf ($ e i) (* ($ e i) *lambda-w*)
($ xbar i) (* ($ xbar i) *lambda-v*))))))))
(if (eq failures *max-failures*)
(format T "POLE NOT BALANCED. STOPPING AFTER ~D FAILURES~%" failures)
(format T "POLE BALANCED SUCCESSFULLY FOR AT LEAST ~D STEPS~%" steps))
(finish-output T)))
;; run it
(main)
| 6,044
|
Common Lisp
|
.lisp
| 148
| 28.101351
| 89
| 0.46729
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
b57c9fd8b79075080f08d9484379efe4bcadab3b3973e968410efaefd821ff48
| 3,250
|
[
-1
] |
3,251
|
cartpole-duel-ddqn.lisp
|
chunsj_TH/examples/rl/cartpole-duel-ddqn.lisp
|
(defpackage :cartpole-duel-ddqn
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :cartpole-duel-ddqn)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
(let* ((decay-steps (round (* max-steps decay-ratio)))
(rem-steps (- max-steps decay-steps))
(vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
($list)
(reverse)
(tensor)))
(minvs ($min vs))
(maxvs ($max vs))
(rngv (- maxvs minvs))
(vs ($/ ($- vs minvs) rngv))
(vs ($+ minv ($* vs (- v0 minv)))))
($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model-common (&optional (ni 4))
(let ((h1 5)
(h2 5))
(sequential-layer
(affine-layer ni h1 :weight-initializer :random-uniform)
(affine-layer h1 h2 :weight-initializer :random-uniform))))
(defun model-value (&optional (ni 5))
(sequential-layer
(affine-layer ni 1 :weight-initializer :random-uniform)))
(defun model-advantage (&optional (ni 5) (no 2))
(sequential-layer
(affine-layer ni no :weight-initializer :random-uniform)))
(defclass duel-ddqn-model (layer)
((cm :initform (model-common))
(vm :initform (model-value))
(am :initform (model-advantage))))
(defun model () (make-instance 'duel-ddqn-model))
(defmethod $execute ((m duel-ddqn-model) x &key (trainp T))
  "Dueling head: Q(s,a) = V(s) + (A(s,a) - mean_a A(s,a)), where V and
A are computed from the shared trunk CM; V and the advantage mean are
broadcast to the advantage's shape."
  (with-slots (cm vm am) m
    (let* ((hc ($execute cm x :trainp trainp))
           (hv ($execute vm hc :trainp trainp))
           (ha ($execute am hc :trainp trainp))
           (sa ($size ha))
           (ma ($mean ha 1)))
      ($add ($expand hv sa) ($sub ha ($expand ma sa))))))
(defmethod $train-parameters ((m duel-ddqn-model))
(with-slots (cm vm am) m
(append ($train-parameters cm)
($train-parameters vm)
($train-parameters am))))
(defun best-action-selector (model &optional (epsilon 0))
  "Return an epsilon-greedy policy closure over MODEL: with probability
EPSILON pick a random action, otherwise the action with minimum
predicted cost for the 4-dimensional STATE."
  (lambda (state)
    (if (<= (random 1D0) epsilon)
        (random 2)
        ($scalar ($argmin ($evaluate model ($reshape state 1 4)) 1)))))
(defun sample-experiences (experiences nbatch)
(let ((nr ($count experiences)))
(if (> nr nbatch)
(loop :repeat nbatch :collect ($ experiences (random nr)))
experiences)))
(defun train-model (model-online model-target experiences &optional (gamma 0.95D0) (lr 0.003))
(let ((nr ($count experiences)))
(let ((states (-> (apply #'$concat (mapcar #'$0 experiences))
($reshape! nr 4)))
(actions (-> (tensor.long (mapcar #'$1 experiences))
($reshape! nr 1)))
(costs (-> (tensor (mapcar #'$2 experiences))
($reshape! nr 1)))
(next-states (-> (apply #'$concat (mapcar #'$3 experiences))
($reshape! nr 4)))
(dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
($reshape! nr 1))))
(let* ((argmins (-> ($evaluate model-online next-states)
($argmin 1)))
(qns (-> ($evaluate model-target next-states)
($gather 1 argmins)))
(xs states)
(ts ($+ costs ($* gamma qns ($- 1 dones))))
(ys (-> ($execute model-online xs)
($gather 1 actions)))
(loss ($mse ys ts)))
($rmgd! model-online lr)
($data loss)))))
(defvar *max-buffer-size* 4096)
(defvar *batch-size* 512)
(defvar *max-epochs* 2000)
(defvar *eps0* 1D0)
(defvar *min-eps* 0.1D0)
(defvar *eps-decay-ratio* 0.9D0)
(defun report (epoch loss ntrain ctrain neval ceval success)
(when (or success (zerop (rem epoch 20)))
(let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
(prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
(defun polyak-averaging (target online &optional (tau 0.1D0))
  "Soft target update, parameter-wise: target <- tau * online +
(1 - tau) * target.  Gradients of both models are cleared first."
  ($cg! (list target online))
  (loop :for pt :in ($parameters target)
        :for po :in ($parameters online)
        :for a = ($* tau ($data po))
        :for b = ($* (- 1 tau) ($data pt))
        :do ($set! ($data pt) ($+ a b))))
(defun sync-models (target online)
(polyak-averaging target online))
(defun generate-epsilons ()
(decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun duel-ddqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(experiences '())
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(let* ((exsi (collect-experiences train-env
(best-action-selector model-online eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (let ((ne ($count experiences)))
(if (> ne *max-buffer-size*)
(append (nthcdr (- ne *max-buffer-size*) experiences)
exs)
(append experiences exs))))
(incf total-cost ctrain))
(let* ((loss (train-model model-online model-target
(sample-experiences experiences *batch-size*)
0.95D0 0.008))
(eres (evaluate eval-env (best-action-selector model-online)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success))
(sync-models model-target model-online)))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
model-online))
(defparameter *m* nil)
(setf *m* (duel-ddqn *m*))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m*)))
| 6,463
|
Common Lisp
|
.lisp
| 149
| 32.704698
| 95
| 0.532814
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
778ed2e941d1cc4509765a933a9fc461fa3f510481a3d8e51cb909146fd64e89
| 3,251
|
[
-1
] |
3,252
|
cartpole-per.lisp
|
chunsj_TH/examples/rl/cartpole-per.lisp
|
(defpackage :cartpole-per
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :cartpole-per)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
(let* ((decay-steps (round (* max-steps decay-ratio)))
(rem-steps (- max-steps decay-steps))
(vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
($list)
(reverse)
(tensor)))
(minvs ($min vs))
(maxvs ($max vs))
(rngv (- maxvs minvs))
(vs ($/ ($- vs minvs) rngv))
(vs ($+ minv ($* vs (- v0 minv)))))
($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model-common (&optional (ni 4))
(let ((h1 5)
(h2 5))
(sequential-layer
(affine-layer ni h1 :weight-initializer :random-uniform)
(affine-layer h1 h2 :weight-initializer :random-uniform))))
(defun model-value (&optional (ni 5))
(sequential-layer
(affine-layer ni 1 :weight-initializer :random-uniform)))
(defun model-advantage (&optional (ni 5) (no 2))
(sequential-layer
(affine-layer ni no :weight-initializer :random-uniform)))
(defclass duel-ddqn-model (layer)
((cm :initform (model-common))
(vm :initform (model-value))
(am :initform (model-advantage))))
(defun model () (make-instance 'duel-ddqn-model))
(defmethod $execute ((m duel-ddqn-model) x &key (trainp T))
(with-slots (cm vm am) m
(let* ((hc ($execute cm x :trainp trainp))
(hv ($execute vm hc :trainp trainp))
(ha ($execute am hc :trainp trainp))
(sa ($size ha))
(ma ($mean ha 1)))
($add ($expand hv sa) ($sub ha ($expand ma sa))))))
(defmethod $train-parameters ((m duel-ddqn-model))
(with-slots (cm vm am) m
(append ($train-parameters cm)
($train-parameters vm)
($train-parameters am))))
(defun best-action-selector (model &optional (epsilon 0))
(lambda (state)
(if (> (random 1D0) epsilon)
(let* ((state ($reshape state 1 4))
(q ($evaluate model state)))
($scalar ($argmin q 1)))
(random 2))))
(defclass replay-buffer ()
((entries :initform nil)
(deltas :initform nil)
(nsz :initform 0)
(idx :initform -1)
(alpha :initform 0.6)
(beta :initform 0.1)
(beta-rate :initform 0.99992)))
(defun replay-buffer (size)
(let ((n (make-instance 'replay-buffer)))
(with-slots (entries deltas nsz idx) n
(setf entries (make-array size :initial-element nil)
deltas ($fill! (tensor size) 1D0)
nsz 0
idx 0))
n))
(defun add-sample (buffer sample)
(with-slots (idx nsz entries deltas) buffer
(let ((maxsz ($count entries))
(maxd ($max deltas)))
(setf ($ entries idx) sample
($ deltas idx) maxd)
(setf nsz (min (1+ nsz) maxsz))
(incf idx)
(setf idx (rem idx maxsz)))
buffer))
(defun update-deltas (buffer idcs tderrs)
(with-slots (entries deltas) buffer
(setf ($index deltas 0 idcs) ($abs ($reshape tderrs ($count tderrs))))))
(defun update-beta! (buffer)
(with-slots (beta beta0 beta-rate) buffer
(setf beta (min 1D0 (/ beta beta-rate)))))
(defconstant +eps+ 1E-6)
(defun sample-experiences (buffer nbatch)
(with-slots (entries nsz deltas alpha beta) buffer
(if (>= nsz nbatch)
(let* ((prs ($expt ($+ ($subview deltas 0 nsz) +eps+) alpha))
(pbs ($/ prs ($sum prs)))
(wts ($expt ($* pbs nsz) beta))
(nwts ($/ wts ($max wts)))
(indices (tensor.long (loop :for i :from 0 :below nsz :collect i)))
(idcs (loop :repeat nbatch :collect ($choice indices pbs))))
(update-beta! buffer)
(list idcs
($reshape ($index nwts 0 idcs) nbatch 1)
(loop :for i :in idcs :collect ($ entries i))))
(list (loop :for i :from 0 :below nsz :collect i)
($reshape (tensor (loop :repeat nsz :collect 1)) nsz 1)
(loop :for i :from 0 :below nsz :collect ($ entries i))))))
(defvar *max-buffer-size* 4096)
(defvar *batch-size* 512)
(defvar *max-epochs* 1000)
(defvar *eps0* 1D0)
(defvar *min-eps* 0.1D0)
(defvar *eps-decay-ratio* 0.9D0)
(defun train-model (model-online model-target buffer &optional (gamma 0.95D0) (lr 0.003))
(let* ((experiences0 (sample-experiences buffer *batch-size*))
(indices ($ experiences0 0))
(nweights ($ experiences0 1))
(experiences ($ experiences0 2))
(nr ($count experiences)))
(let ((states (-> (apply #'$concat (mapcar #'$0 experiences))
($reshape! nr 4)))
(actions (-> (tensor.long (mapcar #'$1 experiences))
($reshape! nr 1)))
(costs (-> (tensor (mapcar #'$2 experiences))
($reshape! nr 1)))
(next-states (-> (apply #'$concat (mapcar #'$3 experiences))
($reshape! nr 4)))
(dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
($reshape! nr 1))))
(let* ((argmins (-> ($evaluate model-online next-states)
($argmin 1)))
(qns (-> ($evaluate model-target next-states)
($gather 1 argmins)))
(xs states)
(ts ($+ costs ($* gamma qns ($- 1 dones))))
(ys (-> ($execute model-online xs)
($gather 1 actions)))
(tderrs ($- ys ts))
(loss ($mean ($square ($* nweights tderrs)))))
($rmgd! model-online lr)
(update-deltas buffer indices ($data tderrs))
($data loss)))))
(defun report (epoch loss ntrain ctrain neval ceval success)
(when (or success (zerop (rem epoch 20)))
(let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
(prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
(defun polyak-averaging (target online &optional (tau 0.1D0))
($cg! (list target online))
(loop :for pt :in ($parameters target)
:for po :in ($parameters online)
:for a = ($* tau ($data po))
:for b = ($* (- 1 tau) ($data pt))
:do ($set! ($data pt) ($+ a b))))
(defun sync-models (target online)
(polyak-averaging target online))
(defun generate-epsilons ()
(decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun duel-ddqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(buffer (replay-buffer *max-buffer-size*))
(excount 0)
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(let* ((exsi (collect-experiences train-env
(best-action-selector model-online eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(incf excount ntrain)
(loop :for e :in exs :do (add-sample buffer e))
(incf total-cost ctrain))
(let* ((loss (train-model model-online model-target
buffer
0.95D0 0.008))
(eres (evaluate eval-env (best-action-selector model-online)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success))
(sync-models model-target model-online)))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" excount total-cost)))
model-online))
(defparameter *m* nil)
(setf *m* (duel-ddqn *m*))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m*)))
| 8,245
|
Common Lisp
|
.lisp
| 197
| 32.456853
| 91
| 0.549557
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
10e140494b281ed3fd6af8dba42148f8ee9fd3d6141e86edc143c1933a3e521d
| 3,252
|
[
-1
] |
3,253
|
cartpole-dqn.lisp
|
chunsj_TH/examples/rl/cartpole-dqn.lisp
|
(defpackage :cartpole-dqn
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :cartpole-dqn)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
(let* ((decay-steps (round (* max-steps decay-ratio)))
(rem-steps (- max-steps decay-steps))
(vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
($list)
(reverse)
(tensor)))
(minvs ($min vs))
(maxvs ($max vs))
(rngv (- maxvs minvs))
(vs ($/ ($- vs minvs) rngv))
(vs ($+ minv ($* vs (- v0 minv)))))
($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model (&optional (ni 5) (no 1))
(let ((h1 5)
(h2 5))
(sequential-layer
(affine-layer ni h1 :weight-initializer :random-uniform)
(affine-layer h1 h2 :weight-initializer :random-uniform)
(affine-layer h2 no :weight-initializer :random-uniform))))
(defun best-action-selector (model epsilon)
(lambda (state)
(if (> (random 1D0) epsilon)
(let* ((state ($reshape state 1 4))
(qleft ($evaluate model ($concat state (zeros 1 1) 1)))
(qright ($evaluate model ($concat state (ones 1 1) 1))))
(if (>= ($ qleft 0 0) ($ qright 0 0)) 1 0))
(random 2))))
(defun sample-experiences (experiences nbatch)
(let ((nr ($count experiences)))
(if (> nr nbatch)
(loop :repeat nbatch :collect ($ experiences (random nr)))
experiences)))
(defun generate-dataset (model experiences &optional (gamma 0.95D0))
(let* ((nr ($count experiences))
(state-list (mapcar #'$0 experiences))
(states (-> (apply #'$concat state-list)
($reshape! nr 4)))
(actions (-> (tensor (mapcar #'$1 experiences))
($reshape! nr 1)))
(costs (-> (tensor (mapcar #'$2 experiences))
($reshape! nr 1)))
(next-states (-> (apply #'$concat (mapcar #'$3 experiences))
($reshape! nr 4)))
(dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
($reshape! nr 1)))
(xs ($concat states actions 1))
(qleft ($evaluate model ($concat next-states (zeros nr 1) 1)))
(qright ($evaluate model ($concat next-states (ones nr 1) 1)))
(qns ($min ($concat qleft qright 1) 1))
(tqvs ($+ costs ($* gamma qns ($- 1 dones)))))
(list xs tqvs)))
(defun train (model xs ts)
(let* ((ys ($execute model xs))
(loss ($mse ys ts)))
($rmgd! model 0.003)
($data loss)))
(defvar *init-experience* nil)
(defvar *increment-experience* T)
(defvar *hint-to-goal* nil)
(defvar *max-buffer-size* 4096)
(defvar *batch-size* 512)
(defvar *max-epochs* 2000)
(defvar *sync-period* 15)
(defvar *eps0* 1D0)
(defvar *min-eps* 0.1D0)
(defvar *eps-decay-ratio* 0.9D0)
(setf *init-experience* nil
*hint-to-goal* nil)
(defun report (epoch loss ntrain ctrain neval ceval success)
(when (or success (zerop (rem epoch 20)))
(let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
(prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
(defun sync-models (target online)
($cg! (list target online))
(loop :for pt :in ($parameters target)
:for po :in ($parameters online)
:do ($set! ($data pt) ($data po))))
(defun generate-epsilons ()
(decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun dqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(experiences '())
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(when *init-experience*
(let* ((exsi (collect-experiences train-env))
(exs (car exsi))
(ecost (cadr exsi)))
(setf experiences exs)
(incf total-cost ecost)))
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(when *increment-experience*
(let* ((exsi (collect-experiences train-env
(best-action-selector model-target eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (let ((ne ($count experiences)))
(if (> ne *max-buffer-size*)
(append (nthcdr (- ne *max-buffer-size*) experiences)
exs)
(append experiences exs))))
(incf total-cost ctrain)))
(let* ((xys (generate-dataset model-target
(sample-experiences experiences *batch-size*)
0.95D0))
(xs (car xys))
(ys (cadr xys)))
(when *hint-to-goal*
(let ((gxys (generate-goal-patterns)))
(setf xs ($concat xs (car gxys) 0))
(setf ys ($concat ys (cadr gxys) 0))))
(let* ((loss (train model-online xs ys))
(eres (evaluate eval-env (best-action-selector model-online 0D0)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success)))
(when (zerop (rem epoch *sync-period*))
(sync-models model-target model-online))))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
model-online))
(defun ddqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(experiences '())
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(when *init-experience*
(let* ((exsi (collect-experiences train-env))
(exs (car exsi))
(ecost (cadr exsi)))
(setf experiences exs)
(incf total-cost ecost)))
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(when *increment-experience*
(let* ((exsi (collect-experiences train-env
(best-action-selector model-online eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (let ((ne ($count experiences)))
(if (> ne *max-buffer-size*)
(append (nthcdr (- ne *max-buffer-size*) experiences)
exs)
(append experiences exs))))
(incf total-cost ctrain)))
(let* ((xys (generate-dataset model-target
(sample-experiences experiences *batch-size*)
0.95D0))
(xs (car xys))
(ys (cadr xys)))
(when *hint-to-goal*
(let ((gxys (generate-goal-patterns)))
(setf xs ($concat xs (car gxys) 0))
(setf ys ($concat ys (cadr gxys) 0))))
(let* ((loss (train model-online xs ys))
(eres (evaluate eval-env (best-action-selector model-online 0D0)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success)))
(when (zerop (rem epoch *sync-period*))
(sync-models model-target model-online))))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
model-online))
(defparameter *m* nil)
(let ((strategy #'ddqn)) (setf *m* (funcall strategy *m*)))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m* 0)))
| 8,924
|
Common Lisp
|
.lisp
| 198
| 31.479798
| 97
| 0.496039
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
cdf6783cc84ecb29bdc5dfe784ce9210ffb1aab061261b6964c553c6bf3475e6
| 3,253
|
[
-1
] |
3,254
|
cartpole-ddqn.lisp
|
chunsj_TH/examples/rl/cartpole-ddqn.lisp
|
(defpackage :cartpole-ddqn
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole))
(in-package :cartpole-ddqn)
(defun decay-schedule (v0 minv decay-ratio max-steps &key (log-start -2) (log-base 10))
(let* ((decay-steps (round (* max-steps decay-ratio)))
(rem-steps (- max-steps decay-steps))
(vs (-> ($/ (logspace log-start 0 decay-steps) (log log-base 10))
($list)
(reverse)
(tensor)))
(minvs ($min vs))
(maxvs ($max vs))
(rngv (- maxvs minvs))
(vs ($/ ($- vs minvs) rngv))
(vs ($+ minv ($* vs (- v0 minv)))))
($cat vs ($fill! (tensor rem-steps) ($last vs)))))
(defun model (&optional (ni 4) (no 2))
(let ((h1 5)
(h2 5))
(sequential-layer
(affine-layer ni h1 :weight-initializer :random-uniform)
(affine-layer h1 h2 :weight-initializer :random-uniform)
(affine-layer h2 no :weight-initializer :random-uniform))))
(defun best-action-selector (model &optional (epsilon 0))
(lambda (state)
(if (> (random 1D0) epsilon)
(let* ((state ($reshape state 1 4))
(q ($evaluate model state)))
($scalar ($argmin q 1)))
(random 2))))
(defun sample-experiences (experiences nbatch)
(let ((nr ($count experiences)))
(if (> nr nbatch)
(loop :repeat nbatch :collect ($ experiences (random nr)))
experiences)))
(defun train-model (model-online model-target experiences &optional (gamma 0.95D0) (lr 0.003))
(let ((nr ($count experiences)))
(let ((states (-> (apply #'$concat (mapcar #'$0 experiences))
($reshape! nr 4)))
(actions (-> (tensor.long (mapcar #'$1 experiences))
($reshape! nr 1)))
(costs (-> (tensor (mapcar #'$2 experiences))
($reshape! nr 1)))
(next-states (-> (apply #'$concat (mapcar #'$3 experiences))
($reshape! nr 4)))
(dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
($reshape! nr 1))))
(let* ((argmins (-> ($evaluate model-online next-states)
($argmin 1)))
(qns (-> ($evaluate model-target next-states)
($gather 1 argmins)))
(xs states)
(ts ($+ costs ($* gamma qns ($- 1 dones))))
(ys (-> ($execute model-online xs)
($gather 1 actions)))
(loss ($mse ys ts)))
($rmgd! model-online lr)
($data loss)))))
(defvar *max-buffer-size* 4096)
(defvar *batch-size* 512)
(defvar *max-epochs* 2000)
(defvar *sync-period* 15)
(defvar *eps0* 1D0)
(defvar *min-eps* 0.1D0)
(defvar *eps-decay-ratio* 0.9D0)
(defun report (epoch loss ntrain ctrain neval ceval success)
(when (or success (zerop (rem epoch 20)))
(let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
(prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
(defun sync-models (target online)
($cg! (list target online))
(loop :for pt :in ($parameters target)
:for po :in ($parameters online)
:do ($set! ($data pt) ($data po))))
(defun generate-epsilons ()
(decay-schedule *eps0* *min-eps* *eps-decay-ratio* *max-epochs*))
(defun ddqn (&optional model)
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model-target (model))
(model-online (or model (model)))
(experiences '())
(total-cost 0)
(success nil)
(epsilons (generate-epsilons)))
(sync-models model-target model-online)
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:for eps = ($ epsilons (1- epoch))
:do (let ((ctrain 0)
(ntrain 0))
(let* ((exsi (collect-experiences train-env
(best-action-selector model-online eps)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (let ((ne ($count experiences)))
(if (> ne *max-buffer-size*)
(append (nthcdr (- ne *max-buffer-size*) experiences)
exs)
(append experiences exs))))
(incf total-cost ctrain))
(let* ((loss (train-model model-online model-target
(sample-experiences experiences *batch-size*)
0.95D0 0.008))
(eres (evaluate eval-env (best-action-selector model-online)))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success))
(when (zerop (rem epoch *sync-period*))
(sync-models model-target model-online))))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))
model-online))
(defparameter *m* nil)
(setf *m* (ddqn *m*))
(let ((env (cartpole-env :eval)))
(evaluate env (best-action-selector *m*)))
| 5,439
|
Common Lisp
|
.lisp
| 124
| 32.096774
| 95
| 0.515563
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
3f7c788ef6d18677b4a1737f6cf36fc37718f87c3b22660e767cec57fa9cfcc5
| 3,254
|
[
-1
] |
3,255
|
cartpole-nfq.lisp
|
chunsj_TH/examples/rl/cartpole-nfq.lisp
|
;; from https://github.com/seungjaeryanlee/implementations-nfq.git
(defpackage :cartpole-nfq
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env))
(in-package :cartpole-nfq)
(defconstant +gravity+ 9.8D0)
(defconstant +masscart+ 1D0)
(defconstant +masspole+ 0.1D0)
(defconstant +total-mass+ (+ +masscart+ +masspole+))
(defconstant +length+ 0.5D0)
(defconstant +polemass-length+ (* +masspole+ +length+))
(defconstant +force-mag+ 10D0)
(defconstant +tau+ 0.02D0)
(defconstant +x-success-range+ 2.4D0)
(defconstant +theta-success-range+ (/ (* 12 PI) 180D0))
(defconstant +x-threshold+ 2.4D0)
(defconstant +theta-threshold-radians+ (/ PI 2))
(defconstant +c-trans+ 0.01D0)
(defconstant +train-max-steps+ 100)
(defconstant +eval-max-steps+ 3000)
(defclass cartpole-env ()
((mode :initform nil :accessor env/mode)
(step :initform 0 :accessor env/episode-step)
(state :initform nil :accessor env/state)))
(defun cartpole-env (&optional (m :train))
(let ((n (make-instance 'cartpole-env)))
(setf (env/mode n) m)
(env/reset! n)
n))
(defmethod env/reset! ((env cartpole-env))
(with-slots (mode state step) env
(setf step 0)
(setf state (if (eq mode :train)
(tensor (list (random/uniform -2.3D0 2.3D0)
0
(random/uniform -0.3 0.3)
0))
(tensor (list (random/uniform -1D0 1D0)
0
(random/uniform -0.3 0.3)
0))))
state))
(defmethod env/step! ((env cartpole-env) action)
(let* ((x ($0 (env/state env)))
(xd ($1 (env/state env)))
(th ($2 (env/state env)))
(thd ($3 (env/state env)))
(force (if (eq action 1) +force-mag+ (- +force-mag+)))
(costh (cos th))
(sinth (sin th))
(tmp (/ (+ force (* +polemass-length+ thd thd sinth))
+total-mass+))
(thacc (/ (- (* +gravity+ sinth) (* costh tmp))
(* +length+
(- 4/3 (/ (* +masspole+ costh costh) +total-mass+)))))
(xacc (- tmp (/ (* +polemass-length+ thacc costh) +total-mass+)))
(cost +c-trans+)
(done nil)
(blown nil))
(incf (env/episode-step env))
(incf x (* +tau+ xd))
(incf xd (* +tau+ xacc))
(incf th (* +tau+ thd))
(incf thd (* +tau+ thacc))
(cond ((or (< x (- +x-threshold+)) (> x +x-threshold+)
(< th (- +theta-threshold-radians+)) (> th +theta-threshold-radians+))
(setf cost 1D0
done T))
((and (> x (- +x-success-range+)) (< x +x-success-range+)
(> th (- +theta-success-range+)) (< th +theta-success-range+))
(setf cost 0D0
done nil))
(T (setf cost +c-trans+
done nil)))
(when (>= (env/episode-step env)
(if (eq :train (env/mode env)) +train-max-steps+ +eval-max-steps+))
(setf blown T))
(let ((next-state (tensor (list x xd th thd))))
(setf (env/state env) next-state)
(list nil next-state cost done blown))))
(defun generate-goal-patterns (&optional (size 100))
(list (tensor (loop :repeat size
:collect (list (random/uniform -0.05 0.05)
(random/normal 0 1)
(random/uniform (- +theta-success-range+)
+theta-success-range+)
(random/normal 0 1)
(random 2))))
(zeros size 1)))
(defun collect-experiences (env &optional selector)
(let ((rollout '())
(episode-cost 0)
(state (env/reset! env))
(done nil)
(blown nil))
(loop :while (and (not done) (not blown))
:for action = (if selector
(funcall selector state)
(random 2))
:for tx = (env/step! env action)
:do (let ((next-state ($1 tx))
(cost ($2 tx)))
(setf done ($3 tx)
blown ($4 tx))
(push (list state action cost next-state done) rollout)
(incf episode-cost cost)
(setf state next-state)))
(list (reverse rollout) episode-cost)))
(defun model (&optional (ni 5) (no 1))
(let ((h1 5)
(h2 5))
(sequential-layer
(affine-layer ni h1 :weight-initializer :random-uniform)
(affine-layer h1 h2 :weight-initializer :random-uniform)
(affine-layer h2 no :weight-initializer :random-uniform))))
(defun best-action-selector (model)
(lambda (state)
(let* ((state ($reshape state 1 4))
(qleft ($evaluate model ($concat state (zeros 1 1) 1)))
(qright ($evaluate model ($concat state (ones 1 1) 1))))
(if (>= ($ qleft 0 0) ($ qright 0 0)) 1 0))))
(defun generate-patterns (model experiences &optional (gamma 0.95D0))
(let* ((nr ($count experiences))
(state-list (mapcar #'$0 experiences))
(states (-> (apply #'$concat state-list)
($reshape! nr 4)))
(actions (-> (tensor (mapcar #'$1 experiences))
($reshape! nr 1)))
(costs (-> (tensor (mapcar #'$2 experiences))
($reshape! nr 1)))
(next-states (-> (apply #'$concat (mapcar #'$3 experiences))
($reshape! nr 4)))
(dones (-> (tensor (mapcar (lambda (e) (if ($4 e) 1 0)) experiences))
($reshape! nr 1)))
(xs ($concat states actions 1))
(qleft ($evaluate model ($concat next-states (zeros nr 1) 1)))
(qright ($evaluate model ($concat next-states (ones nr 1) 1)))
(qns ($min ($concat qleft qright 1) 1))
(tqvs ($+ costs ($* gamma qns ($- 1 dones)))))
(list xs tqvs)))
(defun train (model xs ts)
(let* ((ys ($execute model xs))
(loss ($mse ys ts)))
($rpgd! model)
($data loss)))
(defun evaluate (env model)
(let ((state (env/reset! env))
(ne 0)
(done nil)
(blown nil)
(ecost 0D0)
(selector (best-action-selector model)))
(loop :while (and (not done) (not blown))
:for step :from 0 :below +eval-max-steps+
:for action = (funcall selector state)
:for tx = (env/step! env action)
:do (let ((next-state ($1 tx))
(cost ($2 tx)))
(setf done ($3 tx)
blown ($4 tx))
(incf ecost cost)
(incf ne)
(setf state next-state)))
(list ne
(and (>= ne (- +eval-max-steps+ 2)) (<= (abs ($0 state)) +x-success-range+))
ecost)))
(defvar *init-experience* T)
(defvar *increment-experience* T)
(defvar *hint-to-goal* T)
(defvar *max-epochs* 300)
(defun report (epoch loss ntrain ctrain neval ceval success)
(when (or success (zerop (rem epoch 20)))
(let ((fmt "EPOCH ~4D | TRAIN ~3D / ~4,2F | EVAL ~4D / ~5,2F | TRAIN.LOSS ~,4F"))
(prn (format nil fmt epoch ntrain ctrain neval ceval loss)))))
(with-max-heap ()
(let* ((train-env (cartpole-env :train))
(eval-env (cartpole-env :eval))
(model (model))
(experiences '())
(total-cost 0)
(success nil))
(when *init-experience*
(let* ((exsi (collect-experiences train-env))
(exs (car exsi))
(ecost (cadr exsi)))
(setf experiences exs)
(incf total-cost ecost)))
(loop :for epoch :from 1 :to *max-epochs*
:while (not success)
:do (let ((ctrain 0)
(ntrain 0))
(when *increment-experience*
(let* ((exsi (collect-experiences train-env (best-action-selector model)))
(exs (car exsi)))
(setf ctrain (cadr exsi))
(setf ntrain ($count exs))
(setf experiences (append experiences exs))
(incf total-cost ctrain)))
(let* ((xys (generate-patterns model experiences 0.95D0))
(xs (car xys))
(ys (cadr xys)))
(when *hint-to-goal*
(let ((gxys (generate-goal-patterns)))
(setf xs ($concat xs (car gxys) 0))
(setf ys ($concat ys (cadr gxys) 0))))
(let* ((loss (train model xs ys))
(eres (evaluate eval-env model))
(neval ($0 eres))
(ceval ($2 eres)))
(setf success ($1 eres))
(report epoch loss ntrain ctrain neval ceval success)))))
(when success
(prn (format nil "*** TOTAL ~6D / ~4,2F" ($count experiences) total-cost)))))
| 8,988
|
Common Lisp
|
.lisp
| 216
| 30.189815
| 92
| 0.50697
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
dfd2f43f6177c56325908cefc3890c9fbb90af12c8f3d6d487705640b60bb775
| 3,255
|
[
-1
] |
3,256
|
pg-compare.lisp
|
chunsj_TH/examples/rl/pg-compare.lisp
|
(defpackage :policy-gradient-comparison
(:use #:common-lisp
#:mu
#:th
#:th.layers
#:th.env
#:th.env.cartpole)
(:import-from #:th.env.examples #:short-corridor-env))
(in-package :policy-gradient-comparison)
;; for comparison, we need same starting point
(defparameter *w0* (tensor '((0.002 0.007)
(-0.002 0.005)
(-0.005 0.003)
(0.004 -0.008))))
;; utility functions for manual gradient computation
(defun softmax-grad (sm)
(let ((s ($transpose sm)))
($- ($diagflat s) ($@ s ($transpose s)))))
(defun policy-grad (ps action state)
"computing d/dw of log P(action)"
(let* ((ds ($ (softmax-grad ps) action))
(dl ($/ ds ($ ps 0 action))) ;; note that we're differentiating log(P(action))
(dw ($@ ($transpose state) ($unsqueeze dl 0))))
dw))
(defun returns (rewards gamma &optional standardizep) (rewards rewards gamma standardizep))
;; our the simple policy model for testing
;;(defun policy (state w) ($softmax ($@ ($unsqueeze state 0) w)))
(defun policy (state w)
(let ((ss ($exp ($@ ($unsqueeze state 0) w))))
($/ ss ($sum ss))))
;; action selection
(defun select-action (state w &optional greedy)
(let* ((probs (policy state w))
(action (if greedy
($argmax (if ($parameterp probs) ($data probs) probs) 1)
($multinomial (if ($parameterp probs) ($data probs) probs) 1))))
(list ($scalar action) ($gather probs 1 action) probs)))
;; action selector for evaluation
(defun selector (w)
(lambda (state)
($scalar ($argmax (policy state (if ($parameterp w) ($data w) w)) 1))))
;; to store gradient values for comparison
(defparameter *manual-grads* nil)
(defparameter *manual-probs* nil)
(defparameter *manual-actions* nil)
(defparameter *manual-gts* nil)
(defparameter *backprop-grads* nil)
(defparameter *backprop-probs* nil)
(defparameter *backprop-actions* nil)
(defparameter *backprop-gts* nil)
;; REINFORCE implementations - one with manual backprop, the other auto backprop
(defun reinforce-simple (env w &optional (max-episodes 2000))
(let ((gamma 1)
(lr 0.01)
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for grads = '()
:for rewards = '()
:for score = 0
:for done = nil
:do (let ((rts nil))
(setf *manual-grads* nil
*manual-probs* nil
*manual-actions* nil)
(loop :while (not done)
:for (action prob probs) = (select-action state w T)
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let ((grad (policy-grad probs action state)))
(push grad grads)
(push reward rewards)
(incf score reward)
(push probs *manual-probs*)
(push action *manual-actions*)
(setf state next-state
done terminalp)))
(setf rts (returns (reverse rewards) gamma T))
(setf *manual-gts* rts)
(loop :for grad :in (reverse grads)
:for gt :in rts
:for i :from 0
:for gm = (expt gamma i)
:for gv = ($* gm gt grad)
:do (progn
;; for comparison, store value without learning rate
(push gv *manual-grads*)
($add! w ($* lr gv))))
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
(defun reinforce-bp (env w &optional (max-episodes 2000))
(let ((gamma 1)
(lr 0.01)
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for score = 0
:for done = nil
:do (let ((losses nil)
(rts nil))
(setf *backprop-grads* nil
*backprop-probs* nil
*backprop-actions* nil)
(loop :while (not done)
:for (action prob probs) = (select-action state w T)
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let* ((logP ($log prob)))
(push logP logPs)
(push reward rewards)
(incf score reward)
(push ($data probs) *backprop-probs*)
(push action *backprop-actions*)
(setf state next-state
done terminalp)))
(setf rts (returns (reverse rewards) gamma T))
(setf *backprop-gts* rts)
(loop :for logP :in (reverse logPs)
:for gt :in rts
:for i :from 0
:for gm = (expt gamma i)
:for l = ($- ($* gm logP gt))
:do (push l losses))
(reduce #'$+ losses)
(loop :for f :in (th::$fns w)
:do (push (funcall f) *backprop-grads*))
(setf *backprop-grads* (reverse *backprop-grads*))
($gd! w lr)
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
;; to check gradient values, first, compute a single episode
(defparameter *wm* ($clone *w0*))
(defparameter *wb* ($parameter ($clone *w0*)))
(reinforce-simple (cartpole-fixed-env 1000) *wm* 13)
(reinforce-bp (cartpole-fixed-env 1000) *wb* 13)
;; check gradient values - the difference should be almost zero
(eq ($count *backprop-grads*) ($count *manual-grads*))
(loop :for bg :in *backprop-grads*
:for mg :in *manual-grads*
:for d = ($+ bg mg) ;; bg and mg has counter sign
:summing ($scalar ($sum ($square d))))
(loop :for bg :in *backprop-grads*
:for mg :in *manual-grads*
:for i :from 0
:for d = ($scalar ($sum ($square ($+ bg mg)))) ;; bg and mg has counter sign
:when (> d 0.000001)
:collect (list i bg mg))
(loop :for bps :in *backprop-probs*
:for mps :in *manual-probs*
:for d = ($sum ($square ($- bps mps)))
:for i :from 0
:when (> d 0.000001)
:collect (list i bps mps))
(loop :for ba :in *backprop-actions*
:for ma :in *manual-actions*
:for i :from 0
:when (not (eq ba ma))
:collect (list i ba ma))
(loop :for bg :in *backprop-gts*
:for mg :in *manual-gts*
:for d = ($square (- bg mg))
:for i :from 0
:when (> d 0.000001)
:collect (list i bg mg))
;; compare trained results
(defparameter *wm* ($clone *w0*))
(defparameter *wb* ($parameter ($clone *w0*)))
;; with 100 iterations
(reinforce-simple (cartpole-fixed-env 300) *wm* 100)
(reinforce-bp (cartpole-fixed-env 300) *wb* 100)
;; compare weight values - this as well, should be almost zero
($scalar ($sum ($square ($- *wm* ($data *wb*)))))
;; with 2000 more iterations
(reinforce-simple (cartpole-fixed-env 300) *wm* 2000)
(reinforce-bp (cartpole-fixed-env 300) *wb* 2000)
;; compare weight values - this as well, should be almost zero
($scalar ($sum ($square ($- *wm* ($data *wb*)))))
;; compare without collecting gradient values but with greedy selection
(defun reinforce-simple (env w &optional (max-episodes 2000))
(let ((gamma 0.99)
(lr 0.01)
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for grads = '()
:for rewards = '()
:for score = 0
:for done = nil
:do (progn
(loop :while (not done)
:for (action prob probs) = (select-action state w T)
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let ((grad (policy-grad probs action state)))
(push grad grads)
(push reward rewards)
(incf score reward)
(setf state next-state
done terminalp)))
(loop :for grad :in (reverse grads)
:for gt :in (returns (reverse rewards) gamma T)
:for i :from 0
:for gm = (expt gamma i)
:for gv = ($* gm gt grad)
:do ($set! w ($+ w ($* lr gv))))
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "MG ~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
(defun reinforce-bp (env w &optional (max-episodes 2000))
(let ((gamma 0.99)
(lr 0.01)
(avg-score nil))
(loop :repeat max-episodes
:for e :from 1
:for state = (env/reset! env)
:for rewards = '()
:for logPs = '()
:for score = 0
:for done = nil
:do (let ((losses nil))
(loop :while (not done)
:for (action prob probs) = (select-action state w T)
:for (_ next-state reward terminalp successp) = (env/step! env action)
:do (let* ((logP ($log prob)))
(push logP logPs)
(push reward rewards)
(incf score reward)
(setf state next-state
done terminalp)))
(loop :for logP :in (reverse logPs)
:for gt :in (returns (reverse rewards) gamma T)
:for i :from 0
:for gm = (expt gamma i)
:do (push ($- ($* gm logP gt)) losses))
($gd! w lr)
(if (null avg-score)
(setf avg-score score)
(setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
(when (zerop (rem e 100))
(prn (format nil "AG ~5D: ~8,2F / ~8,2F" e score avg-score)))))
avg-score))
;; compare trained results
(defparameter *wm* ($clone *w0*))
(defparameter *wb* ($parameter ($clone *w0*)))
;; with 2000 iterations
(reinforce-simple (cartpole-fixed-env 100) *wm* 2000)
(reinforce-bp (cartpole-fixed-env 100) *wb* 2000)
;; compare weight values - this as well, should be almost zero
($scalar ($sum ($square ($- *wm* ($data *wb*)))))
(evaluate (cartpole-fixed-env 200) (selector *wm*))
(evaluate (cartpole-fixed-env 200) (selector *wb*))
;; now train with probabilistic selection
;; REINFORCE with a manually computed policy gradient: POLICY-GRAD supplies
;; grad log pi(a|s) per step, and W is updated in place with
;; w <- w + lr * gamma^i * Gt * grad.  Returns the exponentially averaged score.
(defun reinforce-simple (env w &optional (max-episodes 2000))
  (let ((gamma 0.99)            ;; discount factor
        (lr 0.01)               ;; learning rate for the manual update
        (avg-score nil))        ;; running average of episode scores
    (loop :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for grads = '()
          :for rewards = '()
          :for score = 0
          :for done = nil
          :do (progn
                ;; roll out one episode, recording per-step gradients and rewards
                (loop :while (not done)
                      :for (action prob probs) = (select-action state w)
                      :for (_ next-state reward terminalp successp) = (env/step! env action)
                      :do (let ((grad (policy-grad probs action state)))
                            (push grad grads)
                            (push reward rewards)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                ;; gradient-ascent update per step, in time order
                (loop :for grad :in (reverse grads)
                      :for gt :in (returns (reverse rewards) gamma T)
                      :for i :from 0
                      :for gm = (expt gamma i)
                      :for gv = ($* gm gt grad)
                      :do ($set! w ($+ w ($* lr gv))))
                ;; exponential moving average of the score (0.9 / 0.1 weighting)
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (prn (format nil "MG ~5D: ~8,2F / ~8,2F" e score avg-score)))))
    avg-score))
;; REINFORCE via autograd losses (-gamma^i * logP * Gt per step) followed by
;; ($gd! w lr).  NOTE(review): this redefines the REINFORCE-BP defined earlier
;; in this script; the difference is that SELECT-ACTION is called here without
;; the trailing T and only (action prob) are destructured.
;; NOTE(review): LOSSES is populated but never explicitly used after the second
;; loop; presumably TH accumulates gradients while the loss expressions are
;; constructed — confirm against TH's autograd semantics.
(defun reinforce-bp (env w &optional (max-episodes 2000))
  (let ((gamma 0.99)            ;; discount factor
        (lr 0.01)               ;; learning rate for $gd!
        (avg-score nil))        ;; running average of episode scores
    (loop :repeat max-episodes
          :for e :from 1
          :for state = (env/reset! env)
          :for rewards = '()
          :for logPs = '()
          :for score = 0
          :for done = nil
          :do (let ((losses nil))
                ;; roll out one episode, recording log-probabilities and rewards
                (loop :while (not done)
                      :for (action prob) = (select-action state w)
                      :for (_ next-state reward terminalp successp) = (env/step! env action)
                      :do (let* ((logP ($log prob)))
                            (push logP logPs)
                            (push reward rewards)
                            (incf score reward)
                            (setf state next-state
                                  done terminalp)))
                ;; build the per-step loss terms in time order
                (loop :for logP :in (reverse logPs)
                      :for gt :in (returns (reverse rewards) gamma T)
                      :for i :from 0
                      :for gm = (expt gamma i)
                      :do (push ($- ($* gm logP gt)) losses))
                ($gd! w lr)
                ;; exponential moving average of the score (0.9 / 0.1 weighting)
                (if (null avg-score)
                    (setf avg-score score)
                    (setf avg-score (+ (* 0.9 avg-score) (* 0.1 score))))
                (when (zerop (rem e 100))
                  (prn (format nil "AG ~5D: ~8,2F / ~8,2F" e score avg-score)))))
    avg-score))
;; compare trained results
(defparameter *wm* ($clone *w0*))
(defparameter *wb* ($parameter ($clone *w0*)))
;; with 2000 iterations
(reinforce-simple (cartpole-fixed-env 200) *wm* 2000)
(reinforce-bp (cartpole-fixed-env 200) *wb* 2000)
;; compare weight values - this would be different but not much
($scalar ($sum ($square ($- *wm* ($data *wb*)))))
(evaluate (cartpole-fixed-env 1000) (selector *wm*))
(evaluate (cartpole-fixed-env 1000) (selector *wb*))
| 14,938
|
Common Lisp
|
.lisp
| 341
| 30.72434
| 92
| 0.493272
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
ee91feb4956c0126657a226c7399714ccbc675f51417056e20c2e4939ca6f05d
| 3,256
|
[
-1
] |
3,257
|
rl.lisp
|
chunsj_TH/examples/rl/rl.lisp
|
(defpackage :rl-simple
(:use #:common-lisp
#:mu
#:th))
;; XXX
;; NO, this code will not teach or show you how to trade!
(in-package :rl-simple)
(defparameter *prices* (->> (read-lines-from "./data/msft.txt")
(mapcar (lambda (s) (parse-float s)))
(tensor)))
(defclass decision-policy ()
((actions :accessor policy-actions)))
(defgeneric select-action (policy current-state step))
(defgeneric update-Q (policy state action reward next-state))
(defmethod select-action ((policy decision-policy) current-state step))
(defmethod update-Q ((policy decision-policy) state action reward next-state))
(defclass random-decision-policy (decision-policy) ())
(defun random-decision-policy (actions)
(let ((n (make-instance 'random-decision-policy)))
(setf (policy-actions n) actions)
n))
(defmethod select-action ((policy random-decision-policy) current-state step)
($ (policy-actions policy) (random ($count (policy-actions policy)))))
;; Simulate trading over PRICES with POLICY.  The state at step I is the HIST
;; prices starting at I concatenated with (budget num-stocks); after acting,
;; the reward is the change in portfolio value and UPDATE-Q is invoked.
;; Returns the final portfolio value (cash + shares * last share value).
;; BUGFIX: the sell branch previously tested (eq action :shell) — a typo.
;; SELECT-ACTION only ever yields :buy/:sell/:hold (see *actions*), so shares
;; could never be sold and every :sell degraded to :hold.
(defun run-simulation (policy initial-budget initial-num-stocks prices hist)
  (let ((budget initial-budget)
        (num-stocks initial-num-stocks)
        (share-value 0)
        ;; transitions are recorded for reference but not returned
        (transitions (list)))
    (loop :for i :from 0 :below (- ($count prices) hist 1)
          :for current-state = ($cat ($ prices (list i hist)) (tensor (list budget num-stocks)))
          :for current-portfolio = (+ budget (* num-stocks share-value))
          :for action = (select-action policy current-state i)
          :do (progn
                (setf share-value ($ prices (+ i hist 1)))
                (cond ((and (eq action :buy) (>= budget share-value))
                       ;; buy one share when affordable
                       (decf budget share-value)
                       (incf num-stocks))
                      ((and (eq action :sell) (> num-stocks 0))
                       ;; sell one share when any are held (was :shell — dead branch)
                       (incf budget share-value)
                       (decf num-stocks))
                      ;; otherwise the action effectively becomes :hold
                      (t (setf action :hold)))
                (let* ((new-portfolio (+ budget (* num-stocks share-value)))
                       (reward (- new-portfolio current-portfolio))
                       (next-state ($cat ($ prices (list (1+ i) hist))
                                         (tensor (list budget num-stocks)))))
                  (push (list current-state action reward next-state) transitions)
                  (update-Q policy current-state action reward next-state))))
    (+ budget (* num-stocks share-value))))
;; Run RUN-SIMULATION five times with identical starting conditions, printing
;; each final portfolio value and collecting them into a list.
(defun run-simulations (policy budget num-stocks prices hist)
  (loop :repeat 5
        :collect (let ((final-portfolio
                         (run-simulation policy budget num-stocks prices hist)))
                   (prn final-portfolio)
                   final-portfolio)))
(defparameter *actions* '(:buy :sell :hold))
(defparameter *policy* (random-decision-policy *actions*))
(defparameter *budget* 1000D0)
(defparameter *num-stocks* 0)
(defparameter *hist* 3)
(run-simulations *policy* *budget* *num-stocks* *prices* *hist*)
(defclass q-learning-decision-policy (decision-policy)
((epsilon :initform 0.9D0 :accessor q-learning-epsilon)
(gamma :initform 0.001D0 :accessor q-learning-gamma)
(w1 :accessor q-learning-w1)
(b1 :accessor q-learning-b1)
(w2 :accessor q-learning-w2)
(b2 :accessor q-learning-b2)
(q :accessor policy-q)))
(defun q-learning-decision-policy (actions input-dim)
(let ((n (make-instance 'q-learning-decision-policy))
(nh 20))
(setf (policy-actions n) actions)
(setf (q-learning-w1 n) ($parameter (rndn input-dim nh)))
(setf (q-learning-b1 n) ($parameter ($* 0.1 (ones nh))))
(setf (q-learning-w2 n) ($parameter (rndn nh ($count actions))))
(setf (q-learning-b2 n) ($parameter ($* 0.1 (ones ($count actions)))))
n))
;; All trainable parameters of POLICY's Q-network, in layer order
;; (w1, b1, w2, b2) — the order the optimizers expect.
(defun q-learning-parameters (policy)
  (mapcar (lambda (reader) (funcall reader policy))
          (list #'q-learning-w1 #'q-learning-b1
                #'q-learning-w2 #'q-learning-b2)))
(defun reset-gradients (policy) ($cg! (q-learning-parameters policy)))
(defun compute-q-value (policy x)
(-> x
($affine (q-learning-w1 policy) (q-learning-b1 policy))
($relu)
($affine (q-learning-w2 policy) (q-learning-b2 policy))
($relu)))
;; Fit the Q-network toward target Y for input X, then step the parameters
;; with the AMSGrad-style optimizer.
;; NOTE(review): the squared-error expression ($@ d d) is constructed inside
;; LET* but its value is discarded before $amgd! runs.  This presumably relies
;; on TH accumulating gradients during graph construction (the same
;; compute-loss-then-call-optimizer pattern appears in the nalu example) —
;; confirm against TH's autograd semantics.
(defun train-q-value (policy x y)
  (let* ((q (compute-q-value policy x))
         (d ($- y q)))
    ($@ d d))
  ($amgd! (q-learning-parameters policy)))
(defun q-value (policy x)
(let ((q (compute-q-value policy x)))
(reset-gradients policy)
($data q)))
;; Index of the maximum element of TENSOR along DIMENSION.
;; Handles 1D and 2D tensors (for 2D the [0,0] entry of the index tensor is
;; taken); returns NIL for any other rank, matching the original behavior.
(defun $argmax (tensor &optional (dimension 0))
  (let ((indices (cadr ($max tensor dimension))))
    (case ($ndim tensor)
      (1 ($ indices 0))
      (2 ($ indices 0 0)))))
(defmethod select-action ((policy q-learning-decision-policy) current-state step)
(let ((threshold (min (q-learning-epsilon policy) (/ step 1000D0))))
(if (< (random 1D0) threshold)
(let* ((action-q-value (q-value policy current-state))
(argmax ($argmax action-q-value)))
($ (policy-actions policy) argmax))
($ (policy-actions policy) (random ($count (policy-actions policy)))))))
;; Q-learning update: set the entry for ACTION in Q(STATE) to
;; reward + gamma * max_a' Q(NEXT-STATE, a'), then regress the network
;; toward the modified Q vector via TRAIN-Q-VALUE.
(defmethod update-Q ((policy q-learning-decision-policy) state action reward next-state)
  (let* ((q (q-value policy state))
         (nq (q-value policy next-state))
         (nargmax ($argmax nq))               ;; index of best next action
         (na (position action (policy-actions policy))))  ;; index of taken action
    (setf ($ q na)
          (+ reward (* (q-learning-gamma policy) ($ nq nargmax))))
    (train-q-value policy state q)))
;; Simulate trading over PRICES with POLICY (redefinition used with the
;; Q-learning policy; logic identical to the earlier version).  Returns the
;; final portfolio value (cash + shares * last share value).
;; BUGFIX: the sell branch previously tested (eq action :shell) — a typo.
;; SELECT-ACTION only ever yields :buy/:sell/:hold (see *actions*), so shares
;; could never be sold and every :sell degraded to :hold.
(defun run-simulation (policy initial-budget initial-num-stocks prices hist)
  (let ((budget initial-budget)
        (num-stocks initial-num-stocks)
        (share-value 0)
        ;; transitions are recorded for reference but not returned
        (transitions (list)))
    (loop :for i :from 0 :below (- ($count prices) hist 1)
          :for current-state = ($cat ($ prices (list i hist)) (tensor (list budget num-stocks)))
          :for current-portfolio = (+ budget (* num-stocks share-value))
          :for action = (select-action policy current-state i)
          :do (progn
                (setf share-value ($ prices (+ i hist 1)))
                (cond ((and (eq action :buy) (>= budget share-value))
                       ;; buy one share when affordable
                       (decf budget share-value)
                       (incf num-stocks))
                      ((and (eq action :sell) (> num-stocks 0))
                       ;; sell one share when any are held (was :shell — dead branch)
                       (incf budget share-value)
                       (decf num-stocks))
                      ;; otherwise the action effectively becomes :hold
                      (t (setf action :hold)))
                (let* ((new-portfolio (+ budget (* num-stocks share-value)))
                       (reward (- new-portfolio current-portfolio))
                       (next-state ($cat ($ prices (list (1+ i) hist))
                                         (tensor (list budget num-stocks)))))
                  (push (list current-state action reward next-state) transitions)
                  (update-Q policy current-state action reward next-state))))
    (+ budget (* num-stocks share-value))))
(defparameter *actions* '(:buy :sell :hold))
(defparameter *hist* 3)
(defparameter *policy* (q-learning-decision-policy *actions* (+ *hist* 2)))
(defparameter *budget* 1000D0)
(defparameter *num-stocks* 0)
(run-simulations *policy* *budget* *num-stocks* *prices* *hist*)
(gcf)
;; you'll need mplot
(ql:quickload :mplot)
(mplot:plot-lines (->> (read-lines-from "./data/msft.txt")
(mapcar (lambda (s) (parse-float s)))))
| 7,530
|
Common Lisp
|
.lisp
| 157
| 38.605096
| 96
| 0.592512
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
59adb1c0a80f51458fe298cb401e099d646cdc4c7d0f96af609a5493101a1bf5
| 3,257
|
[
-1
] |
3,258
|
nalu.lisp
|
chunsj_TH/examples/nalu/nalu.lisp
|
;; from
;; https://github.com/grananqvist/NALU-tf/blob/master/nalu.py
;;
;; Neural Arithmetic Logic Units
;; Refer - https://arxiv.org/abs/1808.00508
(defpackage :nalu-work
(:use #:common-lisp
#:mu
#:th))
(in-package :nalu-work)
;; XXX
;; for this kind of problem, we may need more accurate tensor data than float
;; sometimes, the computation emits overflow error.
(defparameter *batch-size* 10)
(defparameter *shape* (list 2 1))
(defparameter *dataset* nil)
(defparameter *target* nil)
(defparameter *operation* #'*)
;; generate
(loop :for n :from 0 :below 100
:do (let ((args (tensor *batch-size* 2))
(vals (tensor *batch-size* 1)))
(loop :for i :from 0 :below 10
:for n1 = (random 7)
:for n2 = (random 7)
:for r = (funcall *operation* n1 n2)
:do (progn
(setf ($ args i 0) n1)
(setf ($ args i 1) n2)
(setf ($ vals i 0) r)))
(push args *dataset*)
(push vals *target*)))
(defparameter *nalu* (parameters))
(defparameter *w-hat* ($push *nalu* (vrnt *shape* 0 0.02)))
(defparameter *m-hat* ($push *nalu* (vrnt *shape* 0 0.02)))
(defparameter *g* ($push *nalu* (vrnt *shape* 0 0.02)))
(defparameter *epochs* 200)
($cg! *nalu*)
(time
(loop :for epoch :from 1 :to *epochs*
:for iter = 1
:do (loop :for x :in *dataset*
:for y :in *target*
:for w = ($* ($tanh *w-hat*) ($sigmoid *m-hat*))
:for m = ($exp ($@ ($log ($+ ($abs x) 1E-7)) w))
:for g = ($sigmoid ($@ x *g*))
:for a = ($@ x w)
:for y* = ($+ ($* g a) ($* ($- 1 g) m))
:for d = ($- y* y)
:for l = ($/ ($dot d d) *batch-size*)
:do (progn
($adgd! *nalu*)
(when (zerop (rem iter 100))
(prn "LOSS:" iter epoch ($data l))
(prn ($sum ($- ($round ($data y*)) ($round y)))))
(incf iter)))))
;; check training accuracy
(loop :for x :in *dataset*
:for y :in *target*
:for w = ($* ($tanh *w-hat*) ($sigmoid *m-hat*))
:for m = ($exp ($@ ($log ($+ ($abs x) 1E-7)) w))
:for g = ($sigmoid ($@ x *g*))
:for a = ($@ x w)
:for y* = ($+ ($* g a) ($* ($- 1 g) m))
:for d = ($- y* y)
:for l = ($/ ($dot d d) *batch-size*)
:do (progn
($cg! *nalu*)
(when (> ($sum ($- ($round ($data y*)) y)) 1E-4)
(prn "Y*" y*)
(prn "Y" y))))
;; check test accuracy - generate new data
(defparameter *dataset* nil)
(defparameter *target* nil)
(loop :for n :from 0 :below 10
:do (let ((args (tensor *batch-size* 2))
(vals (tensor *batch-size* 1)))
(loop :for i :from 0 :below 10
:for n1 = (random 12)
:for n2 = (random 12)
:for r = (funcall *operation* n1 n2)
:do (progn
(setf ($ args i 0) n1)
(setf ($ args i 1) n2)
(setf ($ vals i 0) r)))
(push args *dataset*)
(push vals *target*)))
;; okay, check with new data -
(loop :for x :in *dataset*
:for y :in *target*
:for w = ($* ($tanh *w-hat*) ($sigmoid *m-hat*))
:for m = ($exp ($@ ($log ($+ ($abs x) 1E-7)) w))
:for g = ($sigmoid ($@ x *g*))
:for a = ($@ x w)
:for y* = ($+ ($* g a) ($* ($- 1 g) m))
:for d = ($- y* y)
:for l = ($/ ($dot d d) *batch-size*)
:do (progn
($cg! *nalu*)
(when (> ($sum ($- ($round ($data y*)) y)) 1E-4)
(prn "**DIFFERENT**")
(prn "X" x)
(prn "Y*" y*)
(prn "Y" y))))
| 3,922
|
Common Lisp
|
.lisp
| 104
| 27.432692
| 77
| 0.437664
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
3209fbc5c8295d1f0b4faa081c9fe727897e1c30eb05277e6f2770bfed36d6bb
| 3,258
|
[
-1
] |
3,259
|
demo-genchars.lisp
|
chunsj_TH/examples/genchars/demo-genchars.lisp
|
;; following code to demonstrate the layer based rnn code
;; this network learns from simple single sentence and
;; generates characters based on it.
(defpackage :demo-genchars
(:use #:common-lisp
#:mu
#:th
#:th.text
#:th.layers
#:th.ex.data))
(in-package :demo-genchars)
;; text encoder
(defparameter *encoder*
(character-encoder "the quick brown fox jumps over the lazy dog. 12345678901,!?"))
;; train data
(defparameter *data* (list "the quick brown fox jumps over the lazy dog. "
"quick brown fox jumps over the lazy dog. the "
"brown fox jumps over the lazy dog. the quick "
"fox jumps over the lazy dog. the quick brown "
"jumps over the lazy dog. the quick brown fox "
"over the lazy dog. the quick brown fox jumps "
"the lazy dog. the quick brown fox jumps over "))
;; train target
(defparameter *target* (mapcar (lambda (s) (rotate-left-string 1 s)) *data*))
;; network parameters
(defparameter *hidden-size* 100)
;; string generation function
;; encoder-choose function takes relative probabilities, make them normalized
;; probabilities and use it to generate new characters.
;; to use seed string induced state, stateful parameter is turned on.
;; after processing, it should be turned off for fresh restarting.
(defun generate-string (rnn encoder seedstr n &optional (temperature 1D0))
($generate-sequence rnn encoder seedstr n temperature))
;; network
;; the output of the network is relative probabilities of each characters and
;; it should be normalized or something to be interpreted as probabilities.
;; generally, softmax is used for it.
(defparameter *rnn* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (rnn-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
;; reset network
($reset! *rnn*)
;; train network - this is in fact make the network overfit the data.
;; for testing purpose, overfitting is good one :-P
(time
(let* ((epochs 1000)
(print-step 50)
(xs (encoder-encode *encoder* *data*))
(ts (encoder-encode *encoder* *target*)))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn* xs))
(losses (mapcar (lambda (y c) ($cec y c)) outputs ts))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn*)))))
;; test trained network - high temperature means more "creativity".
;; if you increase temperature, then you'll know what it means.
(let ((seed-string "the")
(gen-length 100)
(temperature 1D0))
(prn (generate-string *rnn* *encoder* seed-string gen-length temperature)))
;; lstm test
(defparameter *rnn-lstm* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (lstm-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
($reset! *rnn-lstm*)
(time
(let* ((epochs 1000)
(print-step 50)
(xs (encoder-encode *encoder* *data*))
(ts (encoder-encode *encoder* *target*)))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn-lstm* xs))
(losses (mapcar (lambda (y c) ($cec y c)) outputs ts))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn-lstm*)))))
(let ((seed-string "the")
(gen-length 100)
(temperature 1D0))
($reset-state! *rnn-lstm* nil) ;; in case of being true of statefulp
(prn (generate-string *rnn-lstm* *encoder* seed-string gen-length temperature)))
;; gru test
(defparameter *rnn-gru* (let ((vsize (encoder-vocabulary-size *encoder*)))
(sequential-layer
(recurrent-layer (gru-cell vsize *hidden-size*))
(recurrent-layer (affine-cell *hidden-size* vsize :activation :nil)))))
($reset! *rnn-gru*)
(time
(let* ((epochs 1000)
(print-step 50)
(xs (encoder-encode *encoder* *data*))
(ts (encoder-encode *encoder* *target*)))
(loop :for iter :from 0 :below epochs
:do (let* ((outputs ($execute *rnn-gru* xs))
(losses (mapcar (lambda (y c) ($cec y c)) outputs ts))
(loss ($div (apply #'$+ losses) ($count losses))))
(when (zerop (rem iter print-step))
(prn iter ($data loss)))
($rmgd! *rnn-gru*)))))
(let ((seed-string "the")
(gen-length 100)
(temperature 1D0))
(prn (generate-string *rnn-gru* *encoder* seed-string gen-length temperature)))
| 5,102
|
Common Lisp
|
.lisp
| 108
| 38.148148
| 99
| 0.600201
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
b6d9dad48403bec90917895a5080cf479a0157dc371d0417c80c3b0b95d2bfee
| 3,259
|
[
-1
] |
3,260
|
eng-fra.lisp
|
chunsj_TH/examples/seq2seq/eng-fra.lisp
|
(defpackage :eng-fra
(:use #:common-lisp
#:mu
#:th
#:th.ex.data
#:th.layers
#:th.text))
(in-package :eng-fra)
(defparameter *fra-eng* (mapcar (lambda (pair) (reverse pair)) (eng-fra-small-processed)))
(defparameter *eng-prefixes* '("i am " "i m "
"he is " "he s "
"she is " "she s "
"you are " "you re "
"we are " "we re "
"they are " "they re "))
;; True when string S begins with any string in PREFIXES
;; (case-insensitive, via STRING-EQUAL).
(defun starts-with (s prefixes)
  (some (lambda (prefix)
          (and (>= ($count s) ($count prefix))
               (string-equal (subseq s 0 ($count prefix)) prefix)))
        prefixes))
(defparameter *pairs* (->> *fra-eng*
(filter (lambda (pair)
(let ((fra ($0 pair))
(eng ($1 pair)))
(and (< ($count (split #\Space fra)) 10)
(< ($count (split #\Space eng)) 10)
(starts-with eng *eng-prefixes*)))))
(mapcar (lambda (pair)
(let ((fra ($0 pair))
(eng ($1 pair)))
(list (split #\Space fra) (split #\Space eng)))))
(alexandria:shuffle)))
(defparameter *fra-encoder* (word-encoder (append '("SOS" "EOS") (flatten (mapcar #'$0 *pairs*)))))
(defparameter *eng-encoder* (word-encoder (append '("SOS" "EOS") (flatten (mapcar #'$1 *pairs*)))))
(defparameter *input-max-length* (reduce #'max (mapcar (lambda (pair) ($count ($0 pair))) *pairs*)))
(defparameter *output-max-length* (reduce #'max (mapcar (lambda (pair) ($count ($1 pair))) *pairs*)))
;; Pad SENTENCE (a list of word strings) with "EOS" tokens up to MAX-LENGTH;
;; a sentence already at or beyond MAX-LENGTH is returned unchanged.
(defun fill-eos (sentence max-length)
  (let ((deficit (- max-length ($count sentence))))
    (if (plusp deficit)
        (append sentence (make-list deficit :initial-element "EOS"))
        sentence)))
(defun encode-sentences (encoder sentences max-length)
(encoder-encode encoder (mapcar (lambda (sentence) (fill-eos sentence max-length)) sentences)))
(defun build-batches (encoder max-length data n)
(loop :for tail :on data :by (lambda (l) (nthcdr n l))
:collect (encode-sentences encoder (subseq tail 0 (min ($count tail) n)) max-length)))
(defparameter *batch-size* 100)
(defparameter *hidden-size* 256)
(defparameter *xs-batches* (build-batches *fra-encoder* *input-max-length*
(mapcar #'$0 *pairs*)
*batch-size*))
(defparameter *ys-batches* (build-batches *eng-encoder* *output-max-length*
(mapcar #'$1 *pairs*)
*batch-size*))
(defparameter *train-xs-batches* (subseq *xs-batches* 0 50))
(defparameter *train-ys-batches* (subseq *ys-batches* 0 50))
(defparameter *test-xs-batches* (subseq *xs-batches* 50 60))
(defparameter *test-ys-batches* (subseq *ys-batches* 50 60))
(defparameter *overfit-xs-batches* (subseq (build-batches *fra-encoder* *input-max-length*
(mapcar #'$0 *pairs*)
10)
0 1))
(defparameter *overfit-ys-batches* (subseq (build-batches *eng-encoder* *output-max-length*
(mapcar #'$1 *pairs*)
10)
0 1))
(defclass seq2seq ()
((from-encoder :initform nil :accessor $seq2seq-from-encoder)
(to-encoder :initform nil :accessor $seq2seq-to-encoder)
(encoder-network :initform nil :accessor $seq2seq-encoder-network)
(decoder-network :initform nil :accessor $seq2seq-decoder-network)))
(defun seq2seq (from-encoder to-encoder nhidden)
(let ((n (make-instance 'seq2seq))
(from-vsize (encoder-vocabulary-size from-encoder))
(to-vsize (encoder-vocabulary-size to-encoder))
(wvecsz 32))
(with-slots (encoder-network decoder-network) n
(setf ($seq2seq-from-encoder n) from-encoder
($seq2seq-to-encoder n) to-encoder)
(setf encoder-network (sequential-layer
(recurrent-layer (affine-cell from-vsize wvecsz
:activation :nil
:biasp nil))
(recurrent-layer (lstm-cell wvecsz nhidden)))
decoder-network (sequential-layer
(recurrent-layer (affine-cell to-vsize wvecsz
:activation :nil
:biasp nil))
(recurrent-layer (lstm-cell wvecsz nhidden))
(recurrent-layer
(sequential-layer
(parallel-layer
(attention-cell)
(functional-layer
(lambda (q &key (trainp t))
(declare (ignore trainp))
q)))
(functional-layer
(lambda (c q &key (trainp t))
(declare (ignore trainp))
($cat q c 1)))))
(recurrent-layer (affine-cell (* 2 nhidden) to-vsize
:activation :nil)))))
n))
(defun $encoder-state (s2s)
(with-slots (encoder-network) s2s
($cell-state ($cell ($ encoder-network 1)))))
(defun $update-decoder-network-state! (s2s hs h0)
(with-slots (decoder-network) s2s
($update-cell-state! ($ decoder-network 1) h0)
($set-memory! ($ ($ ($cell ($ decoder-network 2)) 0) 0) (concat-sequence hs))))
(defun $execute-seq2seq (s2s xs ts)
(with-slots (encoder-network decoder-network) s2s
(let ((hs ($execute encoder-network xs))
(h0 ($encoder-state s2s)))
($update-decoder-network-state! s2s hs h0)
(with-keeping-state (decoder-network)
(let* ((batch-size ($size (car xs) 0))
(ys (append (list ($fill (tensor.long batch-size) 0))
(butlast ts))))
($execute decoder-network ys))))))
(defun $compute-loss (s2s xs ts)
(let* ((ys ($execute-seq2seq s2s xs ts))
(losses (mapcar (lambda (y c) ($cec y c)) ys ts)))
($div (apply #'$+ losses) ($count losses))))
(defun $generate-seq2seq (s2s hs h0 xs0 n)
(with-slots (encoder-network decoder-network to-encoder) s2s
(let ((sampled '())
(xts xs0)
(batch-size ($size (car xs0) 0)))
($update-decoder-network-state! s2s hs h0)
(with-keeping-state (decoder-network)
(loop :for i :from 0 :below n
:do (let* ((yts ($evaluate decoder-network xts))
(rts (encoder-choose to-encoder yts -1)))
(push rts sampled)
(setf xts (encoder-encode to-encoder rts)))))
(let ((res (reverse sampled))
(results (make-list batch-size)))
(loop :for r :in res
:do (loop :for v :in r
:for i :from 0
:do (push (car v) ($ results i))))
(mapcar #'reverse results)))))
(defun $evaluate-seq2seq (s2s xs &optional (n 9))
(with-slots (encoder-network decoder-network) s2s
(let ((hs ($evaluate encoder-network xs))
(h0 ($encoder-state s2s)))
($generate-seq2seq s2s hs h0
(list ($fill (tensor.long ($size (car xs) 0)) 0))
n))))
;; True when L1 and L2 are lists of strings with the same length and
;; case-insensitively equal elements.
;; FIX: length comparison used EQ on integers, which the CL standard does not
;; guarantee for numbers (implementation-dependent outside fixnum interning);
;; use numeric = instead.  The element walk is expressed with EVERY.
(defun list-equal (l1 l2)
  (and (= ($count l1) ($count l2))
       (every #'string-equal l1 l2)))
(defun $matches-score (s2s ts ys)
(let ((tss (encoder-decode ($seq2seq-to-encoder s2s) ts))
(yss ys))
(let ((matches (mapcar (lambda (tn yn) (if (list-equal tn yn) 0 1)) tss yss)))
(* 1D0 (/ (reduce #'+ matches) ($count matches))))))
;; Apply the optimizer function FN (e.g. #'$adgd!) with learning rate LR to
;; both halves of the seq2seq model, decoder network first.
(defun gd! (s2s fn lr)
  (with-slots (encoder-network decoder-network) s2s
    (funcall fn decoder-network lr)
    (funcall fn encoder-network lr)))
;; Return a fresh copy of LISTS (a list of word lists) in which every "EOS"
;; token (case-insensitive, via STRING-EQUAL) is replaced by the empty string.
(defun replace-eos (lists)
  (mapcar (lambda (words)
            (mapcar (lambda (word)
                      (if (string-equal word "EOS") "" word))
                    words))
          lists))
(defun $train (s2s xss tss &key (epochs 10) (pstep 100) (gdfn #'$adgd!) (lr 1D0) (testp T))
(let ((sz ($count xss)))
(with-slots (to-encoder) s2s
(loop :for epoch :from 0 :below epochs
:do (loop :for xs :in xss
:for ts :in tss
:for idx :from 0
:for iter = (+ idx (* epoch sz))
:do (let ((loss ($compute-loss s2s xs ts)))
(gd! s2s gdfn lr)
(when (zerop (rem iter pstep))
(let* ((lv ($data loss))
(tidx (random ($count *test-xs-batches*)))
(txs (if testp
($ *test-xs-batches* tidx)
xs))
(tts (if testp
($ *test-ys-batches* tidx)
ts))
(ys ($evaluate-seq2seq s2s txs))
(score ($matches-score s2s tts ys)))
(prn iter lv score)
(prn "TS" (replace-eos (encoder-decode to-encoder tts)))
(prn "YS" (replace-eos ys))
(prn "==")))))))))
(defmethod $reset! ((s2s seq2seq))
(with-slots (encoder-network decoder-network) s2s
($reset! encoder-network)
($reset! decoder-network)))
(defparameter *s2s* (seq2seq *fra-encoder* *eng-encoder* 256))
($reset! *s2s*)
;; to check whether the code works
(time ($train *s2s* *overfit-xs-batches* *overfit-ys-batches* :epochs 300 :pstep 100 :testp nil))
;; real one
(time ($train *s2s* *train-xs-batches* *train-ys-batches* :epochs 500 :pstep 100))
;; testing, checking
(prn (->> (->> ($0 *overfit-xs-batches*)
(mapcar (lambda (s) ($narrow s 0 0 1))))
(funcall (lambda (s)
(prn (encoder-decode *fra-encoder* s))
s))
($evaluate-seq2seq *s2s*)))
(prn (encoder-decode *fra-encoder* ($0 *overfit-xs-batches*)))
(prn (encoder-decode *eng-encoder* ($0 *overfit-ys-batches*)))
(prn ($evaluate-seq2seq *s2s* ($0 *overfit-xs-batches*)))
(prn (->> '(("tu" "es" "la" "professeur" "." "EOS" "EOS" "EOS" "EOS"))
(encoder-encode *fra-encoder*)
($evaluate-seq2seq *s2s*)))
| 11,317
|
Common Lisp
|
.lisp
| 219
| 34.757991
| 101
| 0.478037
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
5e5f43e5f47be962a28419607590dd48a70797b0e615e27cb48f8489c0152af7
| 3,260
|
[
-1
] |
3,261
|
min.lisp
|
chunsj_TH/pp/min.lisp
|
(in-package :th.pp)
;; Apply FUNCTION to ARGS (a tensor argument is unrolled into a list via
;; $list first), incrementing the call counter stored at ($ counter 0)
;; as a side effect.  Used by NELDER-MEAD to enforce MAXFUN.
(defun nm/runfunc (counter function args)
  (let ((args (if (listp args) args ($list args))))
    (setf ($ counter 0) (1+ ($ counter 0)))
    (apply function args)))
(defun nm/xts (x xts)
(tensor (loop :for i :from 0 :below ($count xts)
:for v = ($ x i)
:for vt = ($ xts i)
:collect (if (= vt 1) (round v) v))))
(defun nm/maxsimdiff (sim xts)
(let ((sim0 (nm/xts ($ sim 0) xts)))
(loop :for k :from 1 :below ($size sim 0)
:for simk = (nm/xts ($ sim k) xts)
:maximizing ($max ($abs ($sub simk sim0))))))
(defun nm/maxfsimdiff (fsim) ($max ($abs ($sub fsim ($ fsim 0)))))
;; Convergence test: both the simplex spread in x-space and the spread of the
;; simplex function values must fall below their absolute tolerances.
(defun nm/convergedp (sim fsim xts xatol fatol)
  (and (<= (nm/maxsimdiff sim xts) xatol) (<= (nm/maxfsimdiff fsim) fatol)))
(defun nm/order (tensor ind)
(let ((size ($size tensor)))
($gather tensor 0 ($expand ($transpose ind) size))))
(defun nm/init! (sim x0 xts zdelt nonzdelt)
(setf ($ sim 0) x0)
(loop :for k :from 0 :below ($count x0)
:for vt = ($ xts k)
:for y = (tensor x0)
:do (progn
(if (zerop ($ y k))
(if (= vt 1)
(setf ($ y k) 1)
(setf ($ y k) zdelt))
(if (= vt 1)
(setf ($ y k) (if (> (abs (round (* nonzdelt ($ y k)))) 0)
(round (* (+ 1 nonzdelt) ($ y k)))
(+ ($ y k) (* (signum ($ y k)) 1))))
(setf ($ y k) (* (+ 1 nonzdelt) ($ y k)))))
(setf ($ sim (1+ k)) y))))
;; Sort the simplex by function value: FSIM is replaced by its sorted copy and
;; SIM's rows are permuted into the same order via the sort indices.
;; A macro (not a function) because both arguments are places that are SETF'd.
(defmacro nm/reorder! (sim fsim)
  `(let ((sr ($sort ,fsim))   ;; (sorted-values indices)
         (ind nil))
     (setf ,fsim (car sr)
           ind (cadr sr))
     (setf ,sim (nm/order ,sim ind))))
(defun nm/2ndworst (sim) ($ sim (- ($size sim 0) 2)))
(defun nm/worst (sim) ($ sim (1- ($size sim 0))))
(defun (setf nm/worst) (nv sim) (setf ($ sim (1- ($size sim 0))) nv))
(defun nm/xbar (sim)
($squeeze ($mean ($subview sim 0 (1- ($size sim 0))) 0)))
(defun nm/xr (sim xts xbar rho)
(let ((ws (nm/worst sim)))
(let ((xr ($sub ($mul (1+ rho) xbar) ($mul rho ws))))
(loop :for i :from 0 :below ($count xts)
:for vt = ($ xts i)
:for mv = ($ xbar i)
:for rv = ($ xr i)
:for wv = ($ ws i)
:when (= vt 1)
:do (if (>= rv wv)
(if (= rv wv)
(setf ($ xr i) (round ($ xr i)))
(if (= (round rv) (round wv))
(setf ($ xr i) (1+ (round ($ xr i))))
(setf ($ xr i) (round ($ xr i)))))
(if (= (round rv) (round wv))
(setf ($ xr i) (1- (round ($ xr i))))
(setf ($ xr i) (round ($ xr i))))))
xr)))
(defun nm/xe (sim xts xbar rho chi)
(let ((ws (nm/worst sim)))
(let ((xe ($sub ($mul (1+ (* rho chi)) xbar) ($mul (* rho chi) ws))))
(loop :for i :from 0 :below ($count xts)
:for vt = ($ xts i)
:for mv = ($ xbar i)
:for ev = ($ xe i)
:for wv = ($ ws i)
:when (= vt 1)
:do (if (>= ev wv)
(if (= ev wv)
(setf ($ xe i) (round ($ xe i)))
(if (= (round ev) (round wv))
(setf ($ xe i) (1+ (round ($ xe i))))
(setf ($ xe i) (round ($ xe i)))))
(if (= (round ev) (round wv))
(setf ($ xe i) (1- (round ($ xe i))))
(setf ($ xe i) (round ($ xe i))))))
xe)))
(defun nm/xc (sim xts xbar rho psi)
(let ((ws (nm/worst sim)))
(let ((xc ($sub ($mul (1+ (* psi rho)) xbar) ($mul (* psi rho) ws))))
(loop :for i :from 0 :below ($count xts)
:for vt = ($ xts i)
:for mv = ($ xbar i)
:for cv = ($ xc i)
:for wv = ($ ws i)
:when (= vt 1)
:do (if (>= cv wv)
(if (= cv wv)
(setf ($ xc i) (round ($ xc i)))
(if (= (round cv) (round wv))
(setf ($ xc i) (1+ (round ($ xc i))))
(setf ($ xc i) (round ($ xc i)))))
(if (= (round cv) (round wv))
(setf ($ xc i) (1- (round ($ xc i))))
(setf ($ xc i) (round ($ xc i))))))
xc)))
(defun nm/xcc (sim xts xbar psi)
(let ((ws (nm/worst sim)))
(let ((xcc ($add ($mul (- 1 psi) xbar) ($mul psi ws))))
(loop :for i :from 0 :below ($count xts)
:for vt = ($ xts i)
:for mv = ($ xbar i)
:for cv = ($ xcc i)
:for wv = ($ ws i)
:when (= vt 1)
:do (if (>= cv wv)
(if (= cv wv)
(setf ($ xcc i) (round ($ xcc i)))
(if (= (round cv) (round wv))
(setf ($ xcc i) (1+ (round ($ xcc i))))
(setf ($ xcc i) (round ($ xcc i)))))
(if (= (round cv) (round wv))
(setf ($ xcc i) (1- (round ($ xcc i))))
(setf ($ xcc i) (round ($ xcc i))))))
xcc)))
(defun nm/shrink (xts simj sim0 sigma)
(let ((nsim ($add simj ($mul sigma ($sub simj sim0)))))
(loop :for i :from 0 :below ($count xts)
:for vt = ($ xts i)
:for nv = ($ nsim i)
:for ov = ($ simj i)
:when (= vt 1)
:do (if (>= nv ov)
(if (= nv ov)
(setf ($ nsim i) (round ($ nsim i)))
(if (= (round nv) (round ov))
(setf ($ nsim i) (1+ (round ($ nsim i))))
(setf ($ nsim i) (round ($ nsim i)))))
(if (= (round nv) (round ov))
(setf ($ nsim i) (1- (round ($ nsim i))))
(setf ($ nsim i) (round ($ nsim i))))))
nsim))
(defun nm/shrink! (sim xts fsim fcalls function sigma)
(let ((n (1- ($size sim 0))))
(loop :for j :from 1 :to n
:for sim0 = ($ sim 0)
:for simj = ($ sim j)
:do (progn
(setf ($ sim j) (nm/shrink xts simj sim0 sigma))
(setf ($ fsim j) (nm/runfunc fcalls function ($ sim j)))))))
(defun nelder-mead (function x0 &key adaptive (xatol 1E-4) (fatol 1E-4))
(let* ((n ($count x0))
(maxiter (* n 200))
(maxfun (* n 200))
(rho 1.0)
(chi (if adaptive (+ 1.0 (/ 2.0 n)) 2.0))
(psi (if adaptive (- 0.75 (/ 1.0 (* 2.0 n))) 0.5))
(sigma (if adaptive (- 1.0 (/ 1.0 n)) 0.5))
(nonzdelt 0.05)
(zdelt 0.00025)
(sim (tensor (1+ n) n))
(fsim (tensor (1+ n)))
(fcalls (tensor.long '(0)))
(keep T)
(wflag 0)
(xts (tensor.long (loop :for k :from 0 :below ($count x0)
:collect (if (integerp ($ x0 k)) 1 0)))))
(nm/init! sim x0 xts zdelt nonzdelt)
(loop :for k :from 0 :to ($count x0)
:do (setf ($ fsim k) (nm/runfunc fcalls function ($ sim k))))
(nm/reorder! sim fsim)
(loop :for iterations :from 1
:while (let ((fw (< ($ fcalls 0) maxfun))
(iw (< iterations maxiter)))
(cond ((not fw) (setf wflag 1))
((not iw) (setf wflag 2)))
(and keep fw iw))
:for xbar = (nm/xbar sim)
:for xr = (nm/xr sim xts xbar rho)
:for fxr = (nm/runfunc fcalls function xr)
:do (let ((doshrink nil))
(if (< fxr ($ fsim 0))
(let* ((xe (nm/xe sim xts xbar rho chi))
(fxe (nm/runfunc fcalls function xe)))
(if (< fxe fxr)
(setf (nm/worst sim) xe
(nm/worst fsim) fxe)
(setf (nm/worst sim) xr
(nm/worst fsim) fxr)))
(if (< fxr (nm/2ndworst fsim))
(setf (nm/worst sim) xr
(nm/worst fsim) fxr)
(if (< fxr (nm/worst fsim))
(let* ((xc (nm/xc sim xts xbar rho psi))
(fxc (nm/runfunc fcalls function xc)))
(if (<= fxc fxr)
(setf (nm/worst sim) xc
(nm/worst fsim) fxc)
(setf doshrink T)))
(let* ((xcc (nm/xcc sim xts xbar psi))
(fxcc (nm/runfunc fcalls function xcc)))
(if (< fxcc (nm/worst fsim))
(setf (nm/worst sim) xcc
(nm/worst fsim) fxcc)
(setf doshrink T))))))
(when doshrink (nm/shrink! sim xts fsim fcalls function sigma))
(nm/reorder! sim fsim)
(when (nm/convergedp sim fsim xts xatol fatol) (setf keep nil))))
(let ((x ($ sim 0))
(fval ($min fsim)))
(unless (zerop wflag)
(cond ((= 1 wflag) (prn "[WARN] MAX FUNCTION CALLS EXCEEDED"))
((= 2 wflag) (prn "[WARN] MAX ITERATIONS EXCEEDED"))))
(values x fval))))
| 9,626
|
Common Lisp
|
.lisp
| 213
| 29.920188
| 81
| 0.391258
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
a871de42e0f6ba2613d6e568e7c77f4d5212a0e5fc06557c948ebfae3e68c008
| 3,261
|
[
-1
] |
3,262
|
mh.lisp
|
chunsj_TH/pp/mh.lisp
|
(in-package :th.pp)
(defgeneric r/deviance (rv))
(defclass r/cvar (r/continuous)
((deviance :initform nil)))
(defclass r/dvar (r/discrete)
((deviance :initform nil)))
(defun r/cvar (value &optional deviance)
(let ((rv (make-instance 'r/cvar))
(dev deviance))
(with-slots (deviance) rv
(setf ($data rv) value)
(when dev (setf deviance dev)))
rv))
(defun r/dvar (value &optional deviance)
(let ((rv (make-instance 'r/dvar))
(dev deviance))
(with-slots (deviance) rv
(setf ($data rv) value)
(when dev (setf deviance dev)))
rv))
(defmethod r/deviance ((rv r/variable)))
(defmethod r/deviance ((rv r/cvar))
(with-slots (deviance) rv
deviance))
(defmethod r/deviance ((rv r/dvar))
(with-slots (deviance) rv
deviance))
;; Proposal protocol used by the Metropolis-Hastings samplers below.
(defgeneric proposal/scale! (proposal s))
(defgeneric proposal/rescale! (proposal))
(defgeneric proposal/accepted! (proposal acceptedp))
(defgeneric proposal/propose (proposal value))
(defgeneric r/proposal (rv &optional dev))
(defgeneric r/propose! (rv proposal))
(defgeneric r/accept! (rv proposal acceptedp))
;; Base proposal state: accept/reject counters feeding FACTOR adaptation,
;; a base SCALE, and PVALUE -- the value saved before a propose so a
;; rejected move can be rolled back (accessed through $data).
(defclass r/proposal ()
  ((accepted :initform 0)
   (rejected :initform 0)
   (factor :initform 1.0)
   (scale :initform 1.0)
   (pvalue :initform nil :accessor $data)))
;; Set the proposal's base step SCALE to S. The adaptive FACTOR is left
;; untouched; the original bound FACTOR in WITH-SLOTS without ever using
;; it, so the unused binding is dropped here.
(defmethod proposal/scale! ((proposal r/proposal) s)
  (with-slots (scale) proposal
    (setf scale s)))
;; Adapt the proposal's step FACTOR from the acceptance ratio observed
;; since the last rescale, then reset the counters. Very low acceptance
;; shrinks the step, very high acceptance grows it; no-op until at least
;; one outcome has been recorded.
(defmethod proposal/rescale! ((proposal r/proposal))
  (with-slots (accepted rejected factor) proposal
    (let ((total (+ accepted rejected)))
      (when (plusp total)
        (let* ((ratio (/ accepted total))
               (multiplier (cond ((< ratio 0.001) 0.1)
                                 ((< ratio 0.05) 0.5)
                                 ((< ratio 0.2) 0.9)
                                 ((> ratio 0.95) 10.0)
                                 ((> ratio 0.75) 2.0)
                                 ((> ratio 0.5) 1.1)
                                 (T 1.0))))
          (setf factor (* factor multiplier)
                accepted 0
                rejected 0))))))
;; Record one accept/reject outcome on PROPOSAL's counters; returns the
;; proposal itself.
(defmethod proposal/accepted! ((proposal r/proposal) acceptedp)
  (with-slots (accepted rejected) proposal
    (cond (acceptedp (incf accepted))
          (T (incf rejected)))
    proposal))
;; Propose a new value for RV: stash the current value in the proposal
;; (for rollback), install the candidate, and return the log-Hastings
;; ratio of the move (cdr of the (value . lhr) pair from PROPOSAL/PROPOSE).
(defmethod r/propose! ((rv r/variable) (proposal r/proposal))
  (let ((proposed (proposal/propose proposal ($data rv))))
    (setf ($data proposal) ($data rv))
    (setf ($data rv) (car proposed))
    (cdr proposed)))
;; Joint move: propose for every rv/proposal pair and return the summed
;; log-Hastings ratio.
(defmethod r/propose! ((rvs list) (proposals list))
  (loop :for proposal :in proposals
        :for candidate :in rvs
        :for lhr = (r/propose! candidate proposal)
        :summing lhr))
;; Commit or roll back the pending move: counters are always updated;
;; on rejection the saved previous value is restored and cleared.
(defmethod r/accept! ((rv r/variable) (proposal r/proposal) acceptedp)
  (proposal/accepted! proposal acceptedp)
  (unless acceptedp
    (setf ($data rv) ($data proposal)
          ($data proposal) nil))
  rv)
;; Gaussian random-walk proposal for continuous parameters.
(defclass proposal/gaussian (r/proposal) ())
(defun proposal/gaussian (&optional (scale 1.0))
  (let ((n (make-instance 'proposal/gaussian))
        (s scale))
    (with-slots (scale) n
      (setf scale s))
    n))
(defmethod proposal/propose ((proposal proposal/gaussian) value)
  (with-slots (scale factor) proposal
    ;; Symmetric proposal, so the log-Hastings ratio is 0.0; the sd is
    ;; floored at 1E-7 to keep the sampler well-defined.
    (cons (sample/gaussian value (max 1E-7 (* factor scale))) 0.0)))
;; Uniform integer random-walk proposal for discrete parameters.
(defclass proposal/discrete (r/proposal) ())
(defun proposal/discrete (&optional (scale 2))
  (let ((n (make-instance 'proposal/discrete))
        (s scale))
    (with-slots (scale) n
      (setf scale s))
    n))
;; Uniform step in [-w, w] with a minimum half-width of 2.
(defun rwalk (w)
  (let ((w (max 2 (round (abs w)))))
    (sample/discrete-uniform (- w) w)))
(defmethod proposal/propose ((proposal proposal/discrete) value)
  (with-slots (scale factor) proposal
    (cons (+ value (rwalk (* scale factor))) 0.0)))
;; Rounded-Gaussian random walk for discrete parameters.
(defclass proposal/discrete-gaussian (r/proposal) ())
(defun proposal/discrete-gaussian (&optional (scale 2.0))
  (let ((n (make-instance 'proposal/discrete-gaussian))
        (s scale))
    (with-slots (scale) n
      (setf scale s))
    n))
(defmethod proposal/propose ((proposal proposal/discrete-gaussian) value)
  (with-slots (scale factor) proposal
    (cons (round (sample/gaussian value (max 1E-7 (* factor scale)))) 0.0)))
;; Signed-Poisson random walk for discrete parameters: a Poisson-sized
;; step applied in a random direction.
(defclass proposal/poisson (r/proposal) ())
(defun proposal/poisson (&optional (scale 1.0))
  (let ((n (make-instance 'proposal/poisson))
        (s scale))
    (with-slots (scale) n
      (setf scale s))
    n))
(defmethod proposal/propose ((proposal proposal/poisson) value)
  (with-slots (scale factor) proposal
    (cons (+ value (* (sample/poisson (max 1E-7 (* factor scale)))
                      (if (= 1 (random 2)) -1 1)))
          0.0)))
;; Build a default proposal for RV. The scale comes from, in priority
;; order: the explicit DEV argument, the rv's stored deviance (1 when
;; that deviance is zero), or the magnitude of the current value.
;; When the current value is zero the class-default scale is kept.
(defmethod r/proposal ((rv r/discrete) &optional dev)
  (let ((p (proposal/poisson)))
    (unless (zerop ($data rv))
      (with-slots (scale) p
        (if dev
            (setf scale dev)
            (if (r/deviance rv)
                (setf scale (if (zerop (r/deviance rv)) 1 (r/deviance rv)))
                (let ((absv ($abs ($data rv))))
                  (setf scale (* scale (if (zerop absv) 1 absv))))))))
    p))
;; Continuous counterpart: same scale-selection logic over a Gaussian
;; random-walk proposal.
(defmethod r/proposal ((rv r/continuous) &optional dev)
  (let ((p (proposal/gaussian)))
    (unless (zerop ($data rv))
      (with-slots (scale) p
        (if dev
            (setf scale dev)
            (if (r/deviance rv)
                (setf scale (if (zerop (r/deviance rv)) 1 (r/deviance rv)))
                (let ((absv ($abs ($data rv))))
                  (setf scale (* scale (if (zerop absv) 1 absv))))))))
    p))
;; Welford-style running mean/variance accumulator.
;;   n     - number of samples pushed so far
;;   pm/nm - previous / new running mean
;;   ps/ns - previous / new accumulated sum of squared deviations
(defclass rstat ()
  ((n :initform 0.0)
   (pm :initform 0.0)
   (nm :initform 0.0)
   (ps :initform 0.0)
   (ns :initform 0.0)))
(defun rstat () (make-instance 'rstat))
;; Fold one sample X into RSTAT using Welford's online update; the first
;; sample initializes the mean and zeroes the deviation sum.
(defun rstat/push! (rstat x)
  (with-slots (n pm nm ps ns) rstat
    (incf n)
    (if (= n 1)
        (setf pm x
              nm x
              ps 0.0)
        (setf nm (+ pm (/ (- x pm) n))
              ns (+ ps (* (- x pm) (- x nm)))))
    ;; Roll the "new" values over as the "previous" values for next push.
    (setf pm nm
          ps ns)))
;; Running mean accumulated so far; 0.0 before any sample was pushed.
(defun rstat/mean (rstat)
  (with-slots (n nm) rstat
    (if (plusp n) nm 0.0)))
;; Running sample variance (Bessel-corrected once more than two samples
;; exist); 0.0 until at least two samples were pushed.
(defun rstat/variance (rstat)
  (with-slots (n ns) rstat
    (cond ((> n 2) (/ ns (1- n)))
          ((> n 1) (/ ns n))
          (T 0.0))))
;; Metropolis-Hastings acceptance test in log space: accept when
;; (nprob - prob + lhr) exceeds the log of a uniform draw. Returns NIL
;; when any input is NIL (invalid posterior), so a bad proposal is
;; always rejected.
(defun mh/accepted (prob nprob log-hastings-ratio)
  (when (and prob nprob log-hastings-ratio)
    (let ((alpha (+ (- nprob prob) log-hastings-ratio))
          (log-u (log (+ (random 1.0) 1E-7))))
      (> alpha log-u))))
;; Default proposals for the EM/AE samplers: Gaussian random walk for
;; continuous parameters, signed-Poisson walk for discrete ones. When no
;; deviance is stored, the scale is derived from the parameter's current
;; magnitude (1 when the value is zero).
;; NOTE(review): the continuous branch passes (round ($abs ($data p)))
;; as the deviance -- rounding a continuous scale looks odd; confirm.
(defun em-proposals (parameters)
  (->> parameters
       (mapcar (lambda (p)
                 (if (r/deviance p)
                     (if (r/continuousp p)
                         (r/proposal p)
                         (proposal/poisson (r/deviance p)))
                     (if (r/continuousp p)
                         (r/proposal p (if (zerop ($data p))
                                           1.0
                                           (round ($abs ($data p)))))
                         (proposal/poisson (if (zerop ($data p))
                                               1
                                               (round ($abs ($data p)))))))))))
;; Metropolis-Hastings, element-wise ("EM") variant: each parameter is
;; proposed and accepted/rejected on its own while the others stay at
;; their current values. Returns one r/trace per parameter (samples,
;; proposals, acceptance counts), or NIL when the posterior is invalid
;; at the initial point. Proposal factors are retuned every TUNE-STEPS
;; iterations; progress dots are printed during burn-in and sampling.
(defun mcmc/mh-em (parameters posterior-function
                   &key (iterations 40000) (burn-in 10000) tune-steps)
  (labels ((posterior (vs) (apply posterior-function vs))
           (vals (parameters) (mapcar #'$data parameters)))
    (let ((prob (posterior (vals parameters)))
          (tune-steps (or tune-steps 1000)))
      (when prob
        (let ((proposals (em-proposals parameters))
              (traces (r/traces (mapcar #'$clone (mapcar #'$data parameters))
                                :n iterations :burn-in burn-in))
              (candidates (mapcar #'$clone parameters))
              (nsize (+ iterations burn-in))
              (bstep (round (/ burn-in 10)))
              (pstep (round (/ iterations 10)))
              (maxprob prob)
              (naccepted 0))
          (prn (format nil "[MH/EM: BURNING"))
          (loop :repeat nsize
                :for iter :from 1
                :for burning = (<= iter burn-in)
                :for tuneable = (zerop (rem iter tune-steps))
                :do (progn
                      ;; Progress reporting: one dot per 10% of each phase.
                      (when (and burning (zerop (rem iter bstep)))
                        (prns "."))
                      (when (= burn-in iter)
                        (prns (format nil " DONE. SAMPLING")))
                      (when (and (not burning) (zerop (rem (- iter burn-in) pstep)))
                        (prns "."))
                      ;; One accept/reject decision per parameter per iteration.
                      (loop :for proposal :in proposals
                            :for candidate :in candidates
                            :for trace :in traces
                            :for lhr = (r/propose! candidate proposal)
                            :for nprob = (posterior (vals candidates))
                            :do (let ((accepted (mh/accepted prob nprob lhr)))
                                  (setf ($ (trace/proposals trace) (1- iter))
                                        ($clone ($data candidate)))
                                  (r/accept! candidate proposal accepted)
                                  (setf ($ trace (1- iter)) ($clone ($data candidate)))
                                  (trace/accepted! trace accepted)
                                  (when accepted
                                    (incf naccepted)
                                    (setf prob nprob)
                                    ;; Keep the best-so-far state in the traces' $data.
                                    (when (> prob maxprob)
                                      (setf maxprob prob)
                                      (loop :for tr :in traces
                                            :for c :in candidates
                                            :do (setf ($data tr) ($clone ($data c))))))
                                  (when tuneable
                                    (proposal/rescale! proposal))))))
          (if (zerop naccepted)
              (prns (format nil " FAILED]~%"))
              (prns (format nil " DONE]~%")))
          traces)))))
;; Metropolis-Hastings, all-elements ("AE") variant: all parameters are
;; proposed jointly and the whole vector is accepted or rejected with a
;; single decision per iteration (unlike MH-EM's per-parameter test).
;; Returns one r/trace per parameter, or NIL when the initial posterior
;; is invalid.
(defun mcmc/mh-ae (parameters posterior-function
                   &key (iterations 40000) (burn-in 10000) tune-steps)
  (labels ((posterior (vs) (apply posterior-function vs))
           (vals (parameters) (mapcar #'$data parameters)))
    (let ((prob (posterior (vals parameters)))
          (tune-steps (or tune-steps 1000)))
      (when prob
        (let ((proposals (em-proposals parameters))
              (traces (r/traces (mapcar #'$clone (mapcar #'$data parameters))
                                :n iterations :burn-in burn-in))
              (candidates (mapcar #'$clone parameters))
              (nsize (+ iterations burn-in))
              (bstep (round (/ burn-in 10)))
              (pstep (round (/ iterations 10)))
              (maxprob prob)
              (naccepted 0))
          (prn (format nil "[MH/AE: BURNING"))
          (loop :repeat nsize
                :for iter :from 1
                :for burning = (<= iter burn-in)
                :for tuneable = (zerop (rem iter tune-steps))
                :do (progn
                      (when (and burning (zerop (rem iter bstep)))
                        (prns "."))
                      (when (= burn-in iter)
                        (prns (format nil " DONE. SAMPLING")))
                      (when (and (not burning) (zerop (rem (- iter burn-in) pstep)))
                        (prns "."))
                      ;; Joint proposal: sum the per-parameter log-Hastings ratios.
                      (let ((lhr (loop :for proposal :in proposals
                                       :for candidate :in candidates
                                       :for lhr = (r/propose! candidate proposal)
                                       :summing lhr)))
                        (let ((nprob (posterior (vals candidates))))
                          (let ((accepted (mh/accepted prob nprob lhr)))
                            ;; One accept/reject decision applied to every parameter.
                            (loop :for candidate :in candidates
                                  :for proposal :in proposals
                                  :for trace :in traces
                                  :do (progn
                                        (setf ($ (trace/proposals trace) (1- iter))
                                              ($clone ($data candidate)))
                                        (r/accept! candidate proposal accepted)
                                        (setf ($ trace (1- iter)) ($clone ($data candidate)))
                                        (trace/accepted! trace accepted)))
                            (when accepted
                              (incf naccepted)
                              (setf prob nprob)
                              (when (> prob maxprob)
                                (setf maxprob prob)
                                (loop :for tr :in traces
                                      :for c :in candidates
                                      :do (setf ($data tr) ($clone ($data c)))))))
                          (when tuneable
                            (loop :for proposal :in proposals
                                  :do (proposal/rescale! proposal)))))))
          (if (zerop naccepted)
              (prns (format nil " FAILED]~%"))
              (prns (format nil " DONE]~%")))
          traces)))))
;; Proposals for the SC sampler: Gaussian for continuous parameters and
;; discrete-Gaussian (rounded Gaussian) for discrete ones -- otherwise
;; identical in structure to EM-PROPOSALS.
;; NOTE(review): this is byte-identical to AM-PROPOSALS below; the two
;; could share one definition.
(defun sc-proposals (parameters)
  (->> parameters
       (mapcar (lambda (p)
                 (if (r/deviance p)
                     (if (r/continuousp p)
                         (r/proposal p)
                         (proposal/discrete-gaussian (r/deviance p)))
                     (if (r/continuousp p)
                         (r/proposal p (if (zerop ($data p))
                                           1.0
                                           (round ($abs ($data p)))))
                         (proposal/discrete-gaussian (if (zerop ($data p))
                                                         1
                                                         (round ($abs ($data p)))))))))))
;; Metropolis-Hastings, single-component adaptive-scale ("SC") variant:
;; per-parameter accept/reject like MH-EM, but each proposal's sd is
;; continuously adapted to 2.4^2 * (running-variance + 0.01) after the
;; first 10 iterations (running stats kept per parameter in RSTATS).
;; NOTE(review): the 2.4^2 constant matches the classic adaptive-scaling
;; literature (Gelman/Haario-style) -- presumably intentional; confirm.
(defun mcmc/mh-sc (parameters posterior-function
                   &key (iterations 40000) (burn-in 10000) tune-steps)
  (labels ((posterior (vs) (apply posterior-function vs))
           (vals (parameters) (mapcar #'$data parameters)))
    (let ((prob (posterior (vals parameters)))
          (tune-steps (or tune-steps 1000)))
      (when prob
        (let ((proposals (sc-proposals parameters))
              (traces (r/traces (mapcar #'$clone (mapcar #'$data parameters))
                                :n iterations :burn-in burn-in))
              (candidates (mapcar #'$clone parameters))
              (rstats (loop :for p :in parameters :collect (rstat)))
              (nsize (+ iterations burn-in))
              (bstep (round (/ burn-in 10)))
              (pstep (round (/ iterations 10)))
              (maxprob prob)
              (naccepted 0)
              (cf (* 2.4 2.4)))
          (prn (format nil "[MH/SC: BURNING"))
          (loop :repeat nsize
                :for iter :from 1
                :for burning = (<= iter burn-in)
                :for adaptable = (>= iter 11)
                :for tuneable = (zerop (rem iter tune-steps))
                :do (progn
                      (when (and burning (zerop (rem iter bstep)))
                        (prns "."))
                      (when (and (not burning) (zerop (rem (- iter burn-in) pstep)))
                        (prns "."))
                      (when (= burn-in iter)
                        (prns (format nil " DONE. SAMPLING")))
                      (loop :for proposal :in proposals
                            :for candidate :in candidates
                            :for trace :in traces
                            :for rs :in rstats
                            :for lhr = (r/propose! candidate proposal)
                            :for nprob = (posterior (vals candidates))
                            :do (let ((accepted (mh/accepted prob nprob lhr)))
                                  (setf ($ (trace/proposals trace) (1- iter))
                                        ($clone ($data candidate)))
                                  (r/accept! candidate proposal accepted)
                                  (setf ($ trace (1- iter)) ($clone ($data candidate)))
                                  (trace/accepted! trace accepted)
                                  ;; Feed the (possibly rolled-back) value into the
                                  ;; running stats driving scale adaptation.
                                  (rstat/push! rs ($data candidate))
                                  (when adaptable
                                    (let ((g (* cf (+ (rstat/variance rs) 0.01))))
                                      (proposal/scale! proposal (sqrt g))))
                                  (when tuneable
                                    (proposal/rescale! proposal))
                                  (when accepted
                                    (incf naccepted)
                                    (setf prob nprob)
                                    (when (> prob maxprob)
                                      (setf maxprob prob)
                                      (loop :for tr :in traces
                                            :for c :in candidates
                                            :do (setf ($data tr) ($clone ($data c))))))))))
          (if (zerop naccepted)
              (prns (format nil " FAILED]~%"))
              (prns (format nil " DONE]~%")))
          traces)))))
;; Proposals for the AM sampler: Gaussian for continuous parameters and
;; discrete-Gaussian for discrete ones.
;; NOTE(review): byte-identical to SC-PROPOSALS above; the two could
;; share one definition.
(defun am-proposals (parameters)
  (->> parameters
       (mapcar (lambda (p)
                 (if (r/deviance p)
                     (if (r/continuousp p)
                         (r/proposal p)
                         (proposal/discrete-gaussian (r/deviance p)))
                     (if (r/continuousp p)
                         (r/proposal p (if (zerop ($data p))
                                           1.0
                                           (round ($abs ($data p)))))
                         (proposal/discrete-gaussian (if (zerop ($data p))
                                                         1
                                                         (round ($abs ($data p)))))))))))
;; Metropolis-Hastings, adaptive-Metropolis ("AM") variant: all
;; parameters are proposed jointly (one accept/reject per iteration).
;; Continuous components adapt their proposal sd to
;; sqrt(cf * running-variance + bd) with cf = 2.38^2/d and floor
;; bd = 0.1^2/d (d = parameter count); discrete components fall back to
;; count-based retuning every TUNE-STEPS iterations. The per-iteration
;; proposal sd is recorded into each trace's PSDS.
(defun mcmc/mh-am (parameters posterior-function
                   &key (iterations 40000) (burn-in 10000) tune-steps)
  (labels ((posterior (vs) (apply posterior-function vs))
           (vals (parameters) (mapcar #'$data parameters)))
    (let ((prob (posterior (vals parameters)))
          (tune-steps (or tune-steps 1000))
          (cf (/ (* 2.38 2.38) ($count parameters)))
          (bd (/ (* 0.1 0.1) ($count parameters))))
      (when prob
        (let ((proposals (am-proposals parameters))
              (traces (r/traces (mapcar #'$clone (mapcar #'$data parameters))
                                :n iterations :burn-in burn-in))
              (candidates (mapcar #'$clone parameters))
              (rstats (loop :for p :in parameters :collect (rstat)))
              (nsize (+ iterations burn-in))
              (bstep (round (/ burn-in 10)))
              (pstep (round (/ iterations 10)))
              (maxprob prob)
              (naccepted 0))
          (prn (format nil "[MH/AM: BURNING"))
          (loop :repeat nsize
                :for iter :from 1
                :for burning = (<= iter burn-in)
                :for adaptable = (>= iter 11)
                :for tuneable = (zerop (rem iter tune-steps))
                :do (progn
                      (when (and burning (zerop (rem iter bstep)))
                        (prns "."))
                      (when (and (not burning) (zerop (rem (- iter burn-in) pstep)))
                        (prns "."))
                      (when (= burn-in iter)
                        (prns (format nil " DONE. SAMPLING")))
                      ;; Joint proposal, single accept/reject decision.
                      (let* ((lhr (r/propose! candidates proposals))
                             (nprob (posterior (vals candidates)))
                             (accepted (mh/accepted prob nprob lhr)))
                        (loop :for proposal :in proposals
                              :for candidate :in candidates
                              :for trace :in traces
                              :for rs :in rstats
                              :do (progn
                                    (setf ($ (trace/proposals trace) (1- iter))
                                          ($clone ($data candidate)))
                                    ;; Record the effective proposal sd for diagnostics.
                                    (setf ($ (trace/psds trace) (1- iter))
                                          (with-slots (factor scale) proposal
                                            (* factor scale)))
                                    (r/accept! candidate proposal accepted)
                                    (setf ($ trace (1- iter)) ($clone ($data candidate)))
                                    (trace/accepted! trace accepted)
                                    (rstat/push! rs ($data candidate))
                                    (when (r/continuousp candidate)
                                      (when adaptable
                                        (let ((g (+ (* cf (rstat/variance rs)) bd)))
                                          (proposal/scale! proposal (sqrt g)))))
                                    (when (r/discretep candidate)
                                      (when tuneable
                                        (proposal/rescale! proposal)))))
                        (when accepted
                          (incf naccepted)
                          (setf prob nprob)
                          (when (> prob maxprob)
                            (setf maxprob prob)
                            (loop :for tr :in traces
                                  :for c :in candidates
                                  :do (setf ($data tr) ($clone ($data c)))))))))
          (if (zerop naccepted)
              (prns (format nil " FAILED]~%"))
              (prns (format nil " DONE]~%")))
          traces)))))
;; Coerce each parameter spec into an r/variable: existing r/variables
;; pass through; a bare integer becomes an r/dvar, a bare float an
;; r/cvar; a (value deviance) list dispatches on the value's type
;; (non-integers go continuous); anything else becomes an r/cvar.
(defun wrap-parameters (parameters)
  (flet ((wrap (p)
           (cond ((r/variablep p) p)
                 ((integerp p) (r/dvar p))
                 ((floatp p) (r/cvar p))
                 ((listp p)
                  (if (integerp (first p))
                      (r/dvar (first p) (second p))
                      (r/cvar (first p) (second p))))
                 (T (r/cvar p)))))
    (mapcar #'wrap parameters)))
;; Entry point: wrap raw parameter specs into r/variables and dispatch
;; to one of the Metropolis-Hastings samplers by TYPE (:em element-wise,
;; :ae all-elements, :sc single-component adaptive, :am adaptive
;; Metropolis -- the default). Returns NIL for an unknown TYPE, matching
;; the original cond-based dispatch.
(defun mcmc/mh (parameters posterior-function
                &key (iterations 40000) (burn-in 10000) (tune-steps 1000) (type :am))
  (let ((wrapped (wrap-parameters parameters))
        (sampler (case type
                   (:em #'mcmc/mh-em)
                   (:ae #'mcmc/mh-ae)
                   (:sc #'mcmc/mh-sc)
                   (:am #'mcmc/mh-am))))
    (when sampler
      (funcall sampler wrapped posterior-function
               :iterations iterations :burn-in burn-in
               :tune-steps tune-steps))))
| 23,165
|
Common Lisp
|
.lisp
| 486
| 30.220165
| 93
| 0.453429
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
fec54f6a583a6249e12635c84f84845a00a3a21c03cfca18d07897ff19da8d94
| 3,262
|
[
-1
] |
3,263
|
trace.lisp
|
chunsj_TH/pp/trace.lisp
|
(in-package :th.pp)
;; A sampling trace for one parameter: COLLECTION holds every draw
;; (burn-in included), VALS only the post-burn-in samples, PROPOSALS the
;; proposed (pre-accept) values, and PSDS the proposal sd per iteration.
;; MEAN/VARIANCE cache summary statistics lazily; NACCEPTED/NREJECTED
;; count accept/reject outcomes.
(defclass r/trace (r/variable)
  ((collection :initform nil :reader trace/collection)
   (proposals :initform nil :accessor trace/proposals)
   (psds :initform nil :accessor trace/psds)
   (burn-ins :initform 0)
   (vals :initform nil :accessor trace/values)
   (mean :initform nil)
   (variance :initform nil)
   (naccepted :initform 0)
   (nrejected :initform 0)))
;; Allocate a trace sized for N samples after BURN-IN iterations, seeded
;; with initial value V.
(defun r/trace (v &key (n 1) (burn-in 0))
  ;; XXX use the shape of v.
  (let ((tr (make-instance 'r/trace))
        (nb burn-in))
    (with-slots (value collection proposals psds burn-ins vals) tr
      (setf collection (zeros (+ n nb))
            proposals (zeros (+ n nb))
            psds (zeros (+ n nb))
            value v
            burn-ins nb
            vals (zeros n)))
    tr))
;; One r/trace per initial value in VS, each sized for N samples plus
;; BURN-IN burn-in iterations.
(defun r/traces (vs &key (n 1) (burn-in 0))
  (mapcar (lambda (v) (r/trace v :n n :burn-in burn-in)) vs))
;; Build a new trace keeping every THIN-th post-burn-in sample (only
;; VALUE and VALS are populated on the thinned copy).
(defun trace/thin (trace thin)
  (let* ((vs (trace/values trace))
         (n0 ($count vs))
         (nt (make-instance 'r/trace)))
    (with-slots (value vals) nt
      (setf value ($clone ($data trace))
            vals (tensor (loop :for i :from 0 :below n0 :by thin :collect ($clone ($ vs i)))))
      nt)))
;; $count / $ / (setf $) index into the full COLLECTION (burn-in included).
(defmethod $count ((trace r/trace))
  (with-slots (collection) trace
    ($count collection)))
(defmethod $ ((trace r/trace) index &rest others-and-default)
  (declare (ignore others-and-default))
  (with-slots (collection) trace
    ($ collection index)))
;; Writes past the burn-in region are mirrored into VALS so the
;; post-burn-in samples stay directly accessible.
(defmethod (setf $) (value (trace r/trace) index &rest others)
  (declare (ignore others))
  (with-slots (collection burn-ins vals) trace
    (setf ($ collection index) value)
    (when (>= index burn-ins)
      (let ((i (- index burn-ins)))
        (setf ($ vals i) value)))
    value))
;; Record one accept/reject outcome on TRACE's counters.
(defun trace/accepted! (trace acceptedp)
  (with-slots (naccepted nrejected) trace
    (if acceptedp
        (incf naccepted)
        (incf nrejected))))
;; NOTE(review): the original body called TRACE/REJECTED! itself, which
;; recursed forever on any call. Delegating to TRACE/ACCEPTED! (which
;; updates the right counter from ACCEPTEDP) is presumably what was
;; intended -- confirm against callers; none appear in this file.
(defun trace/rejected! (trace acceptedp)
  (trace/accepted! trace acceptedp))
;; Acceptance rate as a rounded percentage; 0 when nothing was recorded.
(defun trace/act (trace)
  (with-slots (naccepted nrejected) trace
    (let ((total (+ naccepted nrejected)))
      (if (plusp total)
          (round (/ (* 100 naccepted) total))
          0))))
;; Mean of the post-burn-in samples, computed once and cached in the
;; MEAN slot.
(defun trace/mean (trace)
  (with-slots (mean) trace
    (unless mean
      (setf mean ($mean (trace/values trace))))
    mean))
;; Variance of the post-burn-in samples, computed once and cached in the
;; VARIANCE slot.
(defun trace/variance (trace)
  (with-slots (variance) trace
    (unless variance
      (setf variance ($var (trace/values trace))))
    variance))
;; Standard deviation of the post-burn-in samples. Delegates to
;; TRACE/VARIANCE so the lazy caching logic lives in exactly one place
;; (the original duplicated the cache-fill code here); behavior is
;; unchanged -- same cached VARIANCE slot, same $sqrt of it.
(defun trace/sd (trace)
  ($sqrt (trace/variance trace)))
;; Monte-Carlo standard error: sd / sqrt(n). NIL for an empty trace.
(defun trace/error (trace)
  (let ((n ($count (trace/values trace)))
        (sd (trace/sd trace)))
    (when (>= n 1) (/ sd (sqrt n)))))
;; Autocorrelations at lags 0..MAXLAG; NIL when the chain is (near)
;; constant, since the sd guard would otherwise divide by ~zero variance.
(defun trace/acr (trace &key (maxlag 100))
  (when (> (trace/sd trace) 1E-7)
    (let ((vals (trace/values trace)))
      (loop :for k :from 0 :to (min maxlag (1- ($count vals)))
            :collect ($acr vals k)))))
;; Return ((q . value) ...) for the 2.5/25/50/75/97.5 percentiles of the
;; sorted post-burn-in samples, or NIL when there are 10 or fewer
;; samples. Indices are clamped into [0, n-1]: the original's upper
;; clamp read (when (> i (1- n)) n), which evaluated N and discarded it,
;; so the 97.5% quantile could index one past the end of the vector
;; (e.g. n = 11 gives ridx = 11).
(defun trace/quantiles (trace)
  (let* ((trcvs (trace/values trace))
         (n ($count trcvs))
         (qlist '(2.5 25 50 75 97.5)))
    (when (> n 10)
      (let ((vs (car ($sort trcvs))))
        (loop :for q :in qlist
              :for ridx = (round (* n (/ q 100)))
              :collect (let ((i (min (max ridx 0) (1- n))))
                         (cons q ($ vs i))))))))
;; Highest-posterior-density interval: the narrowest window covering a
;; (1 - ALPHA) fraction of the sorted samples. Returns (min . max), or
;; NIL when 10 or fewer samples exist.
(defun trace/hpd (trace &optional (alpha 0.05))
  (labels ((min-interval (vs alpha)
             (let* ((mn nil)
                    (mx nil)
                    (n ($count vs))
                    (start 0)
                    (end (round (* n (- 1 alpha))))
                    (min-width most-positive-single-float))
               ;; Slide a fixed-width index window over the sorted
               ;; samples, keeping the narrowest value range seen.
               (loop :while (< end n)
                     :for hi = ($ vs end)
                     :for lo = ($ vs start)
                     :for width = (- hi lo)
                     :do (progn
                           (when (< width min-width)
                             (setf min-width width
                                   mn lo
                                   mx hi))
                           (incf start)
                           (incf end)))
               (cons mn mx))))
    (let ((vs (car ($sort (trace/values trace)))))
      (when (> ($count vs) 10)
        (min-interval vs alpha)))))
;; Geweke convergence diagnostic: z-scores comparing the mean of an
;; early fraction FIRST of the chain against a late fraction LAST, over
;; INTERVALS start points. Returns an alist of (start . z), or NIL when
;; the chain is near-constant or the fractions are invalid.
;; Fix: the original guard was (< (+ first last)) -- a one-argument <,
;; which is always true. The fractions must satisfy FIRST + LAST < 1 so
;; the compared segments cannot overlap.
(defun trace/geweke (trace &key (first 0.1) (last 0.5) (intervals 20))
  (when (and (< (+ first last) 1) (> (trace/sd trace) 1E-7))
    (labels ((interval-zscores (vs a b &optional (intervals 20))
               (let* ((end (1- ($count vs)))
                      (hend (/ end 2))
                      (sindices (loop :for i :from 0 :below (round hend)
                                      :by (round (/ hend intervals))
                                      :collect i)))
                 (loop :for start :in sindices
                       :for asize = (round (* a (- end start)))
                       :for slice-a = ($subview vs start asize)
                       :for bstart = (round (- end (* b (- end start))))
                       :for slice-b = ($subview vs bstart (- ($count vs) bstart))
                       :for zn = (- ($mean slice-a) ($mean slice-b))
                       ;; NOTE(review): zd sums squared sds (variances)
                       ;; without a sqrt; the textbook z-score divides by
                       ;; (sqrt zd) -- confirm whether that is intended.
                       :for zd = (+ ($square ($sd slice-a)) ($square ($sd slice-b)))
                       :collect (cons start (/ zn zd))))))
      (let ((vs (trace/values trace)))
        (when (> ($count vs) intervals)
          (interval-zscores vs first last intervals))))))
;; Plist summary of a trace: sample count, mean, sd, MC error, 95% HPD,
;; quantiles, mean autocorrelation over lags >= 1, and the range of
;; Geweke z-scores.
(defun trace/summary (trace)
  (let ((quantiles (trace/quantiles trace))
        (n ($count (trace/values trace)))
        (sd (trace/sd trace))
        (m (trace/mean trace))
        (err (trace/error trace))
        (hpd (trace/hpd trace 0.05))
        (acr (trace/acr trace))
        (gvs (mapcar #'cdr (trace/geweke trace))))
    (list :count n
          :mean m
          :sd sd
          :error err
          :hpd-95 hpd
          :quantiles quantiles
          :acmean (when acr ($mean (subseq acr 1)))
          :gwkrng (when gvs (cons (apply #'min gvs) (apply #'max gvs))))))
;; Draw N joint samples from TRACES -- each component is resampled at an
;; independent random index -- and apply TRANSFORM to each parameter list.
;; NOTE(review): with TRANSFORM nil and more than one trace, (apply
;; #'identity parameters) receives multiple arguments and would error;
;; callers appear expected to pass TRANSFORM in that case -- confirm.
(defun traces/sample (traces &key (n 1) transform)
  (let ((trcs (loop :for trace :in traces :collect (trace/values trace))))
    (loop :repeat n
          :for parameters = (loop :for trc :in trcs
                                  :for ntrc = ($count trc)
                                  :for idx = (random ntrc)
                                  :collect ($ trc idx))
          :collect (apply (or transform #'identity) parameters))))
| 6,547
|
Common Lisp
|
.lisp
| 164
| 29.54878
| 94
| 0.517209
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
ec89f45e5752a538286d4b362d6c2ad2284605d270016b5aa6c64b49ed0d23db
| 3,263
|
[
-1
] |
3,264
|
pp.lisp
|
chunsj_TH/pp/pp.lisp
|
;; Public interface of the TH probabilistic-programming package:
;; Metropolis-Hastings sampling (MCMC/MH), trace inspection and
;; convergence diagnostics, and MAP fitting (MAP/FIT).
(defpackage :th.pp
  (:use #:common-lisp
        #:mu
        #:th)
  (:export #:mcmc/mh
           #:trace/thin
           #:trace/act
           #:trace/values
           #:trace/mean
           #:trace/sd
           #:trace/quantiles
           #:trace/error
           #:trace/hpd
           #:trace/acr
           #:trace/geweke
           #:trace/summary
           #:traces/sample
           #:map/fit))
| 411
|
Common Lisp
|
.lisp
| 18
| 12.777778
| 28
| 0.427481
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
8f49c0f2b6bd0439cab9fe3fb77f6b2e70fae0da7ff026ee392d1f0981f29dd5
| 3,264
|
[
-1
] |
3,265
|
rv.lisp
|
chunsj_TH/pp/rv.lisp
|
(in-package :th.pp)
;; Predicate protocol for random variables.
(defgeneric r/variablep (rv))
(defgeneric r/continuousp (rv))
(defgeneric r/discretep (rv))
;; Base random variable: a boxed VALUE accessible through $data.
(defclass r/variable ()
  ((value :initform nil :accessor $data)))
;; Print as <value>, picking a float format by magnitude (wide range ->
;; fewer decimals, tiny values -> exponential notation).
(defmethod print-object ((rv r/variable) stream)
  (with-slots (value) rv
    (cond ((integerp value) (format stream "<~A>" ($data rv)))
          ((floatp value) (cond ((> (abs value) 100) (format stream "<~,1F>" ($data rv)))
                                ((> (abs value) 1) (format stream "<~,2F>" ($data rv)))
                                ((> (abs value) 0.01) (format stream "<~,4F>" ($data rv)))
                                (T (format stream "<~,4E>" ($data rv)))))
          (T (format stream "<~A>" ($data rv))))))
;; Deep copy: a fresh instance of RV's concrete class holding a clone of
;; its boxed value.
(defmethod $clone ((rv r/variable))
  (let ((copy (make-instance (class-of rv))))
    (setf (slot-value copy 'value) ($clone (slot-value rv 'value)))
    copy))
;; Type predicates: the base class is neither continuous nor discrete;
;; each subclass flips exactly one of the two.
(defmethod r/continuousp ((rv r/variable)) nil)
(defmethod r/discretep ((rv r/variable)) nil)
(defclass r/continuous (r/variable)
  ())
(defmethod r/continuousp ((rv r/continuous)) T)
(defmethod r/discretep ((rv r/continuous)) nil)
(defclass r/discrete (r/variable)
  ())
(defmethod r/continuousp ((rv r/discrete)) nil)
(defmethod r/discretep ((rv r/discrete)) T)
;; Make an r/continuous or r/discrete holding VALUE; returns NIL for an
;; unrecognized TYPE (mirroring the original cond-based dispatch).
(defun r/variable (value &optional (type :continuous))
  (let ((rv (case type
              (:continuous (make-instance 'r/continuous))
              (:discrete (make-instance 'r/discrete)))))
    (when rv
      (setf ($data rv) value)
      rv)))
;; R/VARIABLEP is true exactly for r/variable instances.
(defmethod r/variablep ((rv r/variable)) T)
(defmethod r/variablep ((rv T)) nil)
| 1,624
|
Common Lisp
|
.lisp
| 39
| 35.25641
| 90
| 0.590591
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
5fdb84a51fa99695eb575de06f04e499396fcf32fa9d494bd15a53bca188a651
| 3,265
|
[
-1
] |
3,266
|
opt.lisp
|
chunsj_TH/pp/opt.lisp
|
(in-package :th.pp)
;; Wrap POSTERIOR as a minimization objective for Nelder-Mead: negate
;; the log-posterior, and map invalid regions (NIL posterior) to a huge
;; penalty. Arguments whose initial value was an integer are rounded
;; before evaluation so discrete parameters stay discrete.
(defun objective-function (posterior initial-values)
  (lambda (&rest args)
    (let ((p (apply posterior
                    (map 'list (lambda (v v0)
                                 (if (integerp v0)
                                     (round v)
                                     v))
                         args initial-values))))
      (if p
          ($neg p)
          most-positive-single-float))))
;; Maximum-a-posteriori fit via Nelder-Mead starting from INITIAL-VALUES;
;; returns (values fitted-parameter-list best-objective-value), rounding
;; components that started as integers.
;; NOTE(review): xatol is 1% of the smallest |initial value|, which
;; becomes 0 whenever any initial value is 0 -- confirm that is intended.
(defun map/fit (posterior initial-values)
  (multiple-value-bind (vs fv)
      (nelder-mead (objective-function posterior initial-values) initial-values
                   :xatol (* 1E-2 (apply #'min (mapcar #'abs initial-values))))
    (values (loop :for k :from 0 :below ($count vs)
                  :for v = ($ vs k)
                  :for i :in initial-values
                  :collect (if (integerp i)
                               (round v)
                               v))
            fv)))
| 937
|
Common Lisp
|
.lisp
| 23
| 24.73913
| 79
| 0.448465
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
a5024875a83653465c1932e018507b9848ae4fd31c5f573e89aecb8e395a5f3f
| 3,266
|
[
-1
] |
3,267
|
tensors.lisp
|
chunsj_TH/ffi/tensors.lisp
|
(declaim (optimize (speed 3) (debug 1) (safety 0)))
(in-package :th)
(defparameter *th-tensor-functions*
'(("storage" storage storageptr (tensor tensorptr))
("storageOffset" storage-offset ptrdiff-t (tensor tensorptr))
("nDimension" n-dimension :int (tensor tensorptr))
("size" size :long (tensor tensorptr) (dim :int))
("stride" stride :long (tensor tensorptr) (dim :int))
("newSizeOf" new-size-of th-long-storage-ptr (tensor tensorptr))
("newStrideOf" new-stride-of th-long-storage-ptr (tensor tensorptr))
("data" data realptr (tensor tensorptr))
("setFlag" set-flag :void (tensor tensorptr) (flag :char))
("clearFlag" clear-flag :void (tensor tensorptr) (flag :char))
("new" new tensorptr)
("newWithTensor" new-with-tensor tensorptr (tensor tensorptr))
("newWithStorage" new-with-storage tensorptr (storage storageptr) (storage-offset ptrdiff-t)
(size th-long-storage-ptr) (stride (th-long-storage-ptr)))
("newWithStorage1d" new-with-storage-1d tensorptr (storage storageptr) (storage-offset ptrdiff-t)
(size0 :long) (stride0 :long))
("newWithStorage2d" new-with-storage-2d tensorptr (storage storageptr) (storage-offset ptrdiff-t)
(size0 :long) (stride0 :long) (size1 :long) (stride1 :long))
("newWithStorage3d" new-with-storage-3d tensorptr (storage storageptr) (storage-offset ptrdiff-t)
(size0 :long) (stride0 :long) (size1 :long) (stride1 :long) (size2 :long) (stride2 :long))
("newWithStorage4d" new-with-storage-4d tensorptr (storage storageptr) (storage-offset ptrdiff-t)
(size0 :long) (stride0 :long) (size1 :long) (stride1 :long) (size2 :long) (stride2 :long)
(size3 :long) (stride3 :long))
("newWithSize" new-with-size tensorptr (size th-long-storage-ptr) (stride th-long-storage-ptr))
("newWithSize1d" new-with-size-1d tensorptr (size0 :long))
("newWithSize2d" new-with-size-2d tensorptr (size0 :long) (size1 :long))
("newWithSize3d" new-with-size-3d tensorptr (size0 :long) (size1 :long) (size2 :long))
("newWithSize4d" new-with-size-4d tensorptr (size0 :long) (size1 :long) (size2 :long)
(size3 :long))
("newClone" new-clone tensorptr (tensor tensorptr))
("newContiguous" new-contiguous tensorptr (tensor tensorptr))
("newSelect" new-select tensorptr (tensor tensorptr) (dimension :int) (slice-index :long))
("newNarrow" new-narrow tensorptr (tensor tensorptr) (dimension :int) (first-idnex :long)
(size :long))
("newTranspose" new-transpose tensorptr (tensor tensorptr) (dimension1 :int) (dimension2 :int))
("newUnfold" new-unfold tensorptr (tensor tensorptr) (dim :int) (size :long) (step :long))
("newView" new-view tensorptr (tensor tensorptr) (size th-long-storage-ptr))
("newExpand" new-expand tensorptr (tensor tensorptr) (size th-long-storage-ptr))
("expand" expand :void (r tensorptr) (tensor tensorptr) (size th-long-storage-ptr))
("resize" resize :void (tensor tensorptr) (size th-long-storage-ptr)
(stride th-long-storage-ptr))
("resizeAs" resize-as :void (tensor tensorptr) (src tensorptr))
("resizeNd" resize-nd :void (tensor tensorptr) (dim :int) (size (:pointer :long))
(stride (:pointer :long)))
("resize1d" resize-1d :void (tensor tensorptr) (size0 :long))
("resize2d" resize-2d :void (tensor tensorptr) (size0 :long) (size1 :long))
("resize3d" resize-3d :void (tensor tensorptr) (size0 :long) (size1 :long) (size2 :long))
("resize4d" resize-4d :void (tensor tensorptr) (size0 :long) (size1 :long) (size2 :long)
(size3 :long))
("resize5d" resize-5d :void (tensor tensorptr) (size0 :long) (size1 :long) (size2 :long)
(size3 :long) (size4 :long))
("set" set :void (tensor tensorptr) (src tensorptr))
("setStorage" set-storage :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (size th-long-storage-ptr) (stride th-long-storage-ptr))
("setStorageNd" set-storage-nd :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (dim :int) (size (:pointer :long)) (stride (:pointer :long)))
("setStorage1d" set-storage-1d :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (size0 :long) (stride0 :long))
("setStorage2d" set-storage-2d :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (size0 :long) (stride0 :long) (size1 :long) (stride1 :long))
("setStorage3d" set-storage-3d :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (size0 :long) (stride0 :long) (size1 :long) (stride1 :long)
(size2 :long) (stride2 :long))
("setStorage4d" set-storage-4d :void (tensor tensorptr) (storage storageptr)
(storage-offset ptrdiff-t) (size0 :long) (stride0 :long) (size1 :long) (stride1 :long)
(size2 :long) (stride2 :long) (size3 :long) (stride3 :long))
("narrow" narrow :void (tensor tensorptr) (src tensorptr) (dim :int) (first-index :long)
(size :long))
("select" select :void (tensor tensorptr) (src tensorptr) (dim :int) (slice-index :long))
("transpose" transpose :void (tensor tensorptr) (src tensorptr) (dim1 :int) (dim2 :int))
("unfold" unfold :void (tensor tensorptr) (src tensorptr) (dim :int) (size :long) (step :long))
("squeeze" squeeze :void (tensor tensorptr) (src tensorptr))
("squeeze1d" squeeze-1d :void (tensor tensorptr) (src tensorptr) (dimension :int))
("unsqueeze1d" unsqueeze-1d :void (tensor tensorptr) (src tensorptr) (dimension :int))
("isContiguous" is-contiguous :int (tensor tensorptr))
("isSameSizeAs" is-same-size-as :int (tensor tensorptr) (src tensorptr))
("isSetTo" is-set-to :int (tensor tensorptr) (src tensorptr))
("isSize" is-size :int (tensor tensorptr) (dims th-long-storage-ptr))
("nElement" n-element ptrdiff-t (tensor tensorptr))
("retain" retain :void (tensor tensorptr))
("free" free :void (tensor tensorptr))
("freeCopyTo" free-copy-to :void (tensor tensorptr) (dst tensorptr))
("set1d" set-1d :void (tensor tensorptr) (index0 :long) (value real))
("set2d" set-2d :void (tensor tensorptr) (index0 :long) (index1 :long) (value real))
("set3d" set-3d :void (tensor tensorptr) (index0 :long) (index1 :long) (index2 :long)
(value real))
("set4d" set-4d :void (tensor tensorptr) (index0 :long) (index1 :long) (index2 :long)
(index3 :long) (value real))
("get1d" get-1d real (tensor tensorptr) (index0 :long))
("get2d" get-2d real (tensor tensorptr) (index0 :long) (index1 :long))
("get3d" get-3d real (tensor tensorptr) (index0 :long) (index1 :long) (index2 :long))
("get4d" get-4d real (tensor tensorptr) (index0 :long) (index1 :long) (index2 :long)
(index3 :long))
("copy" copy :void (tensor tensorptr) (src tensorptr))
("copyByte" copy-byte :void (tensor tensorptr) (src th-byte-tensor-ptr))
("copyChar" copy-char :void (tensor tensorptr) (src th-char-tensor-ptr))
("copyShort" copy-short :void (tensor tensorptr) (src th-short-tensor-ptr))
("copyInt" copy-int :void (tensor tensorptr) (src th-int-tensor-ptr))
("copyLong" copy-long :void (tensor tensorptr) (src th-long-tensor-ptr))
("copyFloat" copy-float :void (tensor tensorptr) (src th-float-tensor-ptr))
("copyDouble" copy-double :void (tensor tensorptr) (src th-double-tensor-ptr))
("random" random :void (tensor tensorptr) (generator th-generator-ptr))
("geometric" geometric :void (tensor tensorptr) (generator th-generator-ptr) (p :double))
("hypergeometric" hypergeometric :void (tensor tensorptr) (generator th-generator-ptr)
(nr :int) (nb :int) (k :int))
("poisson" poisson :void (tensor tensorptr) (generator th-generator-ptr) (mu :double))
("bernoulli" bernoulli :void (tensor tensorptr) (generator th-generator-ptr) (p :double))
("binomial" binomial :void (tensor tensorptr) (generator th-generator-ptr) (n :int)
(p :double))
("bernoulli_FloatTensor" bernoulli-float-tensor :void (tensor tensorptr)
(generator th-generator-ptr) (p th-float-tensor-ptr))
("bernoulli_DoubleTensor" bernoulli-double-tensor :void (tensor tensorptr)
(generator th-generator-ptr) (p th-double-tensor-ptr))
("fill" fill :void (tensor tensorptr) (value real))
("zero" zero :void (tensor tensorptr))
("maskedFill" masked-fill :void (tensor tensorptr) (mask th-byte-tensor-ptr) (value real))
("maskedCopy" masked-copy :void (tensor tensorptr) (mask th-byte-tensor-ptr) (src tensorptr))
("maskedSelect" masked-select :void (tensor tensorptr) (src tensorptr)
(mask th-byte-tensor-ptr))
("nonzero" nonzero :void (subscript th-long-tensor-ptr) (tensor tensorptr))
("indexSelect" index-select :void (tensor tensorptr) (src tensorptr) (dim :int)
(index th-long-tensor-ptr))
("indexCopy" index-copy :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(src tensorptr))
("indexAdd" index-add :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(src tensorptr))
("indexFill" index-fill :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(value real))
("gather" gather :void (tensor tensorptr) (src tensorptr) (dim :int)
(index th-long-tensor-ptr))
("scatter" scatter :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(src tensorptr))
("scatterAdd" scatter-add :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(src tensorptr))
("scatterFill" scatter-fill :void (tensor tensorptr) (dim :int) (index th-long-tensor-ptr)
(value real))
("dot" dot acreal (tensor tensorptr) (src tensorptr))
("minall" min-all real (tensor tensorptr))
("maxall" max-all real (tensor tensorptr))
("medianall" median-all real (tensor tensorptr))
("sumall" sum-all acreal (tensor tensorptr))
("prodall" prod-all acreal (tensor tensorptr))
("add" add :void (result tensorptr) (tensor tensorptr) (value real))
("sub" sub :void (result tensorptr) (tensor tensorptr) (value real))
("mul" mul :void (result tensorptr) (tensor tensorptr) (value real))
("div" div :void (result tensorptr) (tensor tensorptr) (value real))
("lshift" lshift :void (result tensorptr) (tensor tensorptr) (value real))
("rshift" rshift :void (result tensorptr) (tensor tensorptr) (value real))
("fmod" fmod :void (result tensorptr) (tensor tensorptr) (value real))
("remainder" remainder :void (result tensorptr) (tensor tensorptr) (value real))
("clamp" clamp :void (result tensorptr) (tensor tensorptr) (minv real) (maxv real))
("bitand" bitand :void (result tensorptr) (tensor tensorptr) (value real))
("bitor" bitor :void (result tensorptr) (tensor tensorptr) (value real))
("bitxor" bitxor :void (result tensorptr) (tensor tensorptr) (value real))
("cadd" cadd :void (result tensorptr) (tensor tensorptr) (value real) (src tensorptr))
("csub" csub :void (result tensorptr) (tensor tensorptr) (value real) (src tensorptr))
("cmul" cmul :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cpow" cpow :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cdiv" cdiv :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("clshift" clshift :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("crshift" crshift :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cfmod" cfmod :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cremainder" cremainder :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cbitand" cbitand :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cbitor" cbitor :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cbitxor" cbitxor :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("addcmul" add-cmul :void (result tensorptr) (tensor tensorptr) (value real)
(src1 tensorptr) (src2 tensorptr))
("addcdiv" add-cdiv :void (result tensorptr) (tensor tensorptr) (value real)
(src1 tensorptr) (src2 tensorptr))
("addmv" add-mv :void (result tensorptr) (beta real) (tensor tensorptr) (alpha real)
(maxtrix tensorptr) (vector tensorptr))
("addmm" add-mm :void (result tensorptr) (beta real) (tensor tensorptr) (alpha real)
(maxtrix1 tensorptr) (matrix2 tensorptr))
("addr" add-r :void (result tensorptr) (beta real) (tensor tensorptr) (alpha real)
(vector1 tensorptr) (vector2 tensorptr))
("addbmm" add-bmm :void (result tensorptr) (beta real) (tensor tensorptr) (alpha real)
(batch1 tensorptr) (batch2 tensorptr))
("baddbmm" badd-bmm :void (result tensorptr) (beta real) (tensor tensorptr) (alpha real)
(batch1 tensorptr) (batch2 tensorptr))
("match" match :void (result tensorptr) (m1 tensorptr) (m2 tensorptr) (gain real))
("numel" numel ptrdiff-t (tensor tensorptr))
("max" max :void (values tensorptr) (indices th-long-tensor-ptr) (tensor tensorptr)
(dim :int) (keep-dim :int))
("min" min :void (values tensorptr) (indices th-long-tensor-ptr) (tensor tensorptr)
(dim :int) (keep-dim :int))
("kthvalue" kth-value :void (values tensorptr) (indices th-long-tensor-ptr)
(tensor tensorptr) (k :long) (dim :int) (keep-dim :int))
("mode" mode :void (values tensorptr) (indices th-long-tensor-ptr) (tensor tensorptr)
(dim :int) (keep-dim :int))
("median" median :void (values tensorptr) (indices th-long-tensor-ptr) (tensor tensorptr)
(dim :int) (keep-dim :int))
("sum" sum :void (values tensorptr) (tensor tensorptr) (dim :int) (keep-dim :int))
("prod" prod :void (values tensorptr) (tensor tensorptr) (dim :int) (keep-dim :int))
("cumsum" cum-sum :void (result tensorptr) (tensor tensorptr) (dim :int))
("cumprod" cum-prod :void (result tensorptr) (tensor tensorptr) (dim :int))
("sign" sign :void (result tensorptr) (tensor tensorptr))
("trace" trace acreal (tensor tensorptr))
("cross" cross :void (result tensorptr) (a tensorptr) (b tensorptr) (dim :int))
("cmax" cmax :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cmin" cmin :void (result tensorptr) (tensor tensorptr) (src tensorptr))
("cmaxValue" cmax-value :void (result tensorptr) (tensor tensorptr) (value real))
("cminValue" cmin-value :void (result tensorptr) (tensor tensorptr) (value real))
("zeros" zeros :void (result tensorptr) (size th-long-storage-ptr))
("ones" ones :void (result tensorptr) (size th-long-storage-ptr))
("diag" diag :void (result tensorptr) (tensor tensorptr) (k :int))
("eye" eye :void (result tensorptr) (n :long) (m :long))
("arange" arange :void (result tensorptr) (xmin acreal) (xmax acreal) (step acreal))
("range" range :void (result tensorptr) (xmin acreal) (xmax acreal) (step acreal))
("randperm" rand-perm :void (result tensorptr) (generator th-generator-ptr) (n :long))
("reshape" reshape :void (result tensorptr) (tensor tensorptr) (size th-long-storage-ptr))
("sort" sort :void (rtensor tensorptr) (itensor th-long-tensor-ptr) (tensor tensorptr)
(dim :int) (discending-order :int))
("topk" topk :void (rtensor tensorptr) (itensor th-long-tensor-ptr) (tensor tensorptr)
(k :long) (dim :int) (dir :int) (sorted :int))
("tril" tril :void (result tensorptr) (tensor tensorptr) (k :long))
("triu" triu :void (result tensorptr) (tensor tensorptr) (k :long))
("cat" cat :void (result tensorptr) (a tensorptr) (b tensorptr) (dim :int))
("catArray" cat-array :void (reuslt tensorptr) (inputs (:pointer tensorptr))
(num-inputs :int) (dimension :int))
("equal" equal :int (a tensorptr) (b tensorptr))
("ltValue" lt-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("leValue" le-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("gtValue" gt-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("geValue" ge-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("neValue" ne-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("eqValue" eq-value :void (result th-byte-tensor-ptr) (tensor tensorptr) (value real))
("ltValueT" lt-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("leValueT" le-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("gtValueT" gt-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("geValueT" ge-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("neValueT" ne-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("eqValueT" eq-value-t :void (result tensorptr) (tensor tensorptr) (value real))
("ltTensor" lt-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("leTensor" le-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("gtTensor" gt-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("gtTensor" ge-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("neTensor" ne-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("eqTensor" eq-tensor :void (result th-byte-tensor-ptr) (a tensorptr) (b tensorptr))
("ltTensorT" lt-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("leTensorT" le-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("gtTensorT" gt-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("gtTensorT" ge-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("neTensorT" ne-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("eqTensorT" eq-tensor-t :void (result tensorptr) (a tensorptr) (b tensorptr))
("validXCorr2Dptr" valid-x-corr-2d-ptr :void (res realptr) (alpha real)
(ten realptr) (ir :long) (ic :long) (k realptr) (kr :long) (kc :long)
(sr :long) (sc :long))
("validConv2Dptr" valid-conv-2d-ptr :void (res realptr) (alpha real)
(ten realptr) (ir :long) (ic :long) (k realptr) (kr :long) (kc :long)
(sr :long) (sc :long))
("fullXCorr2Dptr" full-x-corr-2d-ptr :void (res realptr) (alpha real)
(ten realptr) (ir :long) (ic :long) (k realptr) (kr :long) (kc :long)
(sr :long) (sc :long))
("fullConv2Dptr" full-conv-2d-ptr :void (res realptr) (alpha real)
(ten realptr) (ir :long) (ic :long) (k realptr) (kr :long) (kc :long)
(sr :long) (sc :long))
("validXCorr2DRevptr" valid-x-corr-2d-rev-ptr :void (res realptr) (alpha real)
(ten realptr) (ir :long) (ic :long) (k realptr) (kr :long) (kc :long)
(sr :long) (sc :long))
("conv2DRevger" conv-2d-rev-ger :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long))
("conv2DRevgerm" conv-2d-rev-germ :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long))
("conv2Dger" conv-2d-ger :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv2Dmv" conv-2d-mv :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv2Dmm" conv-2d-mm :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv2Dmul" conv-2d-mul :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv2Dcmul" conv-2d-cmul :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (srow :long) (scol :long)
(vf :string) (xc :string))
("validXCorr3Dptr" valid-x-corr-3d-ptr :void (res realptr) (alpha real) (ten realptr)
(it :long) (ir :long) (ic :long) (k realptr)
(kt :long) (kr :long) (kc :long) (st :long) (sr :long) (sc :long))
("validConv3Dptr" valid-conv-3d-ptr :void (res realptr) (alpha real) (ten realptr)
(it :long) (ir :long) (ic :long) (k realptr)
(kt :long) (kr :long) (kc :long) (st :long) (sr :long) (sc :long))
("fullXCorr3Dptr" full-x-corr-3d-ptr :void (res realptr) (alpha real) (ten realptr)
(it :long) (ir :long) (ic :long) (k realptr)
(kt :long) (kr :long) (kc :long) (st :long) (sr :long) (sc :long))
("fullConv3Dptr" full-conv-3d-ptr :void (res realptr) (alpha real) (ten realptr)
(it :long) (ir :long) (ic :long) (k realptr)
(kt :long) (kr :long) (kc :long) (st :long) (sr :long) (sc :long))
("validXCorr3DRevptr" valid-x-corr-3d-rev-ptr :void (res realptr) (alpha real) (ten realptr)
(it :long) (ir :long) (ic :long) (k realptr)
(kt :long) (kr :long) (kc :long) (st :long) (sr :long) (sc :long))
("conv3DRevger" conv-3d-rev-ger :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (sdepth :long) (srow :long) (scol :long))
("conv3Dger" conv-3d-ger :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (sdepth :long) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv3Dmv" conv-3d-mv :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (sdepth :long) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv3Dmul" conv-3d-mul :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (sdepth :long) (srow :long) (scol :long)
(vf :string) (xc :string))
("conv3Dcmul" conv-3d-cmul :void (result tensorptr) (beta real) (alpha real)
(tensor tensorptr) (k tensorptr) (sdepth :long) (srow :long) (scol :long)
(vf :string) (xc :string))))
;; Instantiate the CFFI bindings described in *TH-TENSOR-FUNCTIONS* once for
;; every tensor type entry in *TH-TYPE-INFOS*.  Each entry supplies, in order,
;; the concrete real type (2nd), the C-name prefix (3rd) and the accumulation
;; real type (4th); MAKE-DEFCFUN-TENSOR expands one spec into a DEFCFUN form,
;; which we EVAL to define the binding at load time.
(dolist (type-info *th-type-infos*)
  (let ((real   (second type-info))
        (prefix (third type-info))
        (acreal (fourth type-info)))
    (dolist (spec *th-tensor-functions*)
      (eval (make-defcfun-tensor spec prefix real acreal)))))
;; #if defined(TH_REAL_IS_BYTE)
;; void THTensor_(getRNGState)(THGenerator *_generator, THTensor *self);
;; Byte-type-only binding (see the #if above): per its name, presumably copies
;; the random generator's internal state into TENSOR -- NOTE(review): direction
;; inferred from the C name; confirm against the TH sources.
(cffi:defcfun ("THByteTensor_getRNGState" th-byte-tensor-get-rng-state) :void
(generator th-generator-ptr)
(tensor th-byte-tensor-ptr))
;; void THTensor_(setRNGState)(THGenerator *_generator, THTensor *self);
;; Counterpart of the above: presumably restores the generator state from a
;; byte tensor previously filled by getRNGState -- same caveat as above.
(cffi:defcfun ("THByteTensor_setRNGState" th-byte-tensor-set-rng-state) :void
(generator th-generator-ptr)
(tensor th-byte-tensor-ptr))
;; #endif /* TH_REAL_IS_BYTE */
;; #if defined(TH_REAL_IS_BYTE)
;; int THTensor_(logicalall)(THTensor *self);
;; Byte-type-only reduction returning a C int used as a boolean.
;; NOTE(review): "all elements nonzero" semantics inferred from the name;
;; confirm against the TH sources.
(cffi:defcfun ("THByteTensor_logicalall" th-byte-tensor-logical-all) :int
(tensor th-byte-tensor-ptr))
;; int THTensor_(logicalany)(THTensor *self);
;; "Any element nonzero" counterpart -- same caveat as above.
(cffi:defcfun ("THByteTensor_logicalany" th-byte-tensor-logical-any) :int
(tensor th-byte-tensor-ptr))
;; #endif /* TH_REAL_IS_BYTE */
;; Specs for the TH functions that exist only for the floating-point tensor
;; types (the generator loop below iterates (LAST *TH-TYPE-INFOS* 2)).
;; Entry format: (c-name lisp-name return-type (arg type)...), where the
;; symbols REAL / ACREAL / TENSORPTR / REALPTR are substituted per concrete
;; type by MAKE-DEFCFUN-TENSOR before the DEFCFUN form is evaluated.
(defparameter *th-float-tensor-functions*
;; pointwise negation and reciprocal
'(("neg" neg :void (result tensorptr) (tensor tensorptr))
("cinv" cinv :void (result tensorptr) (tensor tensorptr))
;; random sampling into TENSOR, driven by a TH generator
("uniform" uniform :void (tensor tensorptr) (generator th-generator-ptr)
(a :double) (b :double))
("normal" normal :void (tensor tensorptr) (generator th-generator-ptr)
(mean :double) (stdv :double))
("exponential" exponential :void (tensor tensorptr) (generator th-generator-ptr)
(lam :double))
("cauchy" cauchy :void (tensor tensorptr) (generator th-generator-ptr)
(median :double) (sigma :double))
("logNormal" log-normal :void (tensor tensorptr) (generator th-generator-ptr)
(mean :double) (stdv :double))
("rbeta" rbeta :void (tensor tensorptr) (generator th-generator-ptr)
(a :double) (b :double))
("rgamma" rgamma :void (tensor tensorptr) (generator th-generator-ptr)
(shape :double) (scale :double))
("multinomial" multinomial :void (tensor tensorptr) (generator th-generator-ptr)
(prob-dist tensorptr) (n-sample :int) (replacement :int))
("multinomialAliasSetup" multinomial-alias-setup :void
(prob-dist tensorptr) (j th-long-tensor-ptr) (q tensorptr))
("multinomialAliasDraw" multinomial-alias-draw :void (tensor tensorptr)
(generator th-generator-ptr) (j th-long-tensor-ptr) (q tensorptr))
;; pointwise transcendental / special functions (RESULT <- f(TENSOR))
("sigmoid" sigmoid :void (result tensorptr) (tensor tensorptr))
("log" log :void (result tensorptr) (tensor tensorptr))
("gamma" gamma :void (result tensorptr) (tensor tensorptr))
("lgamma" lgamma :void (result tensorptr) (tensor tensorptr))
("erf" erf :void (result tensorptr) (tensor tensorptr))
("erfc" erfc :void (result tensorptr) (tensor tensorptr))
("polygamma" polygamma :void (n :int) (input tensorptr) (output tensorptr))
("lbeta" lbeta :void (a tensorptr) (b tensorptr) (output tensorptr))
("log1p" log1p :void (result tensorptr) (tensor tensorptr))
("exp" exp :void (result tensorptr) (tensor tensorptr))
("cos" cos :void (result tensorptr) (tensor tensorptr))
("acos" acos :void (result tensorptr) (tensor tensorptr))
("cosh" cosh :void (result tensorptr) (tensor tensorptr))
("sin" sin :void (result tensorptr) (tensor tensorptr))
("asin" asin :void (result tensorptr) (tensor tensorptr))
("sinh" sinh :void (result tensorptr) (tensor tensorptr))
("tan" tan :void (result tensorptr) (tensor tensorptr))
("atan" atan :void (result tensorptr) (tensor tensorptr))
("atan2" atan2 :void (result tensorptr) (tensorx tensorptr) (tensory tensorptr))
("tanh" tanh :void (result tensorptr) (tensor tensorptr))
("pow" pow :void (result tensorptr) (tensor tensorptr) (value real))
("tpow" tpow :void (result tensorptr) (value real) (tensor tensorptr))
("sqrt" sqrt :void (result tensorptr) (tensor tensorptr))
("rsqrt" rsqrt :void (result tensorptr) (tensor tensorptr))
("ceil" ceil :void (result tensorptr) (tensor tensorptr))
("floor" floor :void (result tensorptr) (tensor tensorptr))
("round" round :void (result tensorptr) (tensor tensorptr))
("abs" abs :void (result tensorptr) (tensor tensorptr))
("trunc" trunc :void (result tensorptr) (tensor tensorptr))
("frac" frac :void (result tensorptr) (tensor tensorptr))
("lerp" lerp :void (result tensorptr) (a tensorptr) (b tensorptr) (weight real))
;; reductions along a dimension / over the whole tensor
("mean" mean :void (result tensorptr) (tensor tensorptr) (dim :int) (keep-dim :int))
("std" std :void (result tensorptr) (tensor tensorptr) (dim :int) (biased :int) (keep-dim :int))
("var" var :void (result tensorptr) (tensor tensorptr) (dim :int) (biased :int) (keep-dim :int))
;; NOTE(review): "varall" is bound twice -- here as VARALL with a :double
;; return, and again below as VAL-ALL (likely a typo for VAR-ALL) with an
;; ACREAL return.  Both bindings are generated; left as-is so existing
;; callers of either Lisp name keep working.  TODO: confirm and deduplicate.
("varall" varall :double (tensor tensorptr) (biased :int))
("norm" norm :void (res tensorptr) (tensor tensorptr) (value real) (dim :int) (keep-dim :int))
("renorm" renorm :void (res tensorptr) (tensor tensorptr) (value real) (dim :int) (maxnorm real))
("dist" dist acreal (a tensorptr) (b tensorptr) (value real))
("histc" histc :void (hist tensorptr) (tensor tensorptr) (nbins :long)
(min-value real) (max-value real))
("bhistc" bhistc :void (hist tensorptr) (tensor tensorptr) (nbins :long)
(min-value real) (max-value real))
("meanall" mean-all acreal (tensor tensorptr))
("varall" val-all acreal (tensor tensorptr) (biased :int))
("stdall" std-all acreal (tensor tensorptr) (biased :int))
("normall" norm-all acreal (tensor tensorptr) (value real))
;; sequence / random tensor constructors
("linspace" linspace :void (result tensorptr) (a real) (b real) (n :long))
("logspace" logspace :void (result tensorptr) (a real) (b real) (n :long))
("rand" rand :void (result tensorptr) (generator th-generator-ptr) (size th-long-storage-ptr))
("randn" randn :void (result tensorptr) (generator th-generator-ptr) (size th-long-storage-ptr))
;; LAPACK-backed linear-algebra entry points (tensor-level wrappers)
("trtrs" trtrs :void (rb tensorptr) (ra tensorptr) (b tensorptr) (a tensorptr) (uplo :string) (trans :string) (diag :string))
("gesv" gesv :void (rb tensorptr) (ra tensorptr) (b tensorptr) (a tensorptr))
("gels" gels :void (rb tensorptr) (ra tensorptr) (b tensorptr) (a tensorptr))
("syev" syev :void (re tensorptr) (rv tensorptr) (a tensorptr) (jobz :string) (uplo :string))
("geev" geev :void (re tensorptr) (rv tensorptr) (a tensorptr) (jobvr :string))
("gesvd" gesvd :void (ru tensorptr) (rs tensorptr) (rv tensorptr) (a tensorptr) (jobu :string))
("gesvd2" gesvd2 :void (ru tensorptr) (rs tensorptr) (rv tensorptr) (ra tensorptr)
(a tensorptr) (jobu :string))
("getri" getri :void (ra tensorptr) (a tensorptr))
("potrf" potrf :void (ra tensorptr) (a tensorptr) (uplo :string))
("potrs" potrs :void (rb tensorptr) (b tensorptr) (a tensorptr) (uplo :string))
("potri" potri :void (ra tensorptr) (a tensorptr) (uplo :string))
("qr" qr :void (rq tensorptr) (rr tensorptr) (a tensorptr))
("geqrf" geqrf :void (ra tensorptr) (rtau tensorptr) (a tensorptr))
("orgqr" orgqr :void (ra tensorptr) (a tensorptr) (tau tensorptr))
("ormqr" ormqr :void (ra tensorptr) (a tensorptr) (tau tensorptr) (c tensorptr)
(side :string) (trans :string))
("pstrf" pstrf :void (ra tensorptr) (rpiv th-int-tensor-ptr) (a tensorptr) (uplo :string)
(tol real))
("btrifact" btrifact :void (ra tensorptr) (rpivots th-int-tensor-ptr)
(rinfo th-int-tensor-ptr) (pivot :int) (a tensorptr))
("btrisolve" btrisolve :void (rb tensorptr) (b tensorptr) (atf tensorptr)
(pivots th-int-tensor-ptr))))
;; Instantiate the float-only function bindings, but only for the last two
;; entries of *TH-TYPE-INFOS* (presumably the float and double types --
;; the only ones these C functions are defined for).
(dolist (type-info (last *th-type-infos* 2))
  (let ((real   (second type-info))
        (prefix (third type-info))
        (acreal (fourth type-info)))
    (dolist (spec *th-float-tensor-functions*)
      (eval (make-defcfun-tensor spec prefix real acreal)))))
;; Specs for the per-type THBlas_(name) wrappers (BLAS levels 1-3).
;; Entry format: (c-name lisp-name return-type (arg type)...); the symbols
;; REAL / REALPTR are substituted per concrete type by MAKE-DEFCFUN-BLAS.
(defparameter *th-blas-functions*
  '(("swap" swap :void (n :long) (x realptr) (incx :long) (y realptr) (incy :long))
    ("scal" scal :void (n :long) (a real) (x realptr) (incx :long))
    ("copy" copy :void (n :long) (x realptr) (incx :long) (y realptr) (incy :long))
    ("axpy" axpy :void (n :long) (a real) (x realptr) (incx :long) (y realptr) (incy :long))
    ;; FIX: BLAS dot (cf. sdot/ddot) is a *function*, not a subroutine, and
    ;; THBlas_(dot) returns the scalar product.  It was declared :void here,
    ;; which silently discarded the return value and made the binding useless;
    ;; declaring the REAL return is backward compatible for callers that
    ;; ignored the (previously absent) result.
    ("dot" dot real (n :long) (x realptr) (incx :long) (y realptr) (incy :long))
    ;; level 2: matrix-vector multiply and rank-1 update
    ("gemv" gemv :void (trans :char) (m :long) (n :long) (alpha real) (a realptr) (lda :long)
     (x realptr) (incx :long) (beta real) (y realptr) (incy :long))
    ("ger" ger :void (m :long) (n :long) (alpha real) (x realptr) (incx :long) (y realptr)
     (incy :long) (a realptr) (lda :long))
    ;; level 3: general matrix-matrix multiply
    ("gemm" gemm :void (transa :char) (transb :char) (m :long) (n :long) (k :long)
     (alpha real) (a realptr) (lda :long) (b realptr) (ldb :long) (beta real) (c realptr)
     (ldc :long))))
;; Instantiate the BLAS wrapper bindings in *TH-BLAS-FUNCTIONS* for every
;; tensor type entry in *TH-TYPE-INFOS*.
(dolist (type-info *th-type-infos*)
  (let ((real   (second type-info))
        (prefix (third type-info))
        (acreal (fourth type-info)))
    (dolist (spec *th-blas-functions*)
      (eval (make-defcfun-blas spec prefix real acreal)))))
;; Specs for the per-type THLapack_(name) wrappers.  All are :void with the
;; LAPACK status reported through the trailing (info (:pointer :int))
;; out-parameter.  Entry format: (c-name lisp-name return-type (arg type)...);
;; REAL / REALPTR are substituted per concrete type by MAKE-DEFCFUN-LAPACK.
(defparameter *th-lapack-functions*
;; linear solves and least squares
'(("gesv" gesv :void (n :int) (nrhs :int) (a realptr) (lda :int) (ipiv (:pointer :int))
(b realptr) (ldb :int) (info (:pointer :int)))
("trtrs" trtrs :void (uplo :char) (trans :char) (diag :char) (n :int) (nrhs :int)
(a realptr) (lda :int) (b realptr) (ldb :int) (info (:pointer :int)))
("gels" gels :void (trans :char) (m :int) (n :int) (nrhs :int) (a realptr) (lda :int)
(b realptr) (ldb :int) (work realptr) (lwork :int) (info (:pointer :int)))
;; eigen- and singular-value decompositions
("syev" syev :void (jobz :char) (uplo :char) (n :int) (a realptr) (lda :int) (w realptr)
(work realptr) (lword :int) (info (:pointer :int)))
("geev" geev :void (jobvl :char) (jobvr :char) (n :int) (a realptr) (lda :int) (wr realptr)
(wi realptr) (vl realptr) (ldvl :int) (vr realptr) (ldvr :int) (work realptr) (lwork :int)
(info (:pointer :int)))
("gesvd" gesvd :void (jobu :char) (jobvt :char) (m :int) (n :int) (a realptr) (lda :int)
(s realptr) (u realptr) (ldu :int) (vt realptr) (ldvt :int) (work realptr) (lwork :int)
(info (:pointer :int)))
;; LU factorization, solve, and inverse
("getrf" getrf :void (m :int) (n :int) (a realptr) (lda :int) (ipiv (:pointer :int))
(info (:pointer :int)))
("getrs" getrs :void (trans :char) (n :int) (nrhs :int) (a realptr) (lda :int)
(ipiv (:pointer :int)) (b realptr) (ldb :int) (info (:pointer :int)))
("getri" getri :void (n :int) (a realptr) (lda :int) (ipiv (:pointer :int))
(work realptr) (lwork :int) (info (:pointer :int)))
;; Cholesky factorization, inverse, solve, and pivoted variant
("potrf" potrf :void (uplo :char) (n :int) (a realptr) (lda :int) (info (:pointer :int)))
("potri" potri :void (uplo :char) (n :int) (a realptr) (lda :int) (info (:pointer :int)))
("potrs" potrs :void (uplo :char) (n :int) (nrhs :int) (a realptr) (lda :int) (b realptr)
(ldb :int) (info (:pointer :int)))
("pstrf" pstrf :void (uplo :char) (n :int) (a realptr) (lda :int) (piv (:pointer :int))
(rank (:pointer :int)) (tol real) (work realptr) (info (:pointer :int)))
;; QR factorization and related orthogonal-matrix operations
("geqrf" geqrf :void (m :int) (n :int) (a realptr) (lda :int) (tau realptr) (work realptr)
(lwork :int) (info (:pointer :int)))
("orgqr" orgqr :void (m :int) (n :int) (k :int) (a realptr) (lda :int) (tau realptr)
(work realptr) (lwork :int) (info (:pointer :int)))
("ormqr" ormqr :void (side :char) (trans :char) (m :int) (n :int) (k :int) (a realptr)
(lda :int) (tau realptr) (c realptr) (ldc :int) (work realptr) (lwork :int)
(info (:pointer :int)))))
;; Instantiate the LAPACK wrapper bindings in *TH-LAPACK-FUNCTIONS* for every
;; tensor type entry in *TH-TYPE-INFOS*.
(dolist (type-info *th-type-infos*)
  (let ((real   (second type-info))
        (prefix (third type-info))
        (acreal (fourth type-info)))
    (dolist (spec *th-lapack-functions*)
      (eval (make-defcfun-lapack spec prefix real acreal)))))
| 33,707
|
Common Lisp
|
.lisp
| 500
| 62.088
| 129
| 0.667691
|
chunsj/TH
| 59
| 8
| 0
|
GPL-3.0
|
9/19/2024, 11:26:04 AM (Europe/Amsterdam)
|
de736ab7b7f4496074059affb3785c1fe53079addf3e82618478e2ffcd82da5a
| 3,267
|
[
-1
] |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.